def __init__(self, logger_arg, log_file, debug_mode=False):
        """Constructor for KafkaUtils that sets up the logging module and AmbariHelper.
        """
        self.debug_mode = debug_mode
        self.logger = logger_arg
        self.logger.setLevel(logging.DEBUG)

        logging_format = '%(asctime)s - %(filename)s - %(name)s - %(levelname)s - %(message)s'
        # create console handler and set level to info
        handler = logging.StreamHandler()
        handler.setLevel(logging.INFO)
        formatter = logging.Formatter(logging_format)
        handler.setFormatter(formatter)
        self.logger.addHandler(handler)

        # create file handler and set level to debug
        handler = logging.FileHandler(log_file, 'a')
        handler.setLevel(logging.DEBUG)
        formatter = logging.Formatter(logging_format)
        handler.setFormatter(formatter)
        self.logger.addHandler(handler)

        self.logger.info('Log file: {0}'.format(log_file))

        self.ambari_helper = AmbariHelper()
        self.cluster_manifest = self.ambari_helper.get_cluster_manifest()
        self.cluster_name = self.cluster_manifest.deployment.cluster_name

    def getBrokerInformation(self):
        """Return the Zookeeper connect string, the broker host list and the
        broker connect string for this cluster, derived from the Ambari
        hosts API."""
        ambari_helper = AmbariHelper()
        cluster_manifest = ambari_helper.get_cluster_manifest()
        cluster_name = cluster_manifest.deployment.cluster_name
        hosts_result = ambari_helper.query_url(
            'clusters/{0}/hosts'.format(cluster_name))

        # Zookeeper nodes are identified by the VM name prefix from the
        # cluster manifest; each listens on the default client port 2181.
        zookeeper_prefix = cluster_manifest.settings[
            Constants.ZOOKEEPER_VM_NAME_PREFIX_SETTING_KEY]
        zookeepers = ','.join(
            h['Hosts']['host_name'] + ':2181'
            for h in hosts_result['items']
            if h['Hosts']['host_name'].startswith(zookeeper_prefix))
        self.logger.info("zookeepers: {0}\n".format(zookeepers))

        # Kafka brokers run on the worker nodes and listen on port 9092.
        workernode_prefix = cluster_manifest.settings[
            Constants.WORKERNODE_VM_NAME_PREFIX_SETTING_KEY]
        broker_hosts = [
            h['Hosts']['host_name']
            for h in hosts_result['items']
            if h['Hosts']['host_name'].startswith(workernode_prefix)]
        self.logger.info("broker_hosts: {0}\n".format(broker_hosts))

        brokers = ','.join(host + ':9092' for host in broker_hosts)
        self.logger.info("brokers: {0}\n".format(brokers))

        return zookeepers, broker_hosts, brokers
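
# A minimal usage sketch (hypothetical wiring; the logger name and log path
# are placeholders, and the Ambari endpoints above are assumed reachable
# from the node running this code):
#
#   utils = KafkaUtils(logging.getLogger('kafka_utils'), '/tmp/kafka_utils.log')
#   zookeepers, broker_hosts, brokers = utils.getBrokerInformation()
#   # `zookeepers` suits tools such as kafka-topics.sh --zookeeper;
#   # `brokers` suits a producer's broker/bootstrap list setting.
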
def get_kafka_log_dirs():
    """Return the list of Kafka data directories (log.dirs) from the
    current KAFKA service configuration in Ambari."""
    ah = AmbariHelper()
    json_output = ah.query_url(
        "clusters/" + ah.cluster_name() +
        "/configurations/service_config_versions?service_name.in(KAFKA)&is_current=true"
    )
    # Pick the kafka-broker config block out of the current service config.
    kafka_brokers_configs = [
        element for element in json_output["items"][0]["configurations"]
        if element["type"] == KAFKA_BROKER
    ][0]
    return kafka_brokers_configs["properties"]["log.dirs"].split(',')
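
# Example (hypothetical broker config): with log.dirs set to
# "/mnt/kafka/data1,/mnt/kafka/data2", this returns
#   ['/mnt/kafka/data1', '/mnt/kafka/data2'].
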
def get_zookeeper_connect_string():
    """Build a Zookeeper connect string (comma-separated host:port pairs)
    from the ZOOKEEPER_SERVER components registered in Ambari."""
    ah = AmbariHelper()
    hosts = ah.get_host_components()
    zk_hosts = [
        item["HostRoles"]["host_name"] + ZOOKEEPER_PORT
        for item in hosts["items"]
        if item["HostRoles"]["component_name"] == "ZOOKEEPER_SERVER"
    ]
    if zk_hosts:
        return ",".join(zk_hosts)
    raise Exception("Failed to get Zookeeper information from Ambari!")
def get_cluster_topology_json():
    """Fetch the cluster topology JSON document referenced by the cluster
    manifest and return it as raw text."""
    ambariHelper = AmbariHelper()
    cluster_manifest = ambariHelper.get_cluster_manifest()
    settings = cluster_manifest.settings
    if "cluster_topology_json_url" in settings:
        json_url = settings["cluster_topology_json_url"]
        logger.info("Retrieving Cluster Topology JSON document from URL: %s",
                    json_url)
        r = requests.get(json_url)
        topology_info = r.text
        logger.debug("Cluster Topology: %s", topology_info)
        return topology_info
    else:
        raise Exception(
            "Failed to get cluster_topology_json_url from cluster manifest")
from hdinsight_common.AmbariHelper import AmbariHelper
import requests, json, re
import subprocess, sys
import pty

if len(sys.argv) < 2:
    print("Error: Usage: restartdeadregionserverswithlocalhostissue.py <ssh-username>")
    sys.exit(1)

a = AmbariHelper()
result = a.query_url("clusters/" + a.cluster_name() + "/hosts")

# Locate the active HBase master among the zk* hosts and read its list of
# dead region servers from the master's JMX endpoint.
deadregionservers = []
for i in result['items']:
    if i['Hosts']['host_name'].startswith("zk"):
        result_hbase_metrics = a.query_url(
            "clusters/" + a.cluster_name() + "/hosts/" +
            i['Hosts']['host_name'] + "/host_components/HBASE_MASTER")
        if result_hbase_metrics['metrics']['hbase']['master']['IsActiveMaster'] == 'true':
            url = ("http://" + i['Hosts']['host_name'] +
                   ":16010/jmx?qry=Hadoop:service=HBase,name=Master,sub=Server")
            req = requests.get(url)
            res = req.json()
            deadregionservers = res['beans'][0]['tag.deadRegionServers'].split(';')

BASH_PATH="/bin/bash"
HBASE_DAEMON_PATH="/usr/hdp/current/hbase-regionserver/bin/hbase-daemon.sh"
HBASE_CONFIG_PATH="/usr/hdp/current/hbase-regionserver/conf"

COMMANDLOGTAIL="sudo tail -n100 /var/log/hbase/hbase-hbase-regionserver-wn*.log"
COMMANDSTOP="sudo -u hbase " + BASH_PATH + " " + HBASE_DAEMON_PATH + " --config " + HBASE_CONFIG_PATH + " stop regionserver"
COMMANDSTART="sudo -u hbase " + BASH_PATH + " " + HBASE_DAEMON_PATH + " --config " + HBASE_CONFIG_PATH + " start regionserver"
LOCALHOSTISSUESTRING = 'WARN  \[regionserver/localhost/127.0.0.1:16020\] regionserver.HRegionServer: error telling master we are up'
SSHUSER = sys.argv[1] 
for deadregionserver in filter(None, deadregionservers):
    # The loop body is truncated in the source snippet; presumably it uses
    # the COMMAND* strings above over ssh (via pty/subprocess and SSHUSER)
    # to tail the regionserver log, match LOCALHOSTISSUESTRING, and
    # stop/start the affected regionserver.
    pass


def parse_topo_info(cluster_topology_json, fqdn_suffix):
    """Extract worker node info (update domain, FQDN, rack) from the
    cluster topology JSON document."""
    print(cluster_topology_json)
    workernode_info = json.loads(
        cluster_topology_json)["hostGroups"]["workernode"]
    host_info = []
    for node in workernode_info:
        # Map each worker node's update domain onto a rack name so that
        # replicas get placed across update domains.
        host = {
            UPDATE_DOMAIN: str(node[UPDATE_DOMAIN]),
            FQDN: node[FQDN] + "." + str(fqdn_suffix),
            RACK: "/rack" + str(node[UPDATE_DOMAIN])
        }
        host_info.append(host)
    return host_info
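
# Example (hypothetical values, assuming the UPDATE_DOMAIN/FQDN/RACK key
# constants defined elsewhere in this module): a workernode entry with
# update domain "2" and FQDN "wn2-cluster", given fqdn_suffix
# "internal.cloudapp.net", yields
#   {UPDATE_DOMAIN: "2", FQDN: "wn2-cluster.internal.cloudapp.net", RACK: "/rack2"}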


ambariHelper = AmbariHelper()
cluster_topology_json = get_cluster_topology_json()
host_info = parse_topo_info(cluster_topology_json,
                            ambariHelper.get_fqdn().split('.', 1)[1])
cluster_name = ambariHelper.cluster_name()
# Push each node's rack assignment to Ambari, then restart stale services
# so the new rack topology takes effect.
for node in host_info:
    ambariHelper.request_url(
        "clusters/" + cluster_name + "/hosts/" + str(node[FQDN]), "PUT",
        json.dumps({"Hosts": {"rack_info": str(node[RACK])}}))
ambariHelper.restart_all_stale_services()