Example #1
    def post_upgrade_restart(self, env, upgrade_type=None):
        if upgrade_type == "nonrolling":
            return

        Logger.info("Executing Stack Upgrade post-restart")
        import params
        env.set_params(params)
        zk_server_host = random.choice(params.zookeeper_hosts)
        cli_shell = format(
            "{zk_cli_shell} -server {zk_server_host}:{client_port}")
        # Ensure that a quorum is still formed.
        unique = get_unique_id_and_date()
        create_command = format("echo 'create /{unique} mydata' | {cli_shell}")
        list_command = format("echo 'ls /' | {cli_shell}")
        delete_command = format("echo 'delete /{unique} ' | {cli_shell}")

        quorum_err_message = "Failed to establish zookeeper quorum"
        call_and_match_output(create_command, 'Created', quorum_err_message)
        call_and_match_output(list_command, r"\[.*?" + unique + r".*?\]",
                              quorum_err_message)
        call(delete_command)

        if params.client_port:
            check_leader_command = format(
                "echo stat | nc localhost {client_port} | grep Mode")
            code, out = call(check_leader_command, logoutput=False)
            if code == 0 and out:
                Logger.info(out)
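
All of these snippets lean on call_and_match_output from Ambari's resource_management.libraries.functions.validate. As a rough sketch (the upstream implementation may differ in details such as logging), it runs the command through shell.call and raises Fail unless the output matches the given regular expression:

# Rough sketch of the helper the examples assume; not a verbatim copy of
# resource_management.libraries.functions.validate.call_and_match_output.
import re

from resource_management.core import shell
from resource_management.core.exceptions import Fail


def call_and_match_output(command, regex_expression, err_message, **call_kwargs):
  # call_kwargs typically carries user=..., logoutput=..., etc.
  code, out = shell.call(command, logoutput=True, quiet=False, **call_kwargs)
  # Fail the check unless the expected pattern appears in the captured output.
  if not (out and re.search(regex_expression, out, re.IGNORECASE)):
    raise Fail(err_message)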
Example #2
    def service_check(self, env):
        import params
        env.set_params(params)

        # TODO, Kafka was introduced in HDP 2.2 but will not support running in a kerberized cluster until HDP 2.3 (tentatively)
        # Kafka uses its own Zookeeper instance and it does not yet have the capability of running in a secure mode.
        kafka_config = self.read_kafka_config()

        create_topic_cmd_created_output = "Created topic \"ambari_kafka_service_check\"."
        create_topic_cmd_exists_output = "Topic \"ambari_kafka_service_check\" already exists."

        source_cmd = format("source {conf_dir}/kafka-env.sh")
        create_topic_cmd = format(
            "{kafka_home}/bin/kafka-topics.sh --zookeeper {kafka_config[zookeeper.connect]} --create --topic ambari_kafka_service_check --partitions 1 --replication-factor 1"
        )
        command = source_cmd + " ; " + create_topic_cmd

        Logger.info("Running kafka create topic command: %s" % command)
        call_and_match_output(
            command,
            format(
                "({create_topic_cmd_created_output})|({create_topic_cmd_exists_output})"
            ),
            "Failed to check that topic exists",
            user=params.kafka_user)
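
The expected-output strings above go into the regular expression verbatim, so the '.' at the end of each message matches any character rather than a literal dot. A slightly stricter variant (my adjustment, not something the upstream check does) escapes the literals before building the alternation:

import re

created_output = 'Created topic "ambari_kafka_service_check".'
exists_output = 'Topic "ambari_kafka_service_check" already exists.'

# re.escape keeps '.' (and any other metacharacters) literal in the pattern.
expected_pattern = "({0})|({1})".format(re.escape(created_output), re.escape(exists_output))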
Example #3
    def service_check(self, env):
        import params
        env.set_params(params)

        url = "http://" + params.solr_server_host + ":" + str(
            params.solr_port) + "/solr/"

        if params.security_enabled:
            kinit_cmd = format(
                "{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};"
            )
            Execute(kinit_cmd, user=params.smokeuser, logoutput=True)

        create_collection_cmd = format(
            "SOLR_INCLUDE={solr_conf_dir}/solr.in.sh solr create -c smokeuser_ExampleCollection -s 2 -d data_driven_schema_configs"
        )
        Logger.info("Creating solr collection from example: %s" %
                    create_collection_cmd)
        Execute(create_collection_cmd, user=params.smokeuser, logoutput=True)

        list_collection_cmd = "su " + params.smokeuser + " -c 'curl -s --negotiate -u : " + url + "admin/collections?action=list'"
        list_collection_output = "<str>smokeuser_ExampleCollection</str>"
        Logger.info("List Collections: %s" % list_collection_cmd)
        call_and_match_output(
            list_collection_cmd, format("({list_collection_output})"),
            "Failed to create collection \"smokeuser_ExampleCollection\" or check that collection exists"
        )

        delete_collection_cmd = format(
            "SOLR_INCLUDE={solr_conf_dir}/solr.in.sh solr delete -c smokeuser_ExampleCollection"
        )
        Logger.info("Deleting solr collection : %s" % delete_collection_cmd)
        Execute(delete_collection_cmd, user=params.smokeuser, logoutput=True)
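
The list step builds an `su ... -c 'curl ...'` command by hand. Since call_and_match_output forwards keyword arguments to shell.call in the other examples, a simpler variant (an assumption on my part, not the original code) runs curl directly as the smoke user:

# Hedged variant of the list check above: rely on user= instead of wrapping curl in su.
# Assumes call_and_match_output passes user= through to shell.call, as in the
# ZooKeeper and Kafka examples, and reuses url/params from the snippet.
list_collection_cmd = "curl -s --negotiate -u : " + url + "admin/collections?action=list"
call_and_match_output(
    list_collection_cmd,
    "(<str>smokeuser_ExampleCollection</str>)",
    "Failed to create collection \"smokeuser_ExampleCollection\" or check that collection exists",
    user=params.smokeuser)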
Example #4
    def service_check(self, env):
        import params
        env.set_params(params)

        # TODO: the Kafka service check should be more robust; it should contact all the broker_hosts,
        # produce some messages, and check that the consumer reads the same number of messages.

        kafka_config = self.read_kafka_config()
        topic = "ambari_kafka_service_check"
        create_topic_cmd_created_output = "Created topic \"ambari_kafka_service_check\"."
        create_topic_cmd_exists_output = "Topic \"ambari_kafka_service_check\" already exists."
        source_cmd = format("source {conf_dir}/kafka-env.sh")
        topic_exists_cmd = format(
            "{kafka_home}/bin/kafka-topics.sh --zookeeper {kafka_config[zookeeper.connect]} --topic {topic} --list"
        )
        topic_exists_cmd_p = subprocess.Popen(topic_exists_cmd.split(" "),
                                              stdout=subprocess.PIPE,
                                              stderr=subprocess.PIPE)
        topic_exists_cmd_out, topic_exists_cmd_err = topic_exists_cmd_p.communicate(
        )
        # run the create-topic command only if the topic doesn't exist
        if topic not in topic_exists_cmd_out:
            create_topic_cmd = format(
                "{kafka_home}/bin/kafka-topics.sh --zookeeper {kafka_config[zookeeper.connect]} --create --topic {topic} --partitions 1 --replication-factor 1"
            )
            command = source_cmd + " ; " + create_topic_cmd
            Logger.info("Running kafka create topic command: %s" % command)
            call_and_match_output(
                command,
                format(
                    "({create_topic_cmd_created_output})|({create_topic_cmd_exists_output})"
                ),
                "Failed to check that topic exists",
                user=params.kafka_user)
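
Example #4 (and Example #9 below) shells out through subprocess.Popen. Under Python 3, communicate() returns bytes, so the `topic not in topic_exists_cmd_out` test raises TypeError. A hedged, Python-3-friendly variant of that step opens the pipes in text mode:

import subprocess

# universal_newlines=True makes communicate() return str rather than bytes,
# so the plain substring check on topic_exists_cmd_out keeps working.
topic_exists_cmd_p = subprocess.Popen(topic_exists_cmd.split(" "),
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE,
                                      universal_newlines=True)
topic_exists_cmd_out, topic_exists_cmd_err = topic_exists_cmd_p.communicate()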
Example #5
  def post_upgrade_restart(self, env, upgrade_type=None):
    if upgrade_type == "nonrolling":
      return

    Logger.info("Executing Stack Upgrade post-restart")
    import params
    env.set_params(params)
    zk_server_host = random.choice(params.zookeeper_hosts)
    cli_shell = format("{zk_cli_shell} -server {zk_server_host}:{client_port}")
    # Ensure that a quorum is still formed.
    unique = get_unique_id_and_date()
    create_command = format("echo 'create /{unique} mydata' | {cli_shell}")
    list_command = format("echo 'ls /' | {cli_shell}")
    delete_command = format("echo 'delete /{unique} ' | {cli_shell}")

    quorum_err_message = "Failed to establish zookeeper quorum"
    call_and_match_output(create_command, 'Created', quorum_err_message, user=params.zk_user)
    call_and_match_output(list_command, r"\[.*?" + unique + r".*?\]", quorum_err_message, user=params.zk_user)
    shell.call(delete_command, user=params.zk_user)

    if params.client_port:
      check_leader_command = format("echo stat | nc localhost {client_port} | grep Mode")
      code, out = shell.call(check_leader_command, logoutput=False)
      if code == 0 and out:
        Logger.info(out)
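
The leader check at the end pipes ZooKeeper's 'stat' four-letter command through nc. Where nc is not installed, the same probe can be done with a plain socket; this is a sketch of an alternative, not part of the original script:

import socket


def zk_mode(host, port, timeout=5.0):
  """Send ZooKeeper's 'stat' four-letter command and return the 'Mode: ...' line, if any."""
  # Note: ZooKeeper 3.5+ only answers four-letter commands listed in 4lw.commands.whitelist.
  with socket.create_connection((host, int(port)), timeout=timeout) as sock:
    sock.sendall(b"stat")
    chunks = []
    while True:
      data = sock.recv(4096)
      if not data:  # the server closes the connection after answering
        break
      chunks.append(data)
  for line in b"".join(chunks).decode("utf-8", "replace").splitlines():
    if line.startswith("Mode:"):
      return line  # e.g. "Mode: leader", "Mode: follower" or "Mode: standalone"
  return None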
Example #6
    def service_check(self, env):
        import params
        env.set_params(params)

        # TODO: the Kafka service check should be more robust; it should contact all the broker_hosts,
        # produce some messages, and check that the consumer reads the same number of messages.

        kafka_config = self.read_kafka_config()
        topic = "ambari_kafka_service_check"
        create_topic_cmd_created_output = "Created topic \"ambari_kafka_service_check\"."
        create_topic_cmd_exists_output = "Topic \"ambari_kafka_service_check\" already exists."
        source_cmd = format("source {conf_dir}/kafka-env.sh")
        topic_exists_cmd = format(
            source_cmd + " ; " +
            "{kafka_home}/bin/kafka-topics.sh --zookeeper {kafka_config[zookeeper.connect]} --topic {topic} --list"
        )
        topic_exists_cmd_code, topic_exists_cmd_out = shell.call(
            topic_exists_cmd,
            logoutput=True,
            quiet=False,
            user=params.kafka_user)

        if topic_exists_cmd_code > 0:
            raise Fail(
                "Error encountered when attempting to list topics: {0}".format(
                    topic_exists_cmd_out))

        if not params.kafka_delete_topic_enable:
            Logger.info(
                'Kafka delete.topic.enable is not enabled. Skipping topic creation: %s'
                % topic)
            return

        # (re)create the check topic, deleting it first if it already exists
        delete_topic_cmd = format(
            "{kafka_home}/bin/kafka-topics.sh --zookeeper {kafka_config[zookeeper.connect]} --delete --topic {topic}"
        )
        create_topic_cmd = format(
            "{kafka_home}/bin/kafka-topics.sh --zookeeper {kafka_config[zookeeper.connect]} --create --topic {topic} --partitions 1 --replication-factor 1"
        )
        if topic in topic_exists_cmd_out:
            # run delete topic and recreate the topic command only if the topic exists
            command = source_cmd + " ; " + delete_topic_cmd + ";" + create_topic_cmd
        else:
            # run create topic command
            command = source_cmd + " ; " + create_topic_cmd
        Logger.info("Running kafka create topic command: %s" % command)
        call_and_match_output(
            command,
            format(
                "({create_topic_cmd_created_output})|({create_topic_cmd_exists_output})"
            ),
            "Failed to check that topic exists",
            user=params.kafka_user)
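
For reading the Kafka checks in isolation, a hypothetical stub of the params module they import could look as follows; the attribute names are taken from the snippets above, while the values are placeholders rather than anything defined on this page:

# Hypothetical params stub -- names come from the snippets, values are illustrative only.
conf_dir = "/etc/kafka/conf"
kafka_home = "/usr/hdp/current/kafka-broker"
kafka_user = "kafka"
kafka_delete_topic_enable = True  # mirrors Kafka's delete.topic.enable setting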
Example #7
def solr_service(action='start'):
  import params
  cmd = format("{solr_home}/bin/solr")

  if action == 'start':

    if params.security_enabled:
      if params.solr_principal is None:
        solr_principal_with_host = 'missing_principal'
      else:
        solr_principal_with_host = params.solr_principal.replace("_HOST", params.hostname)
      kinit_cmd = format("{kinit_path_local} -kt {solr_keytab} {solr_principal_with_host};")
      Execute(kinit_cmd, user=params.solr_user)

    Execute('echo "Creating znode" ' + params.zookeeper_chroot)
    Execute(params.cloud_scripts + '/zkcli.sh -zkhost ' + params.zookeeper_hosts_list + ' -cmd makepath ' + params.zookeeper_chroot, user=params.solr_user, ignore_failures=True)

    # copy titan directory and jar for titan and solr integration
    if 'titan-env' in params.configuration_tags and not os.path.exists(params.solr_conf_trg_file):
      Execute(("cp", "-r", params.titan_solr_conf_dir, params.solr_conf_trg_dir), sudo=True)
      Execute(("cp", params.titan_solr_jar_file, params.solr_jar_trg_file), sudo=True)
      Execute(("chmod", "644", params.solr_jar_trg_file), sudo=True)
      Execute(("mv", params.solr_solr_conf_dir, params.solr_titan_conf_dir), sudo=True)

    daemon_cmd = format("SOLR_INCLUDE={solr_conf_dir}/solr.in.sh {cmd} start -c -V")
    no_op_test = format("ls {solr_pid_file} >/dev/null 2>&1 && ps `cat {solr_pid_file}` >/dev/null 2>&1")
    Execute(daemon_cmd,
            not_if=no_op_test,
            user=params.solr_user
    )

    if not params.upgrade_direction:  # only do this for a fresh IOP 4.2 cluster
      # create collection for titan and solr integration
      if 'titan-env' in params.configuration_tags and os.path.exists(params.solr_conf_trg_file):
        create_collection_cmd = format("SOLR_INCLUDE={solr_conf_dir}/solr.in.sh solr create -c {titan_solr_configset} -s 2 -d {titan_solr_configset}")
        create_collection_output = "success"
        create_collection_exists_output = format("Collection '{titan_solr_configset}' already exists!")
        call_and_match_output(create_collection_cmd, format("({create_collection_output})|({create_collection_exists_output})"), "Failed to create collection")

  elif action == 'stop':
    daemon_cmd = format("export SOLR_PID_DIR=" + params.pid_dir + "; SOLR_INCLUDE={solr_conf_dir}/solr.in.sh {cmd} stop -all")
    no_op_test = format("! ((`SOLR_INCLUDE={solr_conf_dir}/solr.in.sh {cmd} status |grep process |wc -l`))")
    rm_pid = format("rm -f {solr_pid_file}")
    Execute(daemon_cmd,
            not_if=no_op_test,
            user=params.solr_user
    )
    Execute(rm_pid)
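
solr_service() is a helper rather than an entry point. A hypothetical caller (the class and wiring are illustrative; only the start/stop signature follows Ambari's Script interface) might look like this:

# Illustrative caller sketch: start/stop(self, env, upgrade_type=None) matches
# Ambari's Script interface; the SolrServer class itself is hypothetical here.
from resource_management.libraries.script.script import Script


class SolrServer(Script):
  def start(self, env, upgrade_type=None):
    import params
    env.set_params(params)
    solr_service(action='start')

  def stop(self, env, upgrade_type=None):
    import params
    env.set_params(params)
    solr_service(action='stop')


if __name__ == "__main__":
  SolrServer().execute()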
Example #8
  def service_check(self, env):
    import params
    env.set_params(params)

    # TODO, Kafka was introduced in HDP 2.2 but will not support running in a kerberized cluster until HDP 2.3 (tentatively)
    # Kafka uses its own Zookeeper instance and it does not yet have the capability of running in a secure mode.
    kafka_config = self.read_kafka_config()

    create_topic_cmd_created_output = "Created topic \"ambari_kafka_service_check\"."
    create_topic_cmd_exists_output = "Topic \"ambari_kafka_service_check\" already exists."

    source_cmd = format("source {conf_dir}/kafka-env.sh")
    create_topic_cmd = format("{kafka_home}/bin/kafka-topics.sh --zookeeper {kafka_config[zookeeper.connect]} --create --topic ambari_kafka_service_check --partitions 1 --replication-factor 1")
    command = source_cmd + " ; " + create_topic_cmd

    Logger.info("Running kafka create topic command: %s" % command)
    call_and_match_output(command, format("({create_topic_cmd_created_output})|({create_topic_cmd_exists_output})"), "Failed to check that topic exists")
Example #9
  def service_check(self, env):
    import params
    env.set_params(params)

    # TODO: the Kafka service check should be more robust; it should contact all the broker_hosts,
    # produce some messages, and check that the consumer reads the same number of messages.

    kafka_config = self.read_kafka_config()
    topic = "ambari_kafka_service_check"
    create_topic_cmd_created_output = "Created topic \"ambari_kafka_service_check\"."
    create_topic_cmd_exists_output = "Topic \"ambari_kafka_service_check\" already exists."
    source_cmd = format("source {conf_dir}/kafka-env.sh")
    topic_exists_cmd = format("{kafka_home}/bin/kafka-topics.sh --zookeeper {kafka_config[zookeeper.connect]} --topic {topic} --list")
    topic_exists_cmd_p = subprocess.Popen(topic_exists_cmd.split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    topic_exists_cmd_out, topic_exists_cmd_err = topic_exists_cmd_p.communicate()
    # run the create-topic command only if the topic doesn't exist
    if topic not in topic_exists_cmd_out:
      create_topic_cmd = format("{kafka_home}/bin/kafka-topics.sh --zookeeper {kafka_config[zookeeper.connect]} --create --topic {topic} --partitions 1 --replication-factor 1")
      command = source_cmd + " ; " + create_topic_cmd
      Logger.info("Running kafka create topic command: %s" % command)
      call_and_match_output(command, format("({create_topic_cmd_created_output})|({create_topic_cmd_exists_output})"), "Failed to check that topic exists", user=params.kafka_user)
Example #10
  def service_check(self, env):
    import params
    env.set_params(params)

    # TODO: the ClickHouse service check should be more robust; it should contact all the hosts,
    # insert some rows, and check that the distributed count matches.

    clickhouse_hosts_config = self.read_clickhouse_config()

    create_local_table = 'CREATE TABLE ambari_clickhouse_service_check_local (FlightDate Date,Year UInt16) ENGINE = MergeTree(FlightDate, (Year, FlightDate), 8192)'
    create_all_table = 'CREATE TABLE ambari_clickhouse_service_check_all AS ambari_clickhouse_service_check_local ENGINE = Distributed(ck_cluster, default, ambari_clickhouse_service_check_local, rand())'

    # drop any leftover service-check tables on every node
    import os

    for host in clickhouse_hosts_config:
      table_list_sql = format("/usr/bin/clickhouse-client -h {host} -d default -m -u admin --password admin -q 'show tables' ")
      table_list = os.popen(table_list_sql).read().split("\n")

      if 'ambari_clickhouse_service_check_local' in table_list:
        drop_table_sql_local = format("/usr/bin/clickhouse-client -h {host} -d default -m -u admin --password admin -q 'drop table ambari_clickhouse_service_check_local' ")
        Logger.info("Running clickhouse-client drop table command on %s: %s" % (host, drop_table_sql_local))
        Execute(
          drop_table_sql_local,
          user=params.clickhouse_user,
          timeout=900,
          logoutput=True)

      if 'ambari_clickhouse_service_check_all' in table_list:
        drop_table_sql_all = format("/usr/bin/clickhouse-client -h {host} -d default -m -u admin --password admin -q 'drop table ambari_clickhouse_service_check_all' ")
        Logger.info("Running clickhouse-client drop table command on %s: %s" % (host, drop_table_sql_all))
        Execute(
          drop_table_sql_all,
          user=params.clickhouse_user,
          timeout=900,
          logoutput=True)

    # create the check tables on every node and insert one row per host
    for host in clickhouse_hosts_config:

      create_sql_cmd_local = format("/usr/bin/clickhouse-client -h {host} -d default -m -u admin --password admin -q '{create_local_table}' ")

      Execute(
        create_sql_cmd_local,
        user=params.clickhouse_user,
        timeout=900,
        logoutput=True)

      create_sql_cmd_all = format("/usr/bin/clickhouse-client -h {host} -d default -m -u admin --password admin -q '{create_all_table}' ")

      Execute(
        create_sql_cmd_all,
        user=params.clickhouse_user,
        timeout=900,
        logoutput=True)

      insert_sql_cmd_all = format("/usr/bin/clickhouse-client -h {host} -d default -m -u admin --password admin -q 'insert into ambari_clickhouse_service_check_local (FlightDate,Year) values(now(),toYear(now()))' ")

      Execute(
        insert_sql_cmd_all,
        user=params.clickhouse_user,
        timeout=900,
        logoutput=True)

      command = create_sql_cmd_local + ";" + create_sql_cmd_all + ";" + insert_sql_cmd_all
      Logger.info("Running clickhouse-client create and insert command: %s" % command)
    
    statistics_sql = format("/usr/bin/clickhouse-client -h 127.0.0.1 -d default -m -u admin --password admin -q 'select count(*) from ambari_clickhouse_service_check_all' ")
    
    call_and_match_output(statistics_sql, format(str(len(clickhouse_hosts_config))), "SERVICE CHECK FAILED: statistics_sql exec failed.", user=params.clickhouse_user)
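
The final assertion matches the expected row count with re.search, so a two-host cluster would also accept outputs such as "12" or "20". A stricter variant (a sketch that assumes the same shell.call and Fail helpers the other examples import from resource_management) compares the count as an integer:

# Hedged alternative to the regex check above; reuses statistics_sql, params and
# clickhouse_hosts_config from the snippet.
code, out = shell.call(statistics_sql, user=params.clickhouse_user, logoutput=True)
row_count = int(out.strip().splitlines()[-1]) if code == 0 and out.strip() else -1
if row_count != len(clickhouse_hosts_config):
  raise Fail("SERVICE CHECK FAILED: expected %d rows in ambari_clickhouse_service_check_all, got %d"
             % (len(clickhouse_hosts_config), row_count))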