Example #1
 def pre_start(self):
   if self.log_out_files:
     log_folder = self.get_log_folder()
     user = self.get_user()
     
     if log_folder == "":
       Logger.logger.warn("Log folder for current script is not defined")
       return
     
     if user == "":
       Logger.logger.warn("User for current script is not defined")
       return
     
     show_logs(log_folder, user, lines_count=COUNT_OF_LAST_LINES_OF_OUT_FILES_LOGGED, mask=OUT_FILES_MASK)
Example #2
  def stop(self, env, upgrade_type=None):
    import params
    env.set_params(params)

    pid_file = getPid(params, self.nodeType)
    process_id_exists_command = as_sudo(["test", "-f", pid_file]) + " && " + as_sudo(["pgrep", "-F", pid_file])

    daemon_cmd = get_daemon_cmd(params, self.nodeType, "stop")
    try:
      Execute(daemon_cmd,
              user=params.druid_user,
              only_if=process_id_exists_command,
              )
    except:
      show_logs(params.druid_log_dir, params.druid_user)
      raise
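
Nearly every example on this page wraps its daemon command in the same idiom: run Execute, and on any failure tail the service logs into the command output with show_logs before re-raising, so the Ambari task fails with the relevant log lines attached. A minimal sketch of that idiom as a reusable helper, assuming Ambari's resource_management library is importable; the helper name run_or_show_logs is hypothetical, not part of the library.

from resource_management.core.resources.system import Execute
from resource_management.libraries.functions.show_logs import show_logs

def run_or_show_logs(daemon_cmd, user, log_dir, **kwargs):
    # kwargs typically carry guards such as not_if/only_if.
    try:
        Execute(daemon_cmd, user=user, **kwargs)
    except Exception:
        # Surface the tail of the service logs in the task output,
        # then re-raise so the task is still marked as failed.
        show_logs(log_dir, user)
        raise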
Example #3
 def stop(self, env, upgrade_type=None):
     import params
     import status_params
     env.set_params(params)
     ensure_base_directories()
     daemon_cmd = format('source {params.conf_dir}/registry-env.sh; {params.registry_bin} stop')
     try:
         Execute(daemon_cmd,
                 user=params.registry_user,
                 )
     except:
         show_logs(params.registry_log_dir, params.registry_user)
         raise
     File(status_params.registry_pid_file,
          action="delete"
          )
Example #4
 def start(self, env, upgrade_type=None):
     import params
     env.set_params(params)
     self.configure(env, upgrade_type=upgrade_type)
     if params.is_supported_kafka_ranger:
         setup_ranger_kafka()  #Ranger Kafka Plugin related call
     daemon_cmd = format(
         'source {params.conf_dir}/kafka-env.sh ; {params.kafka_bin} start')
     no_op_test = format(
         'ls {params.kafka_pid_file} >/dev/null 2>&1 && ps -p `cat {params.kafka_pid_file}` >/dev/null 2>&1'
     )
     try:
         Execute(daemon_cmd, user=params.kafka_user, not_if=no_op_test)
     except:
         show_logs(params.kafka_log_dir, params.kafka_user)
         raise
Example #5
    def start(self, env, upgrade_type=None):
        import params
        env.set_params(params)
        self.configure(env)

        daemon_cmd = format(
            'source {params.conf_dir}/atlas-env.sh ; {params.metadata_start_script}'
        )
        no_op_test = format(
            'ls {params.pid_file} >/dev/null 2>&1 && ps -p `cat {params.pid_file}` >/dev/null 2>&1'
        )

        try:
            Execute(daemon_cmd, user=params.metadata_user, not_if=no_op_test)
        except:
            show_logs(params.log_dir, params.metadata_user)
            raise
Example #6
    def stop(self, env, upgrade_type=None):
        import params
        env.set_params(params)
        daemon_cmd = format(
            'source {params.conf_dir}/atlas-env.sh; {params.metadata_stop_script}'
        )

        try:
            Execute(
                daemon_cmd,
                user=params.metadata_user,
            )
        except:
            show_logs(params.log_dir, params.metadata_user)
            raise

        File(params.pid_file, action="delete")
Example #7
  def pre_start(self, env=None):
    """
    Executed before any start method. Posts contents of relevant *.out files to command execution log.
    """
    if self.log_out_files:
      log_folder = self.get_log_folder()
      user = self.get_user()

      if log_folder == "":
        Logger.logger.warn("Log folder for current script is not defined")
        return

      if user == "":
        Logger.logger.warn("User for current script is not defined")
        return

      show_logs(log_folder, user, lines_count=COUNT_OF_LAST_LINES_OF_OUT_FILES_LOGGED, mask=OUT_FILES_MASK)
Example #8
  def start(self, env, upgrade_type=None):
    import params
    env.set_params(params)
    self.configure(env, upgrade_type=upgrade_type)
    
    daemon_cmd = format('{params.clickhouse_bin} start')
    no_op_test = format('ls {params.clickhouse_pid_file} >/dev/null 2>&1 && ps -p `cat {params.clickhouse_pid_file}` >/dev/null 2>&1')
    try:
      Execute(daemon_cmd,
              user=params.root_user,
              not_if=no_op_test
      )
    except:
      show_logs(params.clickhouse_log_dir, params.root_user)
      raise

    Logger.info(format("Start clickhouse server success"))
Example #9
def post_upgrade_check():
  '''
  Checks that the NodeManager has rejoined the cluster.
  This function will obtain the Kerberos ticket if security is enabled.
  :return:
  '''
  import params

  Logger.info('NodeManager executing "yarn node -list -states=RUNNING" to verify the node has rejoined the cluster...')
  if params.security_enabled and params.nodemanager_kinit_cmd:
    Execute(params.nodemanager_kinit_cmd, user=params.yarn_user)

  try:
    _check_nodemanager_startup()
  except Fail:
    show_logs(params.yarn_log_dir, params.yarn_user)
    raise
Example #10
    def stop_rest_application(self):
        """
        Stop the REST application
        """
        Logger.info('Stopping REST application')

        # Get the pid associated with the service
        pid_file = format("{metron_rest_pid_dir}/{metron_rest_pid}")
        pid = get_user_call_output.get_user_call_output(format("cat {pid_file}"), user=self.__params.metron_user, is_checked_call=False)[1]
        process_id_exists_command = format("ls {pid_file} >/dev/null 2>&1 && ps -p {pid} >/dev/null 2>&1")

        if self.__params.security_enabled:
            kinit(self.__params.kinit_path_local,
            self.__params.metron_keytab_path,
            self.__params.metron_principal_name,
            execute_user=self.__params.metron_user)

        # Politely kill
        kill_cmd = ('kill', format("{pid}"))
        Execute(kill_cmd,
                sudo=True,
                not_if = format("! ({process_id_exists_command})")
                )

        # Violently kill
        hard_kill_cmd = ('kill', '-9', format("{pid}"))
        wait_time = 5
        Execute(hard_kill_cmd,
                not_if = format("! ({process_id_exists_command}) || ( sleep {wait_time} && ! ({process_id_exists_command}) )"),
                sudo=True,
                ignore_failures = True
                )

        try:
            # check if stopped the process, else fail the task
            Execute(format("! ({process_id_exists_command})"),
                tries=20,
                try_sleep=3,
                  )
        except:
            show_logs(self.__params.metron_log_dir, self.__params.metron_user)
            raise

        File(pid_file, action = "delete")
        Logger.info('Done stopping REST application')
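
The stop sequence above escalates in two stages: a plain kill that runs only while the process exists, then a kill -9 that fires only if the process is still alive after a grace period, followed by a retried check that it is really gone. A condensed sketch of the escalation, with a hypothetical pid and wait time:

from resource_management.core.resources.system import Execute
from resource_management.libraries.functions.format import format

pid = "12345"  # hypothetical; normally read from the pid file
wait_time = 5
process_id_exists_command = format("ps -p {pid} >/dev/null 2>&1")

# Politely kill: skipped when the process is already gone.
Execute(("kill", pid), sudo=True,
        not_if=format("! ({process_id_exists_command})"))

# Violently kill: skipped when the process exits within the grace period.
Execute(("kill", "-9", pid), sudo=True, ignore_failures=True,
        not_if=format("! ({process_id_exists_command}) || "
                      "( sleep {wait_time} && ! ({process_id_exists_command}) )"))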
Example #11
    def start(self, env, upgrade_type=None):
        import params
        env.set_params(params)
        install_druid()
        self.configure(env, upgrade_type=upgrade_type)
        daemon_cmd = get_daemon_cmd(params, self.nodeType, "start")
        # Verify Database connection on Druid start
        if params.metadata_storage_type == 'mysql':
            if not params.jdbc_driver_jar or not os.path.isfile(
                    params.connector_download_dir + os.path.sep +
                    params.jdbc_driver_jar):
                path_to_jdbc = params.connector_download_dir + os.path.sep + "*"
                error_message = "Error! Sorry, but we can't find jdbc driver for mysql.So, db connection check can fail." + \
                                "Please run 'ambari-server setup --jdbc-db=mysql --jdbc-driver={path_to_jdbc} on server host.'"
                Logger.error(error_message)
            else:
                path_to_jdbc = params.connector_download_dir + os.path.sep + params.jdbc_driver_jar
            db_connection_check_command = format(
                "{params.java8_home}/bin/java -cp {params.check_db_connection_jar}:{path_to_jdbc} org.apache.ambari.server.DBConnectionVerification '{params.metadata_storage_url}' {params.metadata_storage_user} {params.metadata_storage_password!p} com.mysql.jdbc.Driver"
            )
        else:
            db_connection_check_command = None

        if db_connection_check_command:
            sudo.chmod(params.check_db_connection_jar, 0755)
            Execute(
                db_connection_check_command,
                tries=5,
                try_sleep=10,
                user=params.druid_user)

        pid_file = getPid(params, self.nodeType)
        process_id_exists_command = as_sudo([
            "test", "-f", pid_file
        ]) + " && " + as_sudo(["pgrep", "-F", pid_file])

        try:
            Execute(
                daemon_cmd,
                user=params.druid_user,
                not_if=process_id_exists_command,
            )
        except:
            show_logs(params.druid_log_dir, params.druid_user)
            raise
Example #13
def hbase_service(name, action='start'):  # 'start' or 'stop' or 'status'

    import params

    role = name
    cmd = format("{daemon_script} --config {hbase_conf_dir}")
    pid_file = format("{pid_dir}/hbase-{hbase_user}-{role}.pid")
    pid_expression = as_sudo(["cat", pid_file])
    no_op_test = as_sudo([
        "test", "-f", pid_file
    ]) + format(" && ps -p `{pid_expression}` >/dev/null 2>&1")

    if action == 'start':
        # Check ambari-metrics-hadoop-sink version is less than 2.7.0.0
        check_installed_metrics_hadoop_sink_version()

        daemon_cmd = format("{cmd} start {role}")

        try:
            Execute(daemon_cmd, not_if=no_op_test, user=params.hbase_user)
        except:
            show_logs(params.log_dir, params.hbase_user)
            raise
    elif action == 'stop':
        daemon_cmd = format("{cmd} stop {role}")

        try:
            Execute(
                daemon_cmd,
                user=params.hbase_user,
                only_if=no_op_test,
                # BUGFIX: hbase regionserver sometimes hangs when nn is in safemode
                timeout=params.hbase_regionserver_shutdown_timeout,
                on_timeout=format(
                    "! ( {no_op_test} ) || {sudo} -H -E kill -9 `{pid_expression}`"
                ),
            )
        except:
            show_logs(params.log_dir, params.hbase_user)
            raise

        File(
            pid_file,
            action="delete",
        )
Example #14
  def stop(self, env, upgrade_type=None):
    import params
    env.set_params(params)
    # clickhouse stop flow
    ensure_base_directories()
    daemon_cmd = format('{params.clickhouse_bin} stop')
    try:
      Execute(daemon_cmd,
              user=params.root_user,
      )
    except:
      show_logs(params.clickhouse_log_dir, params.root_user)
      raise
    File(params.clickhouse_pid_file,
          action = "delete"
    )

    Logger.info(format("stop clickhosue server success"))
Example #15
    def pre_start(self):
        if self.log_out_files:
            log_folder = self.get_log_folder()
            user = self.get_user()

            if log_folder == "":
                Logger.logger.warn(
                    "Log folder for current script is not defined")
                return

            if user == "":
                Logger.logger.warn("User for current script is not defined")
                return

            show_logs(log_folder,
                      user,
                      lines_count=COUNT_OF_LAST_LINES_OF_OUT_FILES_LOGGED,
                      mask=OUT_FILES_MASK)
Example #16
 def stop(self, env, upgrade_type=None):
     import params
     env.set_params(params)
     # Kafka package scripts change permissions on folders, so we have to
     # restore permissions after installing repo version bits
     # before attempting to stop Kafka Broker
     ensure_base_directories()
     daemon_cmd = format(
         'source {params.conf_dir}/kafka-env.sh; {params.kafka_bin} stop')
     try:
         Execute(
             daemon_cmd,
             user=params.kafka_user,
         )
     except:
         show_logs(params.kafka_log_dir, params.kafka_user)
         raise
     File(params.kafka_pid_file, action="delete")
Example #17
 def execute_bootstrap_storage_env(self, params):
     from urlparse import urlparse
     try:
         streamline_storage_database_hostname = \
             urlparse(urlparse(params.streamline_storage_connector_connectorURI)[2])[1].split(":")[0]
         database_admin_jdbc_url = params.database_admin_jdbc_url
         if params.streamline_storage_type == 'postgresql':
             database_admin_jdbc_url = database_admin_jdbc_url + '/postgres'
         bootstrap_storage_initevn_db_cmd = database_admin_jdbc_url + ' ' + params.database_admin_user_name + ' ' + PasswordString(
             params.database_admin_password
         ) + ' ' + params.streamline_storage_connector_user + ' ' + PasswordString(
             params.streamline_storage_connector_password
         ) + ' ' + params.streamline_storage_database
         Execute(params.bootstrap_storage_initevn_run_cmd + ' ' +
                 bootstrap_storage_initevn_db_cmd,
                 user='******')
     except:
         show_logs(params.streamline_log_dir, params.streamline_user)
         raise
Example #18
def hbase_service(name, action='start'):  # 'start' or 'stop'

    import params

    sudo = AMBARI_SUDO_BINARY
    daemon_script = format("{yarn_hbase_bin}/hbase-daemon.sh")
    role = name
    cmd = format("{daemon_script} --config {yarn_hbase_conf_dir}")
    pid_file = format(
        "{yarn_hbase_pid_dir}/hbase-{yarn_hbase_user}-{role}.pid")
    pid_expression = as_sudo(["cat", pid_file])
    no_op_test = as_sudo([
        "test", "-f", pid_file
    ]) + format(" && ps -p `{pid_expression}` >/dev/null 2>&1")

    if action == 'start':
        daemon_cmd = format("{cmd} start {role}")

        try:
            Execute(daemon_cmd, not_if=no_op_test, user=params.yarn_hbase_user)
        except:
            show_logs(params.yarn_hbase_log_dir, params.yarn_hbase_user)
            raise
    elif action == 'stop':
        daemon_cmd = format("{cmd} stop {role}")

        try:
            Execute(
                daemon_cmd,
                user=params.yarn_hbase_user,
                only_if=no_op_test,
                timeout=30,
                on_timeout=format(
                    "! ( {no_op_test} ) || {sudo} -H -E kill -9 `{pid_expression}`"
                ),
            )
        except:
            show_logs(params.yarn_hbase_log_dir, params.yarn_hbase_user)
            raise

        File(
            pid_file,
            action="delete",
        )
Example #19
def checkAndStopRegistyDNS(cmd):
  import params
  import status_params

  hadoop_env_exports = {
    'HADOOP_LIBEXEC_DIR': params.hadoop_libexec_dir
  }
  componentName = 'registrydns'
  daemon_cmd = format("{cmd} stop {componentName}")
  log_dir = params.yarn_log_dir

  # When registry dns is switched from non-privileged to privileged mode or the other way,
  # then the previous instance of registry dns has a different pid/user.
  # Checking if either of the processes are running and shutting them down if they are.
  for dns_pid_file, dns_user in [(status_params.yarn_registry_dns_priv_pid_file, status_params.root_user),
                         (status_params.yarn_registry_dns_pid_file, params.yarn_user)]:
      process_id_exists_command = as_sudo(["test", "-f", dns_pid_file]) + " && " + as_sudo(["pgrep", "-F", dns_pid_file])
      try:
          Execute(daemon_cmd, only_if = process_id_exists_command, user = dns_user)
      except:
          # When the registry dns port is modified but registry dns is not started
          # immediately, then the configs in yarn-env.sh & yarn-site.xml related
          # to registry dns may have already changed. This introduces a discrepancy
          # between the actual process that is running and the configs.
          # For example, when port is changed from 5300 to 53,
          # then dns port = 53 in yarn-site and YARN_REGISTRYDNS_SECURE_* envs in yarn-env.sh
          # are saved. So, while trying to shutdown the stray non-privileged registry dns process
          # after some time, the yarn daemon reading the configs thinks that it needs privileged
          # access and throws an exception. In such cases, we try to kill the stray process.
          show_logs(log_dir, dns_user)
          pass

      process_id_does_not_exist_command = format("! ( {process_id_exists_command} )")
      code, out = shell.call(process_id_does_not_exist_command,
                             env = hadoop_env_exports,
                             tries = 5,
                             try_sleep = 5)
      if code != 0:
          code, out, err = shell.checked_call(("cat", dns_pid_file), sudo=True, env=hadoop_env_exports, stderr=subprocess32.PIPE)
          Logger.info("PID to kill was retrieved: '" + out + "'.")
          out=out.splitlines()[0]
          pid = out
          Execute(("kill", "-9", pid), sudo=True)
          File(dns_pid_file, action="delete")
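
The pid check used here is built with as_sudo(), which rewrites each command to run through Ambari's sudo wrapper; the composed string succeeds only when the pid file exists and pgrep -F finds a live process behind it. A minimal sketch, with a hypothetical pid-file path:

from resource_management.core.shell import as_sudo

dns_pid_file = "/var/run/hadoop-yarn/yarn/hadoop-yarn-root-registrydns.pid"  # hypothetical
process_id_exists_command = (as_sudo(["test", "-f", dns_pid_file]) + " && " +
                             as_sudo(["pgrep", "-F", dns_pid_file]))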
Example #20
    def start(self, env, upgrade_type=None):

        import params
        import status_params
        env.set_params(params)
        self.configure(env)

        daemon_cmd = format(
            'source {params.conf_dir}/registry-env.sh ; {params.registry_bin} start'
        )
        no_op_test = format(
            'ls {status_params.registry_pid_file} >/dev/null 2>&1 && ps -p `cat {status_params.registry_pid_file}` >/dev/null 2>&1'
        )

        try:
            Execute(daemon_cmd, user="******", not_if=no_op_test)
        except:
            show_logs(params.registry_log_dir, params.registry_user)
            raise
Example #21
  def stop(self, env, upgrade_type=None):
    import params
    env.set_params(params)
    daemon_cmd = format('{knox_bin} stop')

    update_knox_logfolder_permissions()

    try:
      Execute(daemon_cmd,
              environment={'JAVA_HOME': params.java_home},
              user=params.knox_user,
      )
    except:
      show_logs(params.knox_logs_dir, params.knox_user)
      raise
    
    File(params.knox_pid_file,
         action="delete",
    )
Example #22
    def start(self, env):
        import params
        import status_params
        env.set_params(params)
        install_registry()
        self.configure(env)

        if params.stack_registry_support_schema_migrate:
            self.execute_bootstrap(params)

        daemon_cmd = format(
            'source {params.conf_dir}/registry-env.sh ; {params.registry_bin} start'
        )
        no_op_test = format(
            'ls {status_params.registry_pid_file} >/dev/null 2>&1 && ps -p `cat {status_params.registry_pid_file}` >/dev/null 2>&1'
        )
        try:
            Execute(daemon_cmd, user="******", not_if=no_op_test)
        except:
            show_logs(params.registry_log_dir, params.registry_user)
            raise
Example #23
def zookeeper_service(action='start', upgrade_type=None):
    import params

    # This path may be missing after Ambari upgrade. We need to create it.
    if upgrade_type is None and not os.path.exists(os.path.join(params.stack_root, "current", "zookeeper-server")) and params.current_version \
      and check_stack_feature(StackFeature.ROLLING_UPGRADE, format_stack_version(params.version)):
        conf_select.select(params.stack_name, "zookeeper",
                           params.current_version)
        stack_select.select("zookeeper-server", params.version)

    cmd = format(
        "env ZOOCFGDIR={config_dir} ZOOCFG=zoo.cfg {zk_bin}/zkServer.sh")

    if action == 'start':
        daemon_cmd = format(
            "source {config_dir}/zookeeper-env.sh ; {cmd} start")
        no_op_test = format(
            "ls {zk_pid_file} >/dev/null 2>&1 && ps -p `cat {zk_pid_file}` >/dev/null 2>&1"
        )

        try:
            Execute(daemon_cmd, not_if=no_op_test, user=params.zk_user)
        except:
            show_logs(params.zk_log_dir, params.zk_user)
            raise

        if params.security_enabled:
            kinit_cmd = format(
                "{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};"
            )

            Execute(kinit_cmd, user=params.smokeuser)

    elif action == 'stop':
        daemon_cmd = format(
            "source {config_dir}/zookeeper-env.sh ; {cmd} stop")
        try:
            Execute(daemon_cmd, user=params.zk_user)
        except:
            show_logs(params.zk_log_dir, params.zk_user)
            raise
        File(params.zk_pid_file, action="delete")
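
Several examples kinit before exercising a secured service so that the following commands hold a valid Kerberos ticket. A minimal sketch of that call, with hypothetical keytab, principal, and user values:

from resource_management.core.resources.system import Execute
from resource_management.libraries.functions.format import format

kinit_path_local = "/usr/bin/kinit"  # hypothetical
smoke_user_keytab = "/etc/security/keytabs/smokeuser.headless.keytab"  # hypothetical
smokeuser_principal = "ambari-qa@EXAMPLE.COM"  # hypothetical
Execute(format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};"),
        user="ambari-qa")  # hypothetical user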
Example #24
    def stop(self, env):
        import params
        env.set_params(params)
        self.configure(env)
        daemon_cmd = self.get_daemon_cmd(params, "stop")

        pid_file = params.superset_pid_dir + '/superset.pid'
        process_id_exists_command = as_sudo([
            "test", "-f", pid_file
        ]) + " && " + as_sudo(["pgrep", "-F", pid_file])

        try:
            Execute(
                daemon_cmd,
                user=params.superset_user,
                only_if=process_id_exists_command,
            )
        except:
            show_logs(params.superset_log_dir, params.superset_user)
            raise
Example #25
  def start(self, env, upgrade_type=None):
    import params

    env.set_params(params)
    self.configure(env)

    daemon_cmd = format('source {params.conf_dir}/atlas-env.sh ; {params.metadata_start_script}')
    no_op_test = format('ls {params.pid_file} >/dev/null 2>&1 && ps -p `cat {params.pid_file}` >/dev/null 2>&1')
    atlas_hbase_setup_command = format("cat {atlas_hbase_setup} | hbase shell -n")
    secure_atlas_hbase_setup_command = format("kinit -kt {hbase_user_keytab} {hbase_principal_name}; ") + atlas_hbase_setup_command

    if params.stack_supports_atlas_ranger_plugin:
      Logger.info('Atlas plugin is enabled, configuring Atlas plugin.')
      setup_ranger_atlas(upgrade_type=upgrade_type)
    else:
      Logger.info('Atlas plugin is not supported or enabled.')

    try:
      effective_version = format_stack_version(params.version) if upgrade_type is not None else params.stack_version_formatted

      if check_stack_feature(StackFeature.ATLAS_HBASE_SETUP, effective_version):
        if params.security_enabled and params.has_hbase_master:
          Execute(secure_atlas_hbase_setup_command,
                  tries = 5,
                  try_sleep = 10,
                  user=params.hbase_user
          )
        elif params.enable_ranger_hbase and not params.security_enabled:
          Execute(atlas_hbase_setup_command,
                  tries = 5,
                  try_sleep = 10,
                  user=params.hbase_user
          )

      Execute(daemon_cmd,
              user=params.metadata_user,
              not_if=no_op_test
      )
    except:
      show_logs(params.log_dir, params.metadata_user)
      raise
Example #26
 def execute_bootstrap_storage_env(self, params):
     from urlparse import urlparse
     try:
         # Get the hostname where the registry's database is installed.
         registry_storage_database_hostname = urlparse(
             urlparse(params.registry_storage_connector_connectorURI)
             [2])[1].split(":")[0]
         database_admin_jdbc_url = params.database_admin_jdbc_url
         if params.registry_storage_type == 'postgresql':
             database_admin_jdbc_url = database_admin_jdbc_url + '/postgres'
         bootstrap_storage_initevn_db_cmd = database_admin_jdbc_url + ' ' + params.database_admin_user_name + ' ' + PasswordString(
             params.database_admin_password
         ) + ' ' + params.registry_storage_connector_user + ' ' + PasswordString(
             params.registry_storage_connector_password
         ) + ' ' + params.registry_storage_database
         Execute(params.bootstrap_storage_initevn_run_cmd + ' ' +
                 bootstrap_storage_initevn_db_cmd,
                 user='******')
     except:
         show_logs(params.registry_log_dir, params.registry_user)
         raise
Example #27
def kms_service(action='start'):
  import params

  env_dict = {'JAVA_HOME': params.java_home}
  if params.db_flavor.lower() == 'sqla':
    env_dict = {'JAVA_HOME': params.java_home, 'LD_LIBRARY_PATH': params.ld_library_path}

  if action == 'start':
    no_op_test = format('ps -ef | grep proc_rangerkms | grep -v grep')
    cmd = format('{kms_home}/ranger-kms start')
    try:
      Execute(cmd, not_if=no_op_test, environment=env_dict, user=format('{kms_user}'))
    except:
      show_logs(params.kms_log_dir, params.kms_user)
      raise
  elif action == 'stop':
    cmd = format('{kms_home}/ranger-kms stop')
    try:
      Execute(cmd, environment=env_dict, user=format('{kms_user}'))
    except:
      show_logs(params.kms_log_dir, params.kms_user)
      raise
Example #28
def validate_connection(target_path_to_jdbc, hive_lib_path):
  import params

  path_to_jdbc = target_path_to_jdbc
  if not params.jdbc_jar_name:
    path_to_jdbc = format("{hive_lib_path}/") + \
                   params.default_connectors_map[params.hive_jdbc_driver] if params.hive_jdbc_driver in params.default_connectors_map else None
    if not os.path.isfile(path_to_jdbc):
      path_to_jdbc = format("{hive_lib_path}/") + "*"
      error_message = "Error! Sorry, but we can't find jdbc driver with default name " + params.default_connectors_map[params.hive_jdbc_driver] + \
                      " in hive lib dir. So, db connection check can fail. Please run 'ambari-server setup --jdbc-db={db_name} --jdbc-driver={path_to_jdbc} on server host.'"
      Logger.error(error_message)

  db_connection_check_command = format(
    "{java64_home}/bin/java -cp {check_db_connection_jar}:{path_to_jdbc} org.apache.ambari.server.DBConnectionVerification '{hive_jdbc_connection_url}' {hive_metastore_user_name} {hive_metastore_user_passwd!p} {hive_jdbc_driver}")

  try:
    Execute(db_connection_check_command,
            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin', tries=5, try_sleep=10)
  except:
    show_logs(params.hive_log_dir, params.hive_user)
    raise
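
Execute retries are built in: tries and try_sleep, as used above, rerun the command until it succeeds or the attempts are exhausted, which suits checks against a slow-starting database. A minimal sketch, with a placeholder command standing in for the real DBConnectionVerification call:

from resource_management.core.resources.system import Execute

Execute("/bin/true",  # placeholder command
        path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
        tries=5,
        try_sleep=10)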
Example #29
  def start(self, env, upgrade_type=None):
    import params
    env.set_params(params)
    self.configure(env, upgrade_type=upgrade_type)

    if params.security_enabled:
      if params.version and check_stack_feature(StackFeature.KAFKA_KERBEROS, params.version):
        kafka_kinit_cmd = format("{kinit_path_local} -kt {kafka_keytab_path} {kafka_jaas_principal};")
        Execute(kafka_kinit_cmd, user=params.kafka_user)

    if params.is_supported_kafka_ranger:
      setup_ranger_kafka() #Ranger Kafka Plugin related call 
    daemon_cmd = format('source {params.conf_dir}/kafka-env.sh ; {params.kafka_bin} start')
    no_op_test = format('ls {params.kafka_pid_file} >/dev/null 2>&1 && ps -p `cat {params.kafka_pid_file}` >/dev/null 2>&1')
    try:
      Execute(daemon_cmd,
              user=params.kafka_user,
              not_if=no_op_test
      )
    except:
      show_logs(params.kafka_log_dir, params.kafka_user)
      raise
Example #30
def webhcat_service(action='start', upgrade_type=None):
    import params

    environ = {'HADOOP_HOME': params.hadoop_home}

    cmd = format('{webhcat_bin_dir}/webhcat_server.sh')

    if action == 'start':
        if upgrade_type is not None and params.version and params.stack_root:
            environ['HADOOP_HOME'] = format("{stack_root}/{version}/hadoop")

        daemon_cmd = format('cd {hcat_pid_dir} ; {cmd} start')
        no_op_test = format(
            'ls {webhcat_pid_file} >/dev/null 2>&1 && ps -p `cat {webhcat_pid_file}` >/dev/null 2>&1'
        )
        try:
            Execute(daemon_cmd,
                    user=params.webhcat_user,
                    not_if=no_op_test,
                    environment=environ)
        except:
            show_logs(params.hcat_log_dir, params.webhcat_user)
            raise
    elif action == 'stop':
        try:
            # try stopping WebHCat using its own script
            graceful_stop(cmd, environ)
        except Fail:
            show_logs(params.hcat_log_dir, params.webhcat_user)
            Logger.info(traceback.format_exc())

        # this will retrieve the PID
        pid_expression = format("`cat {webhcat_pid_file}`")

        # the PID must exist AND'd with the process must be alive
        # the return code here is going to be 0 IFF both conditions are met correctly
        process_id_exists_command = format(
            "ls {webhcat_pid_file} >/dev/null 2>&1 && ps -p {pid_expression} >/dev/null 2>&1"
        )

        # kill command to run
        daemon_hard_kill_cmd = format("{sudo} kill -9 {pid_expression}")

        # check to ensure that it has stopped by looking for the running PID and then killing
        # it forcefully if it exists - the behavior of not-if/only-if is as follows:
        #   not_if return code IS 0
        #   only_if return code is NOT 0
        Execute(daemon_hard_kill_cmd,
                only_if=process_id_exists_command,
                ignore_failures=True)

        try:
            # check if stopped the process, else fail the task
            Execute(format("! ({process_id_exists_command})"))
        except:
            show_logs(params.hcat_log_dir, params.webhcat_user)
            raise

        File(params.webhcat_pid_file, action="delete")
Example #31
  def start(self, env, upgrade_type=None):
    import params
    env.set_params(params)
    self.configure(env)

    daemon_cmd = format('source {params.conf_dir}/atlas-env.sh ; {params.metadata_start_script}')
    no_op_test = format('ls {params.pid_file} >/dev/null 2>&1 && ps -p `cat {params.pid_file}` >/dev/null 2>&1')

    if params.stack_supports_atlas_ranger_plugin:
      Logger.info('Atlas plugin is enabled, configuring Atlas plugin.')
      setup_ranger_atlas(upgrade_type = upgrade_type)
    else:
      Logger.info('Atlas plugin is not supported or enabled.')

    try:
      Execute(daemon_cmd,
              user=params.metadata_user,
              not_if=no_op_test
      )
    except:
      show_logs(params.log_dir, params.metadata_user)
      raise
Example #32
    def start(self, env, upgrade_type=None):
        import params
        import status_params
        env.set_params(params)
        self.configure(env)

        daemon_cmd = format(
            'source {params.conf_dir}/streamline-env.sh ; {params.streamline_bin} start'
        )
        no_op_test = format(
            'ls {status_params.streamline_pid_file} >/dev/null 2>&1 && ps -p `cat {status_params.streamline_pid_file}` >/dev/null 2>&1'
        )
        try:
            Execute(daemon_cmd, user="******", not_if=no_op_test)
        except:
            show_logs(params.streamline_log_dir, params.streamline_user)
            raise

        if not os.path.isfile(params.bootstrap_file):

            try:
                if params.security_enabled:
                    kinit_cmd = format(
                        "{kinit_path_local} -kt {params.streamline_keytab_path} {params.streamline_jaas_principal};"
                    )
                    return_code, out = shell.checked_call(
                        kinit_cmd,
                        path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
                        user=params.streamline_user)
                wait_until_server_starts()
                Execute(params.bootstrap_run_cmd, user=params.streamline_user)
                File(params.bootstrap_file,
                     owner=params.streamline_user,
                     group=params.user_group,
                     mode=0644)
            except:
                show_logs(params.streamline_log_dir, params.streamline_user)
                raise
Example #33
def spark_service(name, upgrade_type=None, action=None):
  import params

  if action == 'start':

    effective_version = params.version if upgrade_type is not None else params.stack_version_formatted
    if effective_version:
      effective_version = format_stack_version(effective_version)

    if effective_version and check_stack_feature(StackFeature.SPARK_16PLUS, effective_version):
      # create & copy spark2-hdp-yarn-archive.tar.gz to hdfs
      source_dir=params.spark_home+"/jars"
      tmp_archive_file="/tmp/spark2/spark2-hdp-yarn-archive.tar.gz"
      make_tarfile(tmp_archive_file, source_dir)
      copy_to_hdfs("spark2", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
      # create spark history directory
      params.HdfsResource(params.spark_history_dir,
                          type="directory",
                          action="create_on_execute",
                          owner=params.spark_user,
                          group=params.user_group,
                          mode=0777,
                          recursive_chmod=True
                          )
      params.HdfsResource(None, action="execute")

    if params.security_enabled:
      spark_kinit_cmd = format("{kinit_path_local} -kt {spark_kerberos_keytab} {spark_principal}; ")
      Execute(spark_kinit_cmd, user=params.spark_user)

    # Spark 1.3.1.2.3 and higher (first included in HDP 2.3) has no dependency on Tez,
    # so the Tez tarball does not need to be copied; for older versions, copy it to HDFS.
    if params.stack_version_formatted and check_stack_feature(StackFeature.TEZ_FOR_SPARK, params.stack_version_formatted):
      resource_created = copy_to_hdfs("tez", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
      if resource_created:
        params.HdfsResource(None, action="execute")

    if name == 'jobhistoryserver':
      historyserver_no_op_test = format(
      'ls {spark_history_server_pid_file} >/dev/null 2>&1 && ps -p `cat {spark_history_server_pid_file}` >/dev/null 2>&1')
      try:
        Execute(format('{spark_history_server_start}'),
                user=params.spark_user,
                environment={'JAVA_HOME': params.java_home},
                not_if=historyserver_no_op_test)
      except:
        show_logs(params.spark_log_dir, user=params.spark_user)
        raise

    elif name == 'sparkthriftserver':
      if params.security_enabled:
        hive_principal = params.hive_kerberos_principal.replace('_HOST', socket.getfqdn().lower())
        hive_kinit_cmd = format("{kinit_path_local} -kt {hive_kerberos_keytab} {hive_principal}; ")
        Execute(hive_kinit_cmd, user=params.hive_user)

      thriftserver_no_op_test = format(
      'ls {spark_thrift_server_pid_file} >/dev/null 2>&1 && ps -p `cat {spark_thrift_server_pid_file}` >/dev/null 2>&1')
      try:
        Execute(format('{spark_thrift_server_start} --properties-file {spark_thrift_server_conf_file} {spark_thrift_cmd_opts_properties}'),
                user=params.hive_user,
                environment={'JAVA_HOME': params.java_home},
                not_if=thriftserver_no_op_test
        )
      except:
        show_logs(params.spark_log_dir, user=params.hive_user)
        raise
  elif action == 'stop':
    if name == 'jobhistoryserver':
      try:
        Execute(format('{spark_history_server_stop}'),
                user=params.spark_user,
                environment={'JAVA_HOME': params.java_home}
        )
      except:
        show_logs(params.spark_log_dir, user=params.spark_user)
        raise
      File(params.spark_history_server_pid_file,
        action="delete"
      )

    elif name == 'sparkthriftserver':
      try:
        Execute(format('{spark_thrift_server_stop}'),
                user=params.hive_user,
                environment={'JAVA_HOME': params.java_home}
        )
      except:
        show_logs(params.spark_log_dir, user=params.hive_user)
        raise
      File(params.spark_thrift_server_pid_file,
        action="delete"
      )
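
One last pattern worth noting across the stop branches: after the stop command has run, the pid file is deleted with File(..., action="delete") so that later status checks do not mistake a stale pid for a live daemon. A minimal sketch, with a hypothetical path:

from resource_management.core.resources.system import File

File("/var/run/spark/spark-thrift-server.pid",  # hypothetical path
     action="delete")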