Example #1
    def service_check(self, env):
        import params
        env.set_params(params)

        if params.stack_version_formatted and check_stack_feature(
                StackFeature.COPY_TARBALL_TO_HDFS,
                params.stack_version_formatted):
            copy_to_hdfs("slider",
                         params.user_group,
                         params.hdfs_user,
                         skip=params.sysprep_skip_copy_tarballs_hdfs)

        smokeuser_kinit_cmd = format(
            "{kinit_path_local} -kt {smokeuser_keytab} {smokeuser_principal};"
        ) if params.security_enabled else ""

        servicecheckcmd = format("{smokeuser_kinit_cmd} {slider_cmd} list")

        Execute(
            servicecheckcmd,
            tries=3,
            try_sleep=5,
            user=params.smokeuser,
            logoutput=True,
        )
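Taken together, these snippets pin down the call surface of copy_to_hdfs fairly
well. The stub below is a hypothetical stand-in, not Ambari's real
implementation: the parameter names are copied verbatim from the calls in this
listing, and the boolean return is inferred from the resource_created idiom the
examples rely on.

    # Hypothetical stub matching the call shapes seen in this listing.
    # Assumption: the real function queues an HdfsResource upload and
    # returns True only when it actually scheduled work.
    def copy_to_hdfs(name, user_group, owner,
                     file_mode=None,
                     custom_source_file=None,
                     custom_dest_file=None,
                     host_sys_prepped=False,  # older name for the sysprep skip flag
                     skip=False,              # newer name for the same flag
                     use_ru_version_during_ru=True,               # older keyword
                     use_upgrading_version_during_upgrade=True):  # newer keyword
        if skip or host_sys_prepped:
            return False  # sysprepped hosts already have the tarballs
        # ... queue an HdfsResource that uploads the <name> tarball ...
        return True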
Example #2
    def pre_upgrade_restart(self, env, upgrade_type=None):
        Logger.info("Executing Hive Server Stack Upgrade pre-restart")
        import params
        env.set_params(params)

        if params.version and compare_versions(
                format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
            conf_select.select(params.stack_name, "hive", params.version)
            hdp_select.select("hive-server2", params.version)

            # Copy mapreduce.tar.gz and tez.tar.gz to HDFS
            resource_created = copy_to_hdfs(
                "mapreduce",
                params.user_group,
                params.hdfs_user,
                host_sys_prepped=params.host_sys_prepped)

            resource_created = copy_to_hdfs(
                "tez",
                params.user_group,
                params.hdfs_user,
                host_sys_prepped=params.host_sys_prepped) or resource_created

            if resource_created:
                params.HdfsResource(None, action="execute")
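Note the operand order in the accumulation above: each copy_to_hdfs call sits
on the left of the "or", so every copy is attempted and resource_created ends
up True if any call scheduled work. A runnable toy showing the pitfall the
ordering avoids (copy_to_hdfs here is a stand-in that always copies):

    def copy_to_hdfs(name):
        print("copying %s" % name)
        return True

    resource_created = False
    # Correct: the copy always runs; the flag merely accumulates.
    resource_created = copy_to_hdfs("mapreduce") or resource_created
    resource_created = copy_to_hdfs("tez") or resource_created
    # Wrong: once the flag is True, short-circuit evaluation would
    # skip every later copy entirely:
    #   resource_created = resource_created or copy_to_hdfs("tez")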
Example #3
    def start(self, env, upgrade_type=None):
        import params
        env.set_params(params)
        self.configure(env)  # FOR SECURITY

        if check_stack_feature(StackFeature.COPY_TARBALL_TO_HDFS,
                               params.version_for_stack_feature_checks):
            # MC Hammer said, "Can't touch this"
            resource_created = copy_to_hdfs(
                "mapreduce",
                params.user_group,
                params.hdfs_user,
                skip=params.sysprep_skip_copy_tarballs_hdfs)
            resource_created = copy_to_hdfs(
                "tez",
                params.user_group,
                params.hdfs_user,
                skip=params.sysprep_skip_copy_tarballs_hdfs
            ) or resource_created
            resource_created = copy_to_hdfs(
                "slider",
                params.user_group,
                params.hdfs_user,
                skip=params.sysprep_skip_copy_tarballs_hdfs
            ) or resource_created
            if resource_created:
                params.HdfsResource(None, action="execute")
        else:
            # In stack versions before copy_tarball_to_hdfs support, tez.tar.gz was copied to a different folder in HDFS.
            install_tez_jars()

        service('historyserver', action='start', serviceName='mapreduce')
Example #4
    def service_check(self, env):
        import params
        env.set_params(params)

        path_to_tez_jar = format(params.tez_examples_jar)
        wordcount_command = format(
            "jar {path_to_tez_jar} orderedwordcount /tmp/tezsmokeinput/sample-tez-test /tmp/tezsmokeoutput/"
        )
        test_command = format("fs -test -e /tmp/tezsmokeoutput/_SUCCESS")

        File(format("{tmp_dir}/sample-tez-test"),
             content="foo\nbar\nfoo\nbar\nfoo",
             mode=0755)

        params.HdfsResource("/tmp/tezsmokeoutput",
                            action="delete_on_execute",
                            type="directory")

        params.HdfsResource(
            "/tmp/tezsmokeinput",
            action="create_on_execute",
            type="directory",
            owner=params.smokeuser,
        )
        params.HdfsResource(
            "/tmp/tezsmokeinput/sample-tez-test",
            action="create_on_execute",
            type="file",
            owner=params.smokeuser,
            source=format("{tmp_dir}/sample-tez-test"),
        )

        if params.stack_version_formatted and compare_versions(
                params.stack_version_formatted, '2.2.0.0') >= 0:
            copy_to_hdfs("tez",
                         params.user_group,
                         params.hdfs_user,
                         host_sys_prepped=params.host_sys_prepped)

        params.HdfsResource(None, action="execute")

        if params.security_enabled:
            kinit_cmd = format(
                "{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};"
            )
            Execute(kinit_cmd, user=params.smokeuser)

        ExecuteHadoop(wordcount_command,
                      tries=3,
                      try_sleep=5,
                      user=params.smokeuser,
                      conf_dir=params.hadoop_conf_dir,
                      bin_dir=params.hadoop_bin_dir)

        ExecuteHadoop(test_command,
                      tries=10,
                      try_sleep=6,
                      user=params.smokeuser,
                      conf_dir=params.hadoop_conf_dir,
                      bin_dir=params.hadoop_bin_dir)
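The HdfsResource calls above do not touch HDFS immediately: the *_on_execute
actions are queued, and HdfsResource(None, action="execute") flushes the whole
batch at once. A hypothetical reading of that idiom, reduced to a toy queue:

    pending = []

    def HdfsResource(path, action=None, **kwargs):
        if path is None and action == "execute":
            for op in pending:  # flush everything queued so far
                print("applying %r" % (op,))
            del pending[:]
        else:
            pending.append((path, action, kwargs))

    HdfsResource("/tmp/tezsmokeoutput", action="delete_on_execute", type="directory")
    HdfsResource("/tmp/tezsmokeinput", action="create_on_execute", type="directory")
    HdfsResource(None, action="execute")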
Example #5
    def pre_upgrade_restart(self, env, upgrade_type=None):
        Logger.info(
            "Executing Hive Server Interactive Stack Upgrade pre-restart")
        import params
        env.set_params(params)

        if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE,
                                                  params.version):
            stack_select.select_packages(params.version)

            # Copy hive.tar.gz and tez.tar.gz used by Hive Interactive to HDFS
            resource_created = copy_to_hdfs(
                "hive",
                params.user_group,
                params.hdfs_user,
                skip=params.sysprep_skip_copy_tarballs_hdfs)

            resource_created = copy_to_hdfs(
                "tez_hive2",
                params.user_group,
                params.hdfs_user,
                skip=params.sysprep_skip_copy_tarballs_hdfs
            ) or resource_created

            resource_created = copy_to_hdfs(
                "yarn",
                params.user_group,
                params.hdfs_user,
                skip=params.sysprep_skip_copy_tarballs_hdfs
            ) or resource_created

            if resource_created:
                params.HdfsResource(None, action="execute")
Example #6
    def start(self, env, rolling_restart=False):
        import params
        env.set_params(params)
        self.configure(env)  # FOR SECURITY

        if params.hdp_stack_version_major and compare_versions(
                params.hdp_stack_version_major, '2.2.0.0') >= 0:
            # MC Hammer said, "Can't touch this"
            resource_created = copy_to_hdfs(
                "mapreduce",
                params.user_group,
                params.hdfs_user,
                host_sys_prepped=params.host_sys_prepped)
            resource_created = copy_to_hdfs(
                "tez",
                params.user_group,
                params.hdfs_user,
                host_sys_prepped=params.host_sys_prepped) or resource_created
            if resource_created:
                params.HdfsResource(None, action="execute")
        else:
            # In HDP 2.1, tez.tar.gz was copied to a different folder in HDFS.
            install_tez_jars()

        service('historyserver', action='start', serviceName='mapreduce')
Example #7
    def pre_upgrade_restart(self, env, upgrade_type=None):
        Logger.info("Executing Hive Server Stack Upgrade pre-restart")
        import params
        env.set_params(params)

        if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE,
                                                  params.version):

            stack_select.select("hive-server2", params.version)

            # Copy mapreduce.tar.gz and tez.tar.gz to HDFS
            resource_created = copy_to_hdfs(
                "mapreduce",
                params.user_group,
                params.hdfs_user,
                host_sys_prepped=params.host_sys_prepped)

            resource_created = copy_to_hdfs(
                "tez",
                params.user_group,
                params.hdfs_user,
                host_sys_prepped=params.host_sys_prepped) or resource_created

            if resource_created:
                params.HdfsResource(None, action="execute")
Example #8
  def service_check(self, env):
    import params
    env.set_params(params)

    path_to_tez_jar = format(params.tez_examples_jar)
    wordcount_command = format("jar {path_to_tez_jar} orderedwordcount /tmp/tezsmokeinput/sample-tez-test /tmp/tezsmokeoutput/")
    test_command = format("fs -test -e /tmp/tezsmokeoutput/_SUCCESS")

    File(format("{tmp_dir}/sample-tez-test"),
      content = "foo\nbar\nfoo\nbar\nfoo",
      mode = 0755
    )

    params.HdfsResource("/tmp/tezsmokeoutput",
      action = "delete_on_execute",
      type = "directory"
    )

    params.HdfsResource("/tmp/tezsmokeinput",
      action = "create_on_execute",
      type = "directory",
      owner = params.smokeuser,
    )
    params.HdfsResource("/tmp/tezsmokeinput/sample-tez-test",
      action = "create_on_execute",
      type = "file",
      owner = params.smokeuser,
      source = format("{tmp_dir}/sample-tez-test"),
    )

    if params.hdp_stack_version and compare_versions(params.hdp_stack_version, '2.2.0.0') >= 0:
      copy_to_hdfs("tez", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)

    params.HdfsResource(None, action = "execute")

    if params.security_enabled:
      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
      Execute(kinit_cmd,
              user=params.smokeuser
      )

    ExecuteHadoop(wordcount_command,
      tries = 3,
      try_sleep = 5,
      user = params.smokeuser,
      conf_dir = params.hadoop_conf_dir,
      bin_dir = params.hadoop_bin_dir
    )

    ExecuteHadoop(test_command,
      tries = 10,
      try_sleep = 6,
      user = params.smokeuser,
      conf_dir = params.hadoop_conf_dir,
      bin_dir = params.hadoop_bin_dir
    )
Example #9
  def pre_upgrade_restart(self, env, upgrade_type=None):
    Logger.info("Executing Stack Upgrade post-restart")
    import params
    env.set_params(params)

    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
      stack_select.select_packages(params.version)
      # MC Hammer said, "Can't touch this"
      copy_to_hdfs("yarn", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
      params.HdfsResource(None, action="execute")
Example #10
    def pre_rolling_restart(self, env):
        Logger.info("Executing Rolling Upgrade pre-restart")
        import params
        env.set_params(params)

        if params.version and compare_versions(
                format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
            conf_select.select(params.stack_name, "hadoop", params.version)
            hdp_select.select("hadoop-mapreduce-historyserver", params.version)
            copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user)
            params.HdfsResource(None, action="execute")
Example #11
    def pre_upgrade_restart(self, env, upgrade_type=None):
        import params
        env.set_params(params)

        if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE,
                                                  params.version):
            stack_select.select_packages(params.version)
            copy_to_hdfs("mapreduce",
                         params.user_group,
                         params.hdfs_user,
                         use_upgrading_version_during_upgrade=True)
Example #12
    def start(self, env, rolling_restart=False):
        import params
        env.set_params(params)
        self.configure(env)  # FOR SECURITY

        if params.version and compare_versions(
                format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
            copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user)
            params.HdfsResource(None, action="execute")

        service('historyserver', action='start', serviceName='mapreduce')
Example #13
  def pre_rolling_restart(self, env):
    Logger.info("Executing Rolling Upgrade pre-restart")
    import params
    env.set_params(params)

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      conf_select.select(params.stack_name, "hadoop", params.version)
      hdp_select.select("hadoop-mapreduce-historyserver", params.version)
      # MC Hammer said, "Can't touch this"
      copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
      copy_to_hdfs("tez", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
      params.HdfsResource(None, action="execute")
Example #14
  def pre_upgrade_restart(self, env, upgrade_type=None):
    Logger.info("Executing Stack Upgrade pre-restart")
    import params
    env.set_params(params)

    if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
      conf_select.select(params.stack_name, "hadoop", params.version)
      stack_select.select("hadoop-mapreduce-historyserver", params.version)
      # MC Hammer said, "Can't touch this"
      copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user, skip=params.host_sys_prepped)
      copy_to_hdfs("slider", params.user_group, params.hdfs_user, skip=params.host_sys_prepped)
      params.HdfsResource(None, action="execute")
Example #15
  def pre_rolling_restart(self, env):
    Logger.info("Executing HiveServer2 Rolling Upgrade pre-restart")
    import params
    env.set_params(params)

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      conf_select.select(params.stack_name, "hive", params.version)
      hdp_select.select("hive-server2", params.version)

      # Copy mapreduce.tar.gz and tez.tar.gz to HDFS
      resource_created = copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user)
      resource_created = copy_to_hdfs("tez", params.user_group, params.hdfs_user) or resource_created
      if resource_created:
        params.HdfsResource(None, action="execute")
Example #16
  def start(self, env, rolling_restart=False):
    import params
    env.set_params(params)
    self.configure(env) # FOR SECURITY

    if params.hdp_stack_version_major and compare_versions(params.hdp_stack_version_major, '2.2.0.0') >= 0:
      # MC Hammer said, "Can't touch this"
      resource_created = copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user)
      resource_created = copy_to_hdfs("tez", params.user_group, params.hdfs_user) or resource_created
      if resource_created:
        params.HdfsResource(None, action="execute")
    else:
      # In HDP 2.1, tez.tar.gz was copied to a different folder in HDFS.
      install_tez_jars()

    service('historyserver', action='start', serviceName='mapreduce')
Example #17
def spark_service(action):
    import params

    if action == 'start':
        if params.security_enabled:
            spark_kinit_cmd = format(
                "{kinit_path_local} -kt {spark_kerberos_keytab} {spark_principal}; "
            )
            Execute(spark_kinit_cmd, user=params.spark_user)

        # Spark 1.3.1.2.3 and higher (included in HDP 2.3) does not depend on Tez,
        # so the tarball does not need to be copied; otherwise, copy it.
        if params.hdp_stack_version and compare_versions(
                params.hdp_stack_version, '2.3.0.0') < 0:
            resource_created = copy_to_hdfs("tez", params.user_group,
                                            params.hdfs_user)
            if resource_created:
                params.HdfsResource(None, action="execute")

        no_op_test = format(
            'ls {spark_history_server_pid_file} >/dev/null 2>&1 && ps -p `cat {spark_history_server_pid_file}` >/dev/null 2>&1'
        )
        Execute(format('{spark_history_server_start}'),
                user=params.spark_user,
                environment={'JAVA_HOME': params.java_home},
                not_if=no_op_test)
    elif action == 'stop':
        Execute(format('{spark_history_server_stop}'),
                user=params.spark_user,
                environment={'JAVA_HOME': params.java_home})
        File(params.spark_history_server_pid_file, action="delete")
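The no_op_test guard above is a stock pidfile liveness check: the history
server is started only if no process recorded in the pidfile is still running.
A rough Python equivalent of what that shell one-liner tests (hypothetical
helper, not part of these scripts):

    import os

    def pid_file_process_running(pid_file):
        """Roughly `ls <pidfile> && ps -p $(cat <pidfile>)` as Python."""
        try:
            with open(pid_file) as f:
                pid = int(f.read().strip())
            os.kill(pid, 0)  # signal 0 probes existence without killing
            return True
        except (IOError, OSError, ValueError):
            return False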
Example #18
    def prepare(self, env):
        """
    During the "Upgrade" direction of a Stack Upgrade, it is necessary to ensure that the older tez tarball
    has been copied to HDFS. This is an additional check for added robustness.
    """
        import params
        env.set_params(params)

        Logger.info(
            "Before starting Stack Upgrade, check if tez tarball has been copied to HDFS."
        )

        if params.hdp_stack_version and compare_versions(
                params.hdp_stack_version, '2.2.0.0') >= 0:
            Logger.info(
                "Stack version {0} is sufficient to check if need to copy tez.tar.gz to HDFS."
                .format(params.hdp_stack_version))

            # Force it to copy the current version of the tez tarball, rather than the version the RU will go to.
            resource_created = copy_to_hdfs(
                "tez",
                params.user_group,
                params.hdfs_user,
                use_ru_version_during_ru=False,
                host_sys_prepped=params.host_sys_prepped)
            if resource_created:
                params.HdfsResource(None, action="execute")
            else:
                raise Fail("Could not copy tez tarball to HDFS.")
Example #19
    def prepare(self, env):
        """
    During the "Upgrade" direction of a Stack Upgrade, it is necessary to ensure that the older tez tarball
    has been copied to HDFS. This is an additional check for added robustness.
    """
        import params
        env.set_params(params)

        Logger.info(
            "Before starting Stack Upgrade, check if tez tarball has been copied to HDFS."
        )

        if params.stack_version_formatted and check_stack_feature(
                StackFeature.ROLLING_UPGRADE, params.stack_version_formatted):
            Logger.info(
                "Stack version {0} is sufficient to check if need to copy tez.tar.gz to HDFS."
                .format(params.stack_version_formatted))

            # Force it to copy the current version of the tez tarball, rather than the version the RU will go to.
            resource_created = copy_to_hdfs(
                "tez",
                params.user_group,
                params.hdfs_user,
                use_upgrading_version_during_upgrade=False,
                skip=params.sysprep_skip_copy_tarballs_hdfs)
            if resource_created:
                params.HdfsResource(None, action="execute")
            else:
                raise Fail("Could not copy tez tarball to HDFS.")
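Examples #18 and #19 are the same prepare() step from two Ambari generations;
only the copy_to_hdfs keywords differ. Side by side, with the other arguments
held fixed:

    # Older releases (Example #18):
    copy_to_hdfs("tez", params.user_group, params.hdfs_user,
                 use_ru_version_during_ru=False,
                 host_sys_prepped=params.host_sys_prepped)

    # Newer releases (Example #19) renamed both keywords:
    copy_to_hdfs("tez", params.user_group, params.hdfs_user,
                 use_upgrading_version_during_upgrade=False,
                 skip=params.sysprep_skip_copy_tarballs_hdfs)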
Example #20
def spark_service(action):
  import params
  
  if action == 'start':
    if params.security_enabled:
      spark_kinit_cmd = format("{kinit_path_local} -kt {spark_kerberos_keytab} {spark_principal}; ")
      Execute(spark_kinit_cmd, user=params.spark_user)

    # Spark 1.3.1.2.3 and higher (included in HDP 2.3) does not depend on Tez,
    # so the tarball does not need to be copied; otherwise, copy it.
    if params.hdp_stack_version and compare_versions(params.hdp_stack_version, '2.3.0.0') < 0:
      resource_created = copy_to_hdfs("tez", params.user_group, params.hdfs_user)
      if resource_created:
        params.HdfsResource(None, action="execute")

    no_op_test = format(
      'ls {spark_history_server_pid_file} >/dev/null 2>&1 && ps -p `cat {spark_history_server_pid_file}` >/dev/null 2>&1')
    Execute(format('{spark_history_server_start}'),
            user=params.spark_user,
            environment={'JAVA_HOME': params.java_home},
            not_if=no_op_test
    )
  elif action == 'stop':
    Execute(format('{spark_history_server_stop}'),
            user=params.spark_user,
            environment={'JAVA_HOME': params.java_home}
    )
    File(params.spark_history_server_pid_file,
         action="delete"
    )
Example #21
    def start(self, env, upgrade_type=None):
        import params
        env.set_params(params)
        self.configure(env)  # FOR SECURITY

        # MC Hammer said, "Can't touch this"
        resource_created = copy_to_hdfs("mapreduce",
                                        params.user_group,
                                        params.hdfs_user,
                                        skip=params.host_sys_prepped)
        resource_created = copy_to_hdfs(
            "slider",
            params.user_group,
            params.hdfs_user,
            skip=params.host_sys_prepped) or resource_created
        if resource_created:
            params.HdfsResource(None, action="execute")
        service('historyserver', action='start', serviceName='mapreduce')
Example #22
  def service_check(self, env):
    import params
    env.set_params(params)
    
    if Script.is_hdp_stack_greater_or_equal("2.2"):
      copy_to_hdfs("slider", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
    
    smokeuser_kinit_cmd = format(
      "{kinit_path_local} -kt {smokeuser_keytab} {smokeuser_principal};") if params.security_enabled else ""

    servicecheckcmd = format("{smokeuser_kinit_cmd} {slider_cmd} list")

    Execute(servicecheckcmd,
            tries=3,
            try_sleep=5,
            user=params.smokeuser,
            logoutput=True
    )
Example #23
  def service_check(self, env):
    import params
    env.set_params(params)
    
    if Script.is_stack_greater_or_equal("2.2"):
      copy_to_hdfs("slider", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
    
    smokeuser_kinit_cmd = format(
      "{kinit_path_local} -kt {smokeuser_keytab} {smokeuser_principal};") if params.security_enabled else ""

    servicecheckcmd = format("{smokeuser_kinit_cmd} {slider_cmd} list")

    Execute(servicecheckcmd,
            tries=3,
            try_sleep=5,
            user=params.smokeuser,
            logoutput=True
    )
Example #24
def spark_service(name, action):
    import params

    if action == "start":
        if params.security_enabled:
            spark_kinit_cmd = format("{kinit_path_local} -kt {spark_kerberos_keytab} {spark_principal}; ")
            Execute(spark_kinit_cmd, user=params.spark_user)

        # Spark 1.3.1.2.3 and higher (included in HDP 2.3) does not depend on Tez,
        # so the tarball does not need to be copied; otherwise, copy it.
        if params.hdp_stack_version and compare_versions(params.hdp_stack_version, "2.3.0.0") < 0:
            resource_created = copy_to_hdfs(
                "tez", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped
            )
            if resource_created:
                params.HdfsResource(None, action="execute")

        if name == "jobhistoryserver":
            historyserver_no_op_test = format(
                "ls {spark_history_server_pid_file} >/dev/null 2>&1 && ps -p `cat {spark_history_server_pid_file}` >/dev/null 2>&1"
            )
            Execute(
                format("{spark_history_server_start}"),
                user=params.spark_user,
                environment={"JAVA_HOME": params.java_home},
                not_if=historyserver_no_op_test,
            )

        elif name == "sparkthriftserver":
            thriftserver_no_op_test = format(
                "ls {spark_thrift_server_pid_file} >/dev/null 2>&1 && ps -p `cat {spark_thrift_server_pid_file}` >/dev/null 2>&1"
            )
            Execute(
                format("{spark_thrift_server_start} --properties-file {spark_thrift_server_conf_file}"),
                user=params.spark_user,
                environment={"JAVA_HOME": params.java_home},
                not_if=thriftserver_no_op_test,
            )
    elif action == "stop":
        if name == "jobhistoryserver":
            Execute(
                format("{spark_history_server_stop}"),
                user=params.spark_user,
                environment={"JAVA_HOME": params.java_home},
            )
            File(params.spark_history_server_pid_file, action="delete")

        elif name == "sparkthriftserver":
            Execute(
                format("{spark_thrift_server_stop}"),
                user=params.spark_user,
                environment={"JAVA_HOME": params.java_home},
            )
            File(params.spark_thrift_server_pid_file, action="delete")
Example #25
    def pre_upgrade_restart(self, env, upgrade_type=None):
        Logger.info("Executing Stack Upgrade pre-restart")
        import params
        env.set_params(params)

        if params.version and compare_versions(
                format_stack_version(params.version), '4.0.0.0') >= 0:
            stack_select.select_packages(params.version)
            #Execute(format("iop-select set hadoop-mapreduce-historyserver {version}"))
            #copy_tarballs_to_hdfs('mapreduce', 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
            # MC Hammer said, "Can't touch this"
            copy_to_hdfs("mapreduce",
                         params.user_group,
                         params.hdfs_user,
                         skip=params.host_sys_prepped)
            copy_to_hdfs("slider",
                         params.user_group,
                         params.hdfs_user,
                         skip=params.host_sys_prepped)
            params.HdfsResource(None, action="execute")
Example #26
    def pre_upgrade_restart(self, env, upgrade_type=None):
        Logger.info("Executing Hive Server Stack Upgrade pre-restart")
        import params
        env.set_params(params)

        if False:  # condition hard-coded to False; the copy logic below is disabled
            # Copy mapreduce.tar.gz and tez.tar.gz to HDFS
            resource_created = copy_to_hdfs(
                "mapreduce",
                params.user_group,
                params.hdfs_user,
                host_sys_prepped=params.host_sys_prepped)

            resource_created = copy_to_hdfs(
                "tez",
                params.user_group,
                params.hdfs_user,
                host_sys_prepped=params.host_sys_prepped) or resource_created

            if resource_created:
                params.HdfsResource(None, action="execute")
Example #27
  def pre_upgrade_restart(self, env, upgrade_type=None):
    Logger.info("Executing HiveServer2 Rolling Upgrade pre-restart")
    import params
    env.set_params(params)

    if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
      conf_select.select(params.stack_name, "hive", params.version)
      stack_select.select("hive-server2", params.version)
      #Execute(format("stack-select set hive-server2 {version}"))
      resource_created = copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user)
      if resource_created:
        params.HdfsResource(None, action="execute")
Example #28
def install_tez():
    import params
    Directory([params.config_dir],
              owner=params.tez_user,
              group=params.user_group,
              mode=0775,
              create_parents=True)
    if not os.path.exists(Script.get_stack_root() + '/' +
                          params.version_dir) or not os.path.exists(
                              params.install_dir):
        Execute('rm -rf %s' % Script.get_stack_root() + '/' +
                params.version_dir)
        Execute('rm -rf %s' % params.install_dir)
        Execute('wget ' + params.download_url + ' -O /tmp/' + params.filename,
                user=params.tez_user)
        Execute('tar -zxf /tmp/' + params.filename + ' -C  ' +
                Script.get_stack_root())
        Execute('ln -s ' + Script.get_stack_root() + '/' + params.version_dir +
                ' ' + params.install_dir)
        Execute(' rm -rf ' + params.install_dir + '/conf')
        Execute('ln -s ' + params.config_dir + ' ' + params.install_dir +
                '/conf')
        Execute("echo 'export PATH=%s/bin:$PATH'>/etc/profile.d/tez.sh" %
                params.install_dir)
        Execute('chown -R %s:%s %s/%s' %
                (params.tez_user, params.user_group, params.stack_root,
                 params.version_dir))
        Execute('chown -R %s:%s %s' %
                (params.tez_user, params.user_group, params.install_dir))
        Execute('/bin/rm -f /tmp/' + params.filename)
        copy_to_hdfs("hive",
                     params.user_group,
                     params.tez_user,
                     custom_source_file=params.stack_root +
                     '/tez/share/tez.tar.gz',
                     custom_dest_file='/apps/tez/tez.tar.gz')
        params.HdfsResource(None, action="execute")
Example #29
    def configure(self, env):
        import params
        env.set_params(params)

        params.HdfsResource('/apps/hbase/coprocessor',
                            type="directory",
                            action="create_on_execute",
                            owner=params.hdfs_user,
                            group=params.user_group,
                            change_permissions_for_parents=True,
                            mode=0777)
        coprocessor_path = params.install_dir + '/share/hadoop/yarn/timelineservice/hadoop-yarn-server-timelineservice-hbase-coprocessor*.jar'
        coprocessor_path_arr = glob.glob(coprocessor_path)
        if len(coprocessor_path_arr) > 0:  # check the glob results, not the pattern string
            coprocessor_path = coprocessor_path_arr.pop()
            copy_to_hdfs(
                "yarn",
                params.user_group,
                params.hdfs_user,
                custom_source_file=coprocessor_path,
                custom_dest_file='/apps/hbase/coprocessor/hadoop-yarn-server-timelineservice.jar')
            params.HdfsResource(None, action="execute")

        yarn(name='apptimelineserver')
Example #30
    def start(self, env, rolling_restart=False):
        import params

        env.set_params(params)
        self.configure(env)  # FOR SECURITY
        if params.has_ranger_admin and params.is_supported_yarn_ranger:
            setup_ranger_yarn()  #Ranger Yarn Plugin related calls
        if not Script.is_hdp_stack_greater_or_equal("2.2"):
            install_tez_jars()
        else:
            resource_created = copy_to_hdfs("tez", params.user_group,
                                            params.hdfs_user)
            if resource_created:
                params.HdfsResource(None, action="execute")
        service('resourcemanager', action='start')
Example #31
    def pre_rolling_restart(self, env):
        import params

        env.set_params(params)
        if params.version and compare_versions(format_hdp_stack_version(params.version), "2.2.0.0") >= 0:
            conf_select.select(params.stack_name, "spark", params.version)
            hdp_select.select("spark-historyserver", params.version)

            # Spark 1.3.1.2.3 and higher (included in HDP 2.3) does not depend on Tez,
            # so the tarball does not need to be copied; otherwise, copy it.

            if params.version and compare_versions(format_hdp_stack_version(params.version), "2.3.0.0") < 0:
                resource_created = copy_to_hdfs(
                    "tez", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped
                )
                if resource_created:
                    params.HdfsResource(None, action="execute")
Example #32
    def pre_rolling_restart(self, env):
        import params

        env.set_params(params)
        if params.version and compare_versions(
                format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
            conf_select.select(params.stack_name, "spark", params.version)
            hdp_select.select("spark-historyserver", params.version)

            # Spark 1.3.1.2.3 and higher (included in HDP 2.3) does not depend on Tez,
            # so the tarball does not need to be copied; otherwise, copy it.
            if compare_versions(format_hdp_stack_version(params.version),
                                '2.3.0.0') < 0:
                resource_created = copy_to_hdfs("tez", params.user_group,
                                                params.hdfs_user)
                if resource_created:
                    params.HdfsResource(None, action="execute")
Example #33
  def start(self, env, upgrade_type=None):
    import params
    env.set_params(params)
    self.configure(env) # FOR SECURITY

    if params.stack_version_formatted_major and check_stack_feature(StackFeature.COPY_TARBALL_TO_HDFS, params.stack_version_formatted_major):
      # MC Hammer said, "Can't touch this"
      resource_created = copy_to_hdfs(
        "yarn",
        params.user_group,
        params.hdfs_user,
        skip=params.sysprep_skip_copy_tarballs_hdfs)
      if resource_created:
        params.HdfsResource(None, action="execute")

    if not params.use_external_hbase and not params.is_hbase_system_service_launch:
       hbase(action='start')
    service('timelinereader', action='start')
Example #34
  def pre_upgrade_restart(self, env, upgrade_type=None):
    Logger.info("Executing Stack Upgrade pre-restart")
    import params
    env.set_params(params)

    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
      conf_select.select(params.stack_name, "hadoop", params.version)
      stack_select.select("hadoop-mapreduce-historyserver", params.version)
      # MC Hammer said, "Can't touch this"
      copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
      copy_to_hdfs("tez", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
      copy_to_hdfs("slider", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
      params.HdfsResource(None, action="execute")
Example #35
  def pre_upgrade_restart(self, env, upgrade_type=None):
    import params

    env.set_params(params)
    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
      Logger.info("Executing Spark2 Job History Server Stack Upgrade pre-restart")
      conf_select.select(params.stack_name, "spark2", params.version)
      stack_select.select("spark2-historyserver", params.version)

      # Spark 1.3.1.2.3 and higher (included in HDP 2.3) does not depend on Tez,
      # so the tarball does not need to be copied; otherwise, copy it.
      if params.version and check_stack_feature(StackFeature.TEZ_FOR_SPARK, params.version):
        resource_created = copy_to_hdfs(
          "tez",
          params.user_group,
          params.hdfs_user,
          host_sys_prepped=params.host_sys_prepped)
        if resource_created:
          params.HdfsResource(None, action="execute")
Example #36
  def pre_upgrade_restart(self, env, upgrade_type=None):
    import params

    env.set_params(params)
    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
      Logger.info("Executing Spark2 Job History Server Stack Upgrade pre-restart")
      conf_select.select(params.stack_name, "spark2", params.version)
      stack_select.select("spark2-historyserver", params.version)

      # Spark 1.3.1.2.3 and higher (included in HDP 2.3) does not depend on Tez,
      # so the tarball does not need to be copied; otherwise, copy it.
      if params.version and check_stack_feature(StackFeature.TEZ_FOR_SPARK, params.version):
        resource_created = copy_to_hdfs(
          "tez",
          params.user_group,
          params.hdfs_user,
          host_sys_prepped=params.host_sys_prepped)
        if resource_created:
          params.HdfsResource(None, action="execute")
Example #37
  def pre_upgrade_restart(self, env, upgrade_type=None):
    import params

    env.set_params(params)
    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      Logger.info("Executing Spark Job History Server Stack Upgrade pre-restart")
      conf_select.select(params.stack_name, "spark", params.version)
      hdp_select.select("spark-historyserver", params.version)

      # Spark 1.3.1.2.3 and higher (included in HDP 2.3) does not depend on Tez,
      # so the tarball does not need to be copied; otherwise, copy it.

      if params.version and compare_versions(format_hdp_stack_version(params.version), '2.3.0.0') < 0:
        resource_created = copy_to_hdfs(
          "tez",
          params.user_group,
          params.hdfs_user,
          host_sys_prepped=params.host_sys_prepped)
        if resource_created:
          params.HdfsResource(None, action="execute")
Example #38
    def pre_upgrade_restart(self, env, upgrade_type=None):
        import params

        env.set_params(params)
        if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE,
                                                  params.version):
            Logger.info(
                "Executing Spark Job History Server Stack Upgrade pre-restart")
            # TODO, change to "spark" after RPM switches the name
            stack_select.select_packages(params.version)

            # Spark 1.3.1.2.3 and higher (included in HDP 2.3) does not depend on Tez,
            # so the tarball does not need to be copied; otherwise, copy it.
            if params.version and check_stack_feature(
                    StackFeature.TEZ_FOR_SPARK, params.version):
                resource_created = copy_to_hdfs(
                    "tez",
                    params.user_group,
                    params.hdfs_user,
                    skip=params.sysprep_skip_copy_tarballs_hdfs)
                if resource_created:
                    params.HdfsResource(None, action="execute")
Example #39
  def start(self, env, upgrade_type=None):
    import params

    env.set_params(params)
    self.configure(env) # FOR SECURITY
    if params.enable_ranger_yarn and params.is_supported_yarn_ranger:
      setup_ranger_yarn() #Ranger Yarn Plugin related calls

    # wait for active-dir and done-dir to be created by ATS if needed
    if params.has_ats:
      Logger.info("Verifying DFS directories where ATS stores time line data for active and completed applications.")
      self.wait_for_dfs_directories_created(params.entity_groupfs_store_dir, params.entity_groupfs_active_dir)

    if params.stack_version_formatted_major and check_stack_feature(StackFeature.COPY_TARBALL_TO_HDFS, params.stack_version_formatted_major):
      # MC Hammer said, "Can't touch this"
      resource_created = copy_to_hdfs(
        "yarn",
        params.user_group,
        params.hdfs_user,
        skip=params.sysprep_skip_copy_tarballs_hdfs)
      if resource_created:
        params.HdfsResource(None, action="execute")

    service('resourcemanager', action='start')
Example #40
  def prepare(self, env):
    """
    During the "Upgrade" direction of a Rolling Upgrade, it is necessary to ensure that the older tez tarball
    has been copied to HDFS. This is an additional check for added robustness.
    """
    import params
    env.set_params(params)

    Logger.info("Before starting Rolling Upgrade, check if tez tarball has been copied to HDFS.")

    if params.hdp_stack_version and compare_versions(params.hdp_stack_version, '2.2.0.0') >= 0:
      Logger.info("Stack version {0} is sufficient to check if need to copy tez.tar.gz to HDFS.".format(params.hdp_stack_version))

      # Force it to copy the current version of the tez tarball, rather than the version the RU will go to.
      resource_created = copy_to_hdfs(
        "tez",
        params.user_group,
        params.hdfs_user,
        use_ru_version_during_ru=False,
        host_sys_prepped=params.host_sys_prepped)
      if resource_created:
        params.HdfsResource(None, action="execute")
      else:
        raise Fail("Could not copy tez tarball to HDFS.")
Example #41
def spark_service(name, upgrade_type=None, action=None):
  import params

  if action == 'start':

    effective_version = params.version if upgrade_type is not None else params.hdp_stack_version
    if effective_version:
      effective_version = format_hdp_stack_version(effective_version)

    if effective_version and compare_versions(effective_version, '2.4.0.0') >= 0:
      # copy spark-hdp-assembly.jar to hdfs
      copy_to_hdfs("spark", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
      # create spark history directory
      params.HdfsResource(params.spark_history_dir,
                          type="directory",
                          action="create_on_execute",
                          owner=params.spark_user,
                          group=params.user_group,
                          mode=0777,
                          recursive_chmod=True
                          )
      params.HdfsResource(None, action="execute")

    if params.security_enabled:
      spark_kinit_cmd = format("{kinit_path_local} -kt {spark_kerberos_keytab} {spark_principal}; ")
      Execute(spark_kinit_cmd, user=params.spark_user)

    # Spark 1.3.1.2.3 and higher (included in HDP 2.3) does not depend on Tez,
    # so the tarball does not need to be copied; otherwise, copy it.
    if params.hdp_stack_version and compare_versions(params.hdp_stack_version, '2.3.0.0') < 0:
      resource_created = copy_to_hdfs("tez", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
      if resource_created:
        params.HdfsResource(None, action="execute")

    if name == 'jobhistoryserver':
      historyserver_no_op_test = format(
      'ls {spark_history_server_pid_file} >/dev/null 2>&1 && ps -p `cat {spark_history_server_pid_file}` >/dev/null 2>&1')
      Execute(format('{spark_history_server_start}'),
              user=params.spark_user,
              environment={'JAVA_HOME': params.java_home},
              not_if=historyserver_no_op_test)

    elif name == 'sparkthriftserver':
      if params.security_enabled:
        hive_principal = params.hive_kerberos_principal.replace('_HOST', socket.getfqdn().lower())
        hive_kinit_cmd = format("{kinit_path_local} -kt {hive_kerberos_keytab} {hive_principal}; ")
        Execute(hive_kinit_cmd, user=params.hive_user)

      thriftserver_no_op_test = format(
      'ls {spark_thrift_server_pid_file} >/dev/null 2>&1 && ps -p `cat {spark_thrift_server_pid_file}` >/dev/null 2>&1')
      Execute(format('{spark_thrift_server_start} --properties-file {spark_thrift_server_conf_file} {spark_thrift_cmd_opts_properties}'),
              user=params.hive_user,
              environment={'JAVA_HOME': params.java_home},
              not_if=thriftserver_no_op_test
      )
  elif action == 'stop':
    if name == 'jobhistoryserver':
      Execute(format('{spark_history_server_stop}'),
              user=params.spark_user,
              environment={'JAVA_HOME': params.java_home}
      )
      File(params.spark_history_server_pid_file,
        action="delete"
      )

    elif name == 'sparkthriftserver':
      Execute(format('{spark_thrift_server_stop}'),
              user=params.hive_user,
              environment={'JAVA_HOME': params.java_home}
      )
      File(params.spark_thrift_server_pid_file,
        action="delete"
      )
Example #42
  def service_check(self, env):
    import params
    env.set_params(params)

    input_file = format('/user/{smokeuser}/passwd')
    output_dir = format('/user/{smokeuser}/pigsmoke.out')

    params.HdfsResource(output_dir,
                        type="directory",
                        action="delete_on_execute",
                        owner=params.smokeuser,
                        )
    params.HdfsResource(input_file,
                        type="file",
                        source="/etc/passwd",
                        action="create_on_execute",
                        owner=params.smokeuser,
    )
    params.HdfsResource(None, action="execute")

    File( format("{tmp_dir}/pigSmoke.sh"),
      content = StaticFile("pigSmoke.sh"),
      mode = 0755
    )

    # check for Pig-on-M/R
    Execute( format("pig {tmp_dir}/pigSmoke.sh"),
      tries     = 3,
      try_sleep = 5,
      path      = format('{pig_bin_dir}:/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'),
      user      = params.smokeuser,
      logoutput = True
    )

    test_cmd = format("fs -test -e {output_dir}")
    ExecuteHadoop( test_cmd,
      user      = params.smokeuser,
      conf_dir = params.hadoop_conf_dir,
      bin_dir = params.hadoop_bin_dir
    )

    if params.hdp_stack_version != "" and compare_versions(params.hdp_stack_version, '2.2') >= 0:
      # cleanup results from previous test
      params.HdfsResource(output_dir,
                          type="directory",
                          action="delete_on_execute",
                          owner=params.smokeuser,
      )
      params.HdfsResource(input_file,
                          type="file",
                          source="/etc/passwd",
                          action="create_on_execute",
                          owner=params.smokeuser,
      )

      # Check for Pig-on-Tez
      resource_created = copy_to_hdfs("tez", params.user_group, params.hdfs_user)
      if resource_created:
        params.HdfsResource(None, action="execute")

      if params.security_enabled:
        kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
        Execute(kinit_cmd,
                user=params.smokeuser
        )

      Execute(format("pig -x tez {tmp_dir}/pigSmoke.sh"),
        tries     = 3,
        try_sleep = 5,
        path      = format('{pig_bin_dir}:/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'),
        user      = params.smokeuser,
        logoutput = True
      )

      ExecuteHadoop(test_cmd,
        user      = params.smokeuser,
        conf_dir = params.hadoop_conf_dir,
        bin_dir = params.hadoop_bin_dir
      )
Example #43
def spark_service(name, upgrade_type=None, action=None):
  import params

  if action == 'start':

    effective_version = params.version if upgrade_type is not None else params.stack_version_formatted
    if effective_version:
      effective_version = format_stack_version(effective_version)

    if effective_version and check_stack_feature(StackFeature.SPARK_16PLUS, effective_version):
      # create & copy spark2-hdp-yarn-archive.tar.gz to hdfs
      source_dir=params.spark_home+"/jars"
      tmp_archive_file="/tmp/spark2/spark2-hdp-yarn-archive.tar.gz"
      make_tarfile(tmp_archive_file, source_dir)
      copy_to_hdfs("spark2", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
      # create spark history directory
      params.HdfsResource(params.spark_history_dir,
                          type="directory",
                          action="create_on_execute",
                          owner=params.spark_user,
                          group=params.user_group,
                          mode=0777,
                          recursive_chmod=True
                          )
      params.HdfsResource(None, action="execute")

    if params.security_enabled:
      spark_kinit_cmd = format("{kinit_path_local} -kt {spark_kerberos_keytab} {spark_principal}; ")
      Execute(spark_kinit_cmd, user=params.spark_user)

    # Spark 1.3.1.2.3 and higher (included in HDP 2.3) does not depend on Tez,
    # so the tarball does not need to be copied; otherwise, copy it.
    if params.stack_version_formatted and check_stack_feature(StackFeature.TEZ_FOR_SPARK, params.stack_version_formatted):
      resource_created = copy_to_hdfs("tez", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
      if resource_created:
        params.HdfsResource(None, action="execute")

    if name == 'jobhistoryserver':
      historyserver_no_op_test = format(
      'ls {spark_history_server_pid_file} >/dev/null 2>&1 && ps -p `cat {spark_history_server_pid_file}` >/dev/null 2>&1')
      try:
        Execute(format('{spark_history_server_start}'),
                user=params.spark_user,
                environment={'JAVA_HOME': params.java_home},
                not_if=historyserver_no_op_test)
      except:
        show_logs(params.spark_log_dir, user=params.spark_user)
        raise

    elif name == 'sparkthriftserver':
      if params.security_enabled:
        hive_principal = params.hive_kerberos_principal.replace('_HOST', socket.getfqdn().lower())
        hive_kinit_cmd = format("{kinit_path_local} -kt {hive_kerberos_keytab} {hive_principal}; ")
        Execute(hive_kinit_cmd, user=params.hive_user)

      thriftserver_no_op_test = format(
      'ls {spark_thrift_server_pid_file} >/dev/null 2>&1 && ps -p `cat {spark_thrift_server_pid_file}` >/dev/null 2>&1')
      try:
        Execute(format('{spark_thrift_server_start} --properties-file {spark_thrift_server_conf_file} {spark_thrift_cmd_opts_properties}'),
                user=params.hive_user,
                environment={'JAVA_HOME': params.java_home},
                not_if=thriftserver_no_op_test
        )
      except:
        show_logs(params.spark_log_dir, user=params.hive_user)
        raise
  elif action == 'stop':
    if name == 'jobhistoryserver':
      try:
        Execute(format('{spark_history_server_stop}'),
                user=params.spark_user,
                environment={'JAVA_HOME': params.java_home}
        )
      except:
        show_logs(params.spark_log_dir, user=params.spark_user)
        raise
      File(params.spark_history_server_pid_file,
        action="delete"
      )

    elif name == 'sparkthriftserver':
      try:
        Execute(format('{spark_thrift_server_stop}'),
                user=params.hive_user,
                environment={'JAVA_HOME': params.java_home}
        )
      except:
        show_logs(params.spark_log_dir, user=params.hive_user)
        raise
      File(params.spark_thrift_server_pid_file,
        action="delete"
      )
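Unlike the earlier spark_service variants, this one wraps each Execute in
try/except, calls show_logs, and re-raises: on a failed start or stop, the
daemon's logs are surfaced in the command output before the failure
propagates. Schematically, with hypothetical stand-ins:

    def start_daemon():
        pass  # stand-in for the Execute(...) call

    def show_logs(log_dir, user=None):
        print("tailing logs under %s as %s" % (log_dir, user))

    try:
        start_daemon()
    except Exception:
        show_logs("/var/log/spark", user="spark")
        raise  # still fail the command after dumping the logs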
Example #44
def hive(name=None):
  import params

  if name == 'hiveserver2':
    # HDP 2.1.* or lower
    if params.hdp_stack_version_major != "" and compare_versions(params.hdp_stack_version_major, "2.2.0.0") < 0:
      params.HdfsResource(params.webhcat_apps_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.webhcat_user,
                            mode=0755
                          )
    
    # Create webhcat dirs.
    if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
      params.HdfsResource(params.hcat_hdfs_user_dir,
                           type="directory",
                           action="create_on_execute",
                           owner=params.hcat_user,
                           mode=params.hcat_hdfs_user_mode
      )

    params.HdfsResource(params.webhcat_hdfs_user_dir,
                         type="directory",
                         action="create_on_execute",
                         owner=params.webhcat_user,
                         mode=params.webhcat_hdfs_user_mode
    )

    # ****** Begin Copy Tarballs ******
    # *********************************
    # HDP 2.2 or higher, copy mapreduce.tar.gz to HDFS
    if params.hdp_stack_version_major != "" and compare_versions(params.hdp_stack_version_major, '2.2') >= 0:
      copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
      copy_to_hdfs("tez", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)

    # Always copy pig.tar.gz and hive.tar.gz using the appropriate mode.
    # This can use a different source and dest location to account for both HDP 2.1 and 2.2
    copy_to_hdfs("pig",
                 params.user_group,
                 params.hdfs_user,
                 file_mode=params.tarballs_mode,
                 custom_source_file=params.pig_tar_source,
                 custom_dest_file=params.pig_tar_dest_file,
                 host_sys_prepped=params.host_sys_prepped)
    copy_to_hdfs("hive",
                 params.user_group,
                 params.hdfs_user,
                 file_mode=params.tarballs_mode,
                 custom_source_file=params.hive_tar_source,
                 custom_dest_file=params.hive_tar_dest_file,
                 host_sys_prepped=params.host_sys_prepped)

    wildcard_tarballs = ["sqoop", "hadoop_streaming"]
    for tarball_name in wildcard_tarballs:
      source_file_pattern = eval("params." + tarball_name + "_tar_source")
      dest_dir = eval("params." + tarball_name + "_tar_dest_dir")

      if source_file_pattern is None or dest_dir is None:
        continue

      source_files = glob.glob(source_file_pattern) if "*" in source_file_pattern else [source_file_pattern]
      for source_file in source_files:
        src_filename = os.path.basename(source_file)
        dest_file = os.path.join(dest_dir, src_filename)

        copy_to_hdfs(tarball_name,
                     params.user_group,
                     params.hdfs_user,
                     file_mode=params.tarballs_mode,
                     custom_source_file=source_file,
                     custom_dest_file=dest_file,
                     host_sys_prepped=params.host_sys_prepped)
    # ******* End Copy Tarballs *******
    # *********************************

    # Create Hive Metastore Warehouse Dir
    params.HdfsResource(params.hive_apps_whs_dir,
                        type="directory",
                        action="create_on_execute",
                        owner=params.hive_user,
                        mode=0777
    )

    # Create Hive User Dir
    params.HdfsResource(params.hive_hdfs_user_dir,
                        type="directory",
                        action="create_on_execute",
                        owner=params.hive_user,
                        mode=params.hive_hdfs_user_mode
    )
    
    if not is_empty(params.hive_exec_scratchdir) and not urlparse(params.hive_exec_scratchdir).path.startswith("/tmp"):
      params.HdfsResource(params.hive_exec_scratchdir,
                           type="directory",
                           action="create_on_execute",
                           owner=params.hive_user,
                           group=params.hdfs_user,
                           mode=0777) # Hive expects this dir to be writeable by everyone as it is used as a temp dir
      
    params.HdfsResource(None, action="execute")

  Directory(params.hive_etc_dir_prefix,
            mode=0755
  )

  # We should change configurations for client as well as for server.
  # The reason is that stale-configs are service-level, not component.
  for conf_dir in params.hive_conf_dirs_list:
    fill_conf_dir(conf_dir)

  XmlConfig("hive-site.xml",
            conf_dir=params.hive_config_dir,
            configurations=params.hive_site_config,
            configuration_attributes=params.config['configuration_attributes']['hive-site'],
            owner=params.hive_user,
            group=params.user_group,
            mode=0644)

  setup_atlas_hive()
  
  if params.hive_specific_configs_supported and name == 'hiveserver2':
    XmlConfig("hiveserver2-site.xml",
              conf_dir=params.hive_server_conf_dir,
              configurations=params.config['configurations']['hiveserver2-site'],
              configuration_attributes=params.config['configuration_attributes']['hiveserver2-site'],
              owner=params.hive_user,
              group=params.user_group,
              mode=0644)
  
  File(format("{hive_config_dir}/hive-env.sh"),
       owner=params.hive_user,
       group=params.user_group,
       content=InlineTemplate(params.hive_env_sh_template)
  )

  # On some OS this folder could be not exists, so we will create it before pushing there files
  Directory(params.limits_conf_dir,
            recursive=True,
            owner='root',
            group='root'
            )

  File(os.path.join(params.limits_conf_dir, 'hive.conf'),
       owner='root',
       group='root',
       mode=0644,
       content=Template("hive.conf.j2")
       )

  if (name == 'metastore' or name == 'hiveserver2') and not os.path.exists(params.target):
    jdbc_connector()

  File(format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
       content = DownloadSource(format("{jdk_location}{check_db_connection_jar_name}")),
       mode = 0644,
  )

  if name == 'metastore':
    File(params.start_metastore_path,
         mode=0755,
         content=StaticFile('startMetastore.sh')
    )
    if params.init_metastore_schema:
      create_schema_cmd = format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
                                 "{hive_bin}/schematool -initSchema "
                                 "-dbType {hive_metastore_db_type} "
                                 "-userName {hive_metastore_user_name} "
                                 "-passWord {hive_metastore_user_passwd!p}")

      check_schema_created_cmd = as_user(format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
                                        "{hive_bin}/schematool -info "
                                        "-dbType {hive_metastore_db_type} "
                                        "-userName {hive_metastore_user_name} "
                                        "-passWord {hive_metastore_user_passwd!p}"), params.hive_user)

      # HACK: in cases with quoted passwords and as_user (which does the quoting as well) !p won't work for hiding passwords.
      # Fixing it with the hack below:
      quoted_hive_metastore_user_passwd = quote_bash_args(quote_bash_args(params.hive_metastore_user_passwd))
      if quoted_hive_metastore_user_passwd[0] == "'" and quoted_hive_metastore_user_passwd[-1] == "'" \
          or quoted_hive_metastore_user_passwd[0] == '"' and quoted_hive_metastore_user_passwd[-1] == '"':
        quoted_hive_metastore_user_passwd = quoted_hive_metastore_user_passwd[1:-1]
      Logger.sensitive_strings[repr(check_schema_created_cmd)] = repr(check_schema_created_cmd.replace(
          format("-passWord {quoted_hive_metastore_user_passwd}"), "-passWord " + utils.PASSWORDS_HIDE_STRING))

      Execute(create_schema_cmd,
              not_if = check_schema_created_cmd,
              user = params.hive_user
      )
  elif name == 'hiveserver2':
    File(params.start_hiveserver2_path,
         mode=0755,
         content=Template(format('{start_hiveserver2_script}'))
    )

  if name != "client":
    crt_directory(params.hive_pid_dir)
    crt_directory(params.hive_log_dir)
    crt_directory(params.hive_var_lib)