Example #1
def run_hawq_check(self, env):
    import params
    Logger.info("Executing HAWQ Check ...")
    params.File(
        hawq_constants.hawq_hosts_file,
        content=InlineTemplate(
            "{% for host in hawq_all_hosts %}{{host}}\n{% endfor %}"))
    Execute(
        "source {0} && hawq check -f {1} --hadoop {2} --config {3}".format(
            hawq_constants.hawq_greenplum_path_file,
            hawq_constants.hawq_hosts_file,
            stack_select.get_hadoop_dir('home'),
            hawq_constants.hawq_check_file),
        user=hawq_constants.hawq_user,
        timeout=hawq_constants.default_exec_timeout)
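This snippet renders a hosts file from an inline Jinja2 template and then shells out to `hawq check` against it. As a rough standalone sketch of the same two steps outside Ambari's resource_management framework, using plain jinja2 and subprocess (the host list and paths below are made-up placeholders):

import subprocess
from jinja2 import Template

# Hypothetical stand-ins for the values the Ambari params module would provide.
hawq_all_hosts = ["hawq-master.example.com", "hawq-seg1.example.com"]
hosts_file = "/tmp/hawq_hosts"
greenplum_path_file = "/usr/local/hawq/greenplum_path.sh"

# Roughly what File(..., content=InlineTemplate(...)) does: one host per line.
with open(hosts_file, "w") as f:
    f.write(Template(
        "{% for host in hawq_all_hosts %}{{host}}\n{% endfor %}"
    ).render(hawq_all_hosts=hawq_all_hosts))

# Roughly what Execute(...) does: source the environment file, then run the check.
subprocess.check_call(
    "source {0} && hawq check -f {1}".format(greenplum_path_file, hosts_file),
    shell=True, executable="/bin/bash")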
Example #2
stack_name = default("/hostLevelParams/stack_name", None)

# This is expected to be of the form #.#.#.#
stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version_major = format_hdp_stack_version(stack_version_unformatted)
hdp_stack_version = functions.get_hdp_version('hadoop-yarn-resourcemanager')

# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade.
# It cannot be used during the initial Cluster Install because the version is not yet known.
version = default("/commandParams/version", None)

hostname = config['hostname']

# hadoop default parameters
hadoop_libexec_dir = hdp_select.get_hadoop_dir("libexec")
hadoop_bin = hdp_select.get_hadoop_dir("sbin")
hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hadoop_yarn_home = '/usr/lib/hadoop-yarn'
hadoop_mapred2_jar_location = "/usr/lib/hadoop-mapreduce"
mapred_bin = "/usr/lib/hadoop-mapreduce/sbin"
yarn_bin = "/usr/lib/hadoop-yarn/sbin"
yarn_container_bin = "/usr/lib/hadoop-yarn/bin"
hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir")

# hadoop parameters for 2.2+
if Script.is_hdp_stack_greater_or_equal("2.2"):
  # MapR directory root
  mapred_role_root = "hadoop-mapreduce-client"
  command_role = default("/role", "")
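Several of the assignments above use `default(path, fallback)` to read optional keys from the command JSON without raising a KeyError. The helper below is not Ambari's implementation, only a minimal sketch of that contract, with the config dict passed explicitly instead of coming from Script.get_config():

def default(path, fallback, config):
    """Walk a '/'-separated path through nested dicts; return fallback if any key is missing."""
    node = config
    for key in path.strip("/").split("/"):
        if not isinstance(node, dict) or key not in node:
            return fallback
        node = node[key]
    return node

cfg = {"commandParams": {"version": "2.3.0.0-1234"}, "hostLevelParams": {}}
default("/commandParams/version", None, cfg)        # '2.3.0.0-1234'
default("/hostLevelParams/stack_name", None, cfg)   # None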
Example #3
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
sudo = AMBARI_SUDO_BINARY

hostname = config["hostname"]

# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
version = default("/commandParams/version", None)
stack_name = default("/hostLevelParams/stack_name", None)
upgrade_direction = default("/commandParams/upgrade_direction", None)

stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)

hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
hadoop_lib_home = hdp_select.get_hadoop_dir("lib")

#hadoop params
if Script.is_hdp_stack_greater_or_equal("2.2"):
    # something like 2.3.0.0-1234
    stack_version = None
    upgrade_stack = hdp_select._get_upgrade_stack()
    if upgrade_stack is not None and len(
            upgrade_stack) == 2 and upgrade_stack[1] is not None:
        stack_version = upgrade_stack[1]

    # oozie-server or oozie-client, depending on role
    oozie_root = status_params.component_directory

    # using the correct oozie root dir, format the correct location
    oozie_lib_dir = format("/usr/hdp/current/{oozie_root}")
Example #4
    daemon_name = 'mysql'
  else:
    daemon_name = 'mysqld'

  # Security related/required params
  hostname = config['hostname']
  security_enabled = config['configurations']['cluster-env']['security_enabled']
  kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
  tmp_dir = Script.get_tmp_dir()
  hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
  hive_user = config['configurations']['hive-env']['hive_user']
  webhcat_user = config['configurations']['hive-env']['webhcat_user']

  # default configuration directories
  hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
  hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
  webhcat_conf_dir = '/etc/hive-webhcat/conf'
  hive_etc_dir_prefix = "/etc/hive"
  hive_conf_dir = "/etc/hive/conf"
  hive_client_conf_dir = "/etc/hive/conf"

  # !!! required by ranger to be at this location unless HDP 2.3+
  hive_server_conf_dir = "/etc/hive/conf.server"

  # HDP 2.2+
  if Script.is_hdp_stack_greater_or_equal("2.2"):
    webhcat_conf_dir = '/usr/hdp/current/hive-webhcat/conf'
    hive_conf_dir = format("/usr/hdp/current/{component_directory}/conf")
    hive_client_conf_dir = format("/usr/hdp/current/{component_directory}/conf")

  # HDP 2.3+
Example #5
stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)

# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
version = default("/commandParams/version", None)

#mahout params
mahout_home = "/usr/hdp/current/mahout-client"
mahout_conf_dir = "/usr/hdp/current/mahout-client/conf"
mahout_user = config['configurations']['mahout-env']['mahout_user']

yarn_log_dir_prefix = config['configurations']['yarn-env']['yarn_log_dir_prefix']

#hadoop params
hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
hadoop_home = hdp_select.get_hadoop_dir("home")

# the configuration directory for HDFS/YARN/MapReduce is the hadoop config
# directory, which is symlinked by hadoop-client only
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()

hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
yarn_user = config['configurations']['yarn-env']['yarn_user']
hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
smokeuser = config['configurations']['cluster-env']['smokeuser']
smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
user_group = config['configurations']['cluster-env']['user_group']
security_enabled = config['configurations']['cluster-env']['security_enabled']
smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
Example #6
  xa_audit_db_is_enabled = config['configurations']['ranger-knox-audit']['xasecure.audit.destination.db'] if xml_configurations_supported else None
  xa_audit_hdfs_is_enabled = config['configurations']['ranger-knox-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else None
  ssl_keystore_password = unicode(config['configurations']['ranger-knox-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password']) if xml_configurations_supported else None
  ssl_truststore_password = unicode(config['configurations']['ranger-knox-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']) if xml_configurations_supported else None
  credential_file = format('/etc/ranger/{repo_name}/cred.jceks') if xml_configurations_supported else None

  #For SQLA explicitly disable audit to DB for Ranger
  if xa_audit_db_flavor == 'sqla':
    xa_audit_db_is_enabled = False

hdfs_user = config['configurations']['hadoop-env']['hdfs_user'] if has_namenode else None
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab'] if has_namenode else None
hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name'] if has_namenode else None
hdfs_site = config['configurations']['hdfs-site'] if has_namenode else None
default_fs = config['configurations']['core-site']['fs.defaultFS'] if has_namenode else None
hadoop_bin_dir = hdp_select.get_hadoop_dir("bin") if has_namenode else None
hadoop_conf_dir = conf_select.get_hadoop_conf_dir() if has_namenode else None

import functools
#create partial functions with common arguments for every HdfsResource call
#to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
HdfsResource = functools.partial(
  HdfsResource,
  user=hdfs_user,
  security_enabled = security_enabled,
  keytab = hdfs_user_keytab,
  kinit_path_local = kinit_path_local,
  hadoop_bin_dir = hadoop_bin_dir,
  hadoop_conf_dir = hadoop_conf_dir,
  principal_name = hdfs_principal_name,
  hdfs_site = hdfs_site,
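The `functools.partial` call here pre-binds the keyword arguments shared by every HdfsResource invocation, so later call sites only pass what varies. A self-contained illustration of the same pattern, with a plain function standing in for the real HdfsResource resource:

import functools

def hdfs_resource(path, user=None, security_enabled=False, keytab=None, action=None):
    # Placeholder for the real resource: just report what it would act on.
    print("%s %s as %s (secure=%s)" % (action, path, user, security_enabled))

# Bind the arguments every call shares once, up front.
HdfsResource = functools.partial(
    hdfs_resource,
    user="hdfs",
    security_enabled=True,
    keytab="/etc/security/keytabs/hdfs.headless.keytab")

# Call sites then only supply the path and the action.
HdfsResource("/user/ambari-qa", action="create_on_execute")
HdfsResource("/tmp/hive", action="create_on_execute")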
Example #7
# hdp version
stack_name = default("/hostLevelParams/stack_name", None)
version = default("/commandParams/version", None)
stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)

has_secure_user_auth = False
if Script.is_hdp_stack_greater_or_equal("2.3"):
    has_secure_user_auth = True

# configuration directories
conf_dir = status_params.conf_dir
server_conf_dir = status_params.server_conf_dir

# service locations
hadoop_prefix = hdp_select.get_hadoop_dir("home")
hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
zookeeper_home = "/usr/hdp/current/zookeeper-client"

# the configuration directory for HDFS/YARN/MapReduce is the hadoop config
# directory, which is symlinked by hadoop-client only
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()

# accumulo local directory structure
log_dir = config['configurations']['accumulo-env']['accumulo_log_dir']
client_script = "/usr/hdp/current/accumulo-client/bin/accumulo"
daemon_script = format("ACCUMULO_CONF_DIR={server_conf_dir} {client_script}")

# user and status
accumulo_user = status_params.accumulo_user
user_group = config['configurations']['cluster-env']['user_group']
Example #8
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)

# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
version = default("/commandParams/version", None)

# TODO! FIXME! Version check is not working as of today :
#   $ yum list installed | grep hdp-select
#   hdp-select.noarch                            2.2.1.0-2340.el6           @HDP-2.2
# And hdp_stack_version returned from hostLevelParams/stack_version is : 2.2.0.0
# Commenting out for time being
#stack_is_hdp22_or_further = hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2.1.0') >= 0

spark_conf = '/etc/spark/conf'
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")

if Script.is_hdp_stack_greater_or_equal("2.2"):
    hadoop_home = hdp_select.get_hadoop_dir("home")
    spark_conf = format("/usr/hdp/current/{component_directory}/conf")
    spark_log_dir = config['configurations']['spark-env']['spark_log_dir']
    spark_pid_dir = status_params.spark_pid_dir
    spark_home = format("/usr/hdp/current/{component_directory}")

spark_thrift_server_conf_file = spark_conf + "/spark-thrift-sparkconf.conf"
java_home = config['hostLevelParams']['java_home']

hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
hdfs_principal_name = config['configurations']['hadoop-env'][
    'hdfs_principal_name']
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
Example #9
from resource_management.libraries.functions import hdp_select
from resource_management.libraries.functions import format_jvm_option
from resource_management.libraries.functions.version import format_hdp_stack_version

from resource_management.core.system import System
from ambari_commons.os_check import OSCheck

config = Script.get_config()
sudo = AMBARI_SUDO_BINARY

stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)

# default hadoop params
mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
hadoop_libexec_dir = hdp_select.get_hadoop_dir("libexec")
hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"

# HDP 2.2+ params
if Script.is_hdp_stack_greater_or_equal("2.2"):
  mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"

  # not supported in HDP 2.2+
  hadoop_conf_empty_dir = None

versioned_hdp_root = '/usr/hdp/current'

#security params
security_enabled = config['configurations']['cluster-env']['security_enabled']

#java params
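The `Script.is_hdp_stack_greater_or_equal("2.2")` guards used throughout these params modules amount to normalizing the stack version string and comparing it numerically. The helper below only illustrates that idea and is not the resource_management implementation:

def normalize(version):
    # "2.2.1.0-2340" -> (2, 2, 1, 0): drop the build suffix, compare numerically.
    return tuple(int(part) for part in version.split("-")[0].split("."))

def is_stack_greater_or_equal(current, minimum):
    return normalize(current) >= normalize(minimum)

is_stack_greater_or_equal("2.3.0.0-1234", "2.2")   # True
is_stack_greater_or_equal("2.1.7.0", "2.2")        # False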
Example #10
from resource_management.libraries.functions import default
from resource_management.libraries.functions import format_jvm_option
from resource_management.libraries.functions import format
from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
from ambari_commons.os_check import OSCheck
from resource_management.libraries.script.script import Script

config = Script.get_config()

stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)

# hadoop default params
mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"

hadoop_libexec_dir = hdp_select.get_hadoop_dir("libexec")
hadoop_lib_home = hdp_select.get_hadoop_dir("lib")
hadoop_bin = hdp_select.get_hadoop_dir("sbin")
hadoop_home = '/usr'
create_lib_snappy_symlinks = True
hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
default_topology_script_file_path = "/etc/hadoop/conf/topology_script.py"

# HDP 2.2+ params
if Script.is_hdp_stack_greater_or_equal("2.2"):
    mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
    hadoop_home = hdp_select.get_hadoop_dir("home")
    create_lib_snappy_symlinks = False

current_service = config['serviceName']
Example #11
from resource_management.libraries.functions import format_jvm_option
from resource_management.libraries.functions import format
from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
from ambari_commons.os_check import OSCheck
from resource_management.libraries.script.script import Script


config = Script.get_config()

stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)

# hadoop default params
mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"

hadoop_libexec_dir = hdp_select.get_hadoop_dir("libexec")
hadoop_lib_home = hdp_select.get_hadoop_dir("lib")
hadoop_bin = hdp_select.get_hadoop_dir("sbin")
hadoop_home = '/usr'
create_lib_snappy_symlinks = True

# HDP 2.2+ params
if Script.is_hdp_stack_greater_or_equal("2.2"):
  mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
  hadoop_home = hdp_select.get_hadoop_dir("home")
  create_lib_snappy_symlinks = False
  
current_service = config['serviceName']

#security params
security_enabled = config['configurations']['cluster-env']['security_enabled']
Example #12
# [jobbrowser]
jobbrowser_share_jobs = config['configurations']['hue-hadoop-site'][
    'jobbrowser_share_jobs']
jobbrowser_disable_killing_jobs = config['configurations']['hue-hadoop-site'][
    'jobbrowser_disable_killing_jobs']
jobbrowser_log_offset = config['configurations']['hue-hadoop-site'][
    'jobbrowser_log_offset']
# [jobsub]
jobsub_local_data_dir = config['configurations']['hue-hadoop-site'][
    'jobsub_sample_data_dir']
jobsub_sample_data_dir = config['configurations']['hue-hadoop-site'][
    'jobsub_sample_data_dir']

hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
# CHANGE hadoop_bin_dir = stack_select.get_hadoop_dir('bin')
hadoop_bin_dir = hdp_select.get_hadoop_dir('bin')  # NEW
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hdfs_site = config['configurations']['hdfs-site']
default_fs = config['configurations']['core-site']['fs.defaultFS']
dfs_type = default("/commandParams/dfs_type", "")
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
hdfs_principal_name = config['configurations']['hadoop-env'][
    'hdfs_principal_name']
kinit_path_local = get_kinit_path(
    default('/configurations/kerberos-env/executable_search_paths', None))
# create partial functions with common arguments for every HdfsResource call
# to create hdfs directory we need to call params.HdfsResource in code
HdfsResource = functools.partial(
    HdfsResource,
    user=hdfs_user,
    # CHANGE hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
Example #13
# hdp version
stack_name = default("/hostLevelParams/stack_name", None)
version = default("/commandParams/version", None)
stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)

has_secure_user_auth = False
if Script.is_hdp_stack_greater_or_equal("2.3"):
  has_secure_user_auth = True

# configuration directories
conf_dir = status_params.conf_dir
server_conf_dir = status_params.server_conf_dir

# service locations
hadoop_prefix = hdp_select.get_hadoop_dir("home")
hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
zookeeper_home = "/usr/hdp/current/zookeeper-client"

# the configuration directory for HDFS/YARN/MapReduce is the hadoop config
# directory, which is symlinked by hadoop-client only
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()

# accumulo local directory structure
log_dir = config['configurations']['accumulo-env']['accumulo_log_dir']
client_script = "/usr/hdp/current/accumulo-client/bin/accumulo"
daemon_script = format("ACCUMULO_CONF_DIR={server_conf_dir} {client_script}")

# user and status
accumulo_user = status_params.accumulo_user
user_group = config['configurations']['cluster-env']['user_group']
Example #14
config = Script.get_config()

dfs_type = default("/commandParams/dfs_type", "")

sudo = AMBARI_SUDO_BINARY

stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)

# current host stack version
current_version = default("/hostLevelParams/current_version", None)

# default hadoop params
mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
hadoop_libexec_dir = hdp_select.get_hadoop_dir("libexec")
hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"

# HDP 2.2+ params
if Script.is_hdp_stack_greater_or_equal("2.2"):
    mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"

    # not supported in HDP 2.2+
    hadoop_conf_empty_dir = None

versioned_hdp_root = '/usr/hdp/current'

#security params
security_enabled = config['configurations']['cluster-env']['security_enabled']

#java params
Example #15
stack_name = default("/hostLevelParams/stack_name", None)

# This is expected to be of the form #.#.#.#
stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version_major = format_hdp_stack_version(stack_version_unformatted)
hdp_stack_version = functions.get_hdp_version('hadoop-yarn-resourcemanager')

# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade.
# It cannot be used during the initial Cluster Install because the version is not yet known.
version = default("/commandParams/version", None)

hostname = config['hostname']

# hadoop default parameters
hadoop_libexec_dir = hdp_select.get_hadoop_dir("libexec")
hadoop_bin = hdp_select.get_hadoop_dir("sbin")
hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hadoop_yarn_home = '/usr/lib/hadoop-yarn'
hadoop_mapred2_jar_location = "/usr/lib/hadoop-mapreduce"
mapred_bin = "/usr/lib/hadoop-mapreduce/sbin"
yarn_bin = "/usr/lib/hadoop-yarn/sbin"
yarn_container_bin = "/usr/lib/hadoop-yarn/bin"

# hadoop parameters for 2.2+
if Script.is_hdp_stack_greater_or_equal("2.2"):
  # MapR directory root
  mapred_role_root = "hadoop-mapreduce-client"
  command_role = default("/role", "")
  if command_role in MAPR_SERVER_ROLE_DIRECTORY_MAP:
Example #16
  ssl_truststore_password = unicode(config['configurations']['ranger-storm-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']) if xml_configurations_supported else None
  credential_file = format('/etc/ranger/{repo_name}/cred.jceks') if xml_configurations_supported else None

  #For SQLA explicitly disable audit to DB for Ranger
  if xa_audit_db_flavor == 'sqla':
    xa_audit_db_is_enabled = False

namenode_hosts = default("/clusterHostInfo/namenode_host", [])
has_namenode = not len(namenode_hosts) == 0

hdfs_user = config['configurations']['hadoop-env']['hdfs_user'] if has_namenode else None
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab'] if has_namenode else None
hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name'] if has_namenode else None
hdfs_site = config['configurations']['hdfs-site'] if has_namenode else None
default_fs = config['configurations']['core-site']['fs.defaultFS'] if has_namenode else None
hadoop_bin_dir = hdp_select.get_hadoop_dir("bin") if has_namenode else None
hadoop_conf_dir = conf_select.get_hadoop_conf_dir() if has_namenode else None
kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))

import functools
#create partial functions with common arguments for every HdfsResource call
#to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
HdfsResource = functools.partial(
  HdfsResource,
  user=hdfs_user,
  security_enabled = security_enabled,
  keytab = hdfs_user_keytab,
  kinit_path_local = kinit_path_local,
  hadoop_bin_dir = hadoop_bin_dir,
  hadoop_conf_dir = hadoop_conf_dir,
  principal_name = hdfs_principal_name,
Example #17
    """
  Returns True if port is root-owned at *nix systems
  """
    if port is not None:
        return port < 1024
    else:
        return False


# hadoop default params
mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"

# upgrades would cause these directories to have a version instead of "current"
# which would cause a lot of problems when writing out hadoop-env.sh; instead
# force the use of "current" in the hook
hadoop_home = hdp_select.get_hadoop_dir("home", force_latest_on_upgrade=True)
hadoop_libexec_dir = hdp_select.get_hadoop_dir("libexec",
                                               force_latest_on_upgrade=True)

hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
hadoop_secure_dn_user = hdfs_user
hadoop_dir = "/etc/hadoop"
versioned_hdp_root = '/usr/hdp/current'
hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir")
datanode_max_locked_memory = config['configurations']['hdfs-site'][
    'dfs.datanode.max.locked.memory']
is_datanode_max_locked_memory_set = not is_empty(
    config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory'])

# HDP 2.2+ params
if Script.is_hdp_stack_greater_or_equal("2.2"):
Example #18
def is_secure_port(port):
  """
  Returns True if port is root-owned at *nix systems
  """
  if port is not None:
    return port < 1024
  else:
    return False

# hadoop default params
mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"

# upgrades would cause these directories to have a version instead of "current"
# which would cause a lot of problems when writing out hadoop-env.sh; instead
# force the use of "current" in the hook
hadoop_home = hdp_select.get_hadoop_dir("home", force_latest_on_upgrade=True)
hadoop_libexec_dir = hdp_select.get_hadoop_dir("libexec", force_latest_on_upgrade=True)

hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
hadoop_secure_dn_user = hdfs_user
hadoop_dir = "/etc/hadoop"
versioned_hdp_root = '/usr/hdp/current'

# HDP 2.2+ params
if Script.is_hdp_stack_greater_or_equal("2.2"):
  mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"

  # not supported in HDP 2.2+
  hadoop_conf_empty_dir = None

  if not security_enabled:
Example #19
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
sudo = AMBARI_SUDO_BINARY

hostname = config["hostname"]

# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
version = default("/commandParams/version", None)
stack_name = default("/hostLevelParams/stack_name", None)
upgrade_direction = default("/commandParams/upgrade_direction", None)

stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)

hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
hadoop_lib_home = hdp_select.get_hadoop_dir("lib")

#hadoop params
if Script.is_hdp_stack_greater_or_equal("2.2"):
  # something like 2.3.0.0-1234
  stack_version = None
  upgrade_stack = hdp_select._get_upgrade_stack()
  if upgrade_stack is not None and len(upgrade_stack) == 2 and upgrade_stack[1] is not None:
    stack_version = upgrade_stack[1]

  # oozie-server or oozie-client, depending on role
  oozie_root = status_params.component_directory

  # using the correct oozie root dir, format the correct location
  oozie_lib_dir = format("/usr/hdp/current/{oozie_root}")
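`format("/usr/hdp/current/{oozie_root}")` is Ambari's namespace-aware formatter: it fills `{oozie_root}` from variables already in scope rather than from explicit keyword arguments. A rough sketch of that idea only; the real helper in resource_management differs in details:

import inspect

def format(template):
    # Illustrative only: resolve {name} placeholders from the caller's
    # local and global variables, mimicking the namespace-aware format()
    # used throughout these params modules.
    caller = inspect.currentframe().f_back
    namespace = dict(caller.f_globals, **caller.f_locals)
    return template.format(**namespace)

oozie_root = "oozie-server"
print(format("/usr/hdp/current/{oozie_root}"))  # /usr/hdp/current/oozie-server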
Example #20
hawqmaster_host = __get_component_host('hawqmaster_hosts')
hawqstandby_host = __get_component_host('hawqstandby_hosts')
hawqsegment_hosts = default('/clusterHostInfo/hawqsegment_hosts', [])

# HDFS
hdfs_site = config['configurations']['hdfs-site']
default_fs = config['configurations']['core-site']['fs.defaultFS']

security_enabled = config['configurations']['cluster-env']['security_enabled']
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
kinit_path_local = get_kinit_path(
    default('/configurations/kerberos-env/executable_search_paths', None))
hdfs_principal_name = config['configurations']['hadoop-env'][
    'hdfs_principal_name']
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hadoop_bin_dir = hadoop_select.get_hadoop_dir("bin")
execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir

# HDFSResource partial function
HdfsResource = functools.partial(HdfsResource,
                                 user=hdfs_superuser,
                                 security_enabled=security_enabled,
                                 keytab=hdfs_user_keytab,
                                 kinit_path_local=kinit_path_local,
                                 principal_name=hdfs_principal_name,
                                 hdfs_site=hdfs_site,
                                 default_fs=default_fs)

# ExecuteHadoop partial function
ExecuteHadoop = functools.partial(ExecuteHadoop,
                                  user=hdfs_superuser,
Example #21
def run_hawq_check(self, env):
  Logger.info("Executing HAWQ Check ...")
  Execute("source {0} && hawq check -f {1} --hadoop {2} --config {3}".format(hawq_constants.hawq_greenplum_path_file, hawq_constants.hawq_hosts_file, hadoop_select.get_hadoop_dir('home'), hawq_constants.hawq_check_file),
          user=hawq_constants.hawq_user,
          timeout=hawq_constants.default_exec_timeout)