Example 1
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()

stack_name = status_params.stack_name
stack_root = Script.get_stack_root()
stack_version_unformatted = config['hostLevelParams']['stack_version']
stack_version_formatted = format_stack_version(stack_version_unformatted)

sysprep_skip_copy_tarballs_hdfs = get_sysprep_skip_copy_tarballs_hdfs()

# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade
version = default("/commandParams/version", None)

spark_conf = '/etc/spark2/conf'
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hadoop_bin_dir = stack_select.get_hadoop_dir("bin")

if stack_version_formatted and check_stack_feature(
        StackFeature.ROLLING_UPGRADE, stack_version_formatted):
    hadoop_home = stack_select.get_hadoop_dir("home")
    spark_conf = format("{stack_root}/current/{component_directory}/conf")
    spark_log_dir = config['configurations']['spark2-env']['spark_log_dir']
    spark_pid_dir = status_params.spark_pid_dir
    spark_home = format("{stack_root}/current/{component_directory}")

spark_daemon_memory = config['configurations']['spark2-env'][
    'spark_daemon_memory']
spark_thrift_server_conf_file = spark_conf + "/spark-thrift-sparkconf.conf"
java_home = config['hostLevelParams']['java_home']

hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
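
A note on default(): several of these snippets read optional command parameters with default("/commandParams/version", None), as in the RESTART comment above. As a rough mental model only (an illustrative sketch, not the actual resource_management implementation), default() walks the '/'-separated path through the dictionary returned by Script.get_config() and returns the supplied fallback when any key along the path is missing:

# Illustrative sketch only: mirrors the assumed lookup behavior of
# resource_management.libraries.functions.default.default().
def default_lookup(config, path, fallback):
    node = config
    for key in path.strip('/').split('/'):
        if isinstance(node, dict) and key in node:
            node = node[key]
        else:
            return fallback
    return node

# default_lookup(config, "/commandParams/version", None) yields the restart-target
# stack version during a stack upgrade, or None on a normal start.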
Example 2
dfs_dn_http_addr = default(
    '/configurations/hdfs-site/dfs.datanode.http.address', None)
dfs_dn_https_addr = default(
    '/configurations/hdfs-site/dfs.datanode.https.address', None)
dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
dfs_dn_ipc_address = config['configurations']['hdfs-site'][
    'dfs.datanode.ipc.address']
secure_dn_ports_are_in_use = False

hdfs_tmp_dir = default("/configurations/hadoop-env/hdfs_tmp_dir", "/tmp")
namenode_backup_dir = default("/configurations/hadoop-env/namenode_backup_dir",
                              "/tmp/upgrades")

# hadoop default parameters
mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
hadoop_bin = stack_select.get_hadoop_dir("sbin")
hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
hadoop_home = stack_select.get_hadoop_dir("home")
hadoop_secure_dn_user = hdfs_user
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hadoop_conf_secure_dir = os.path.join(hadoop_conf_dir, "secure")
hadoop_lib_home = stack_select.get_hadoop_dir("lib")

# hadoop parameters for stacks that support rolling_upgrade
if stack_version_formatted and check_stack_feature(
        StackFeature.ROLLING_UPGRADE, stack_version_formatted):
    mapreduce_libs_path = format(
        "{stack_root}/current/hadoop-mapreduce-client/*")

    if not security_enabled:
Example 3
        livy_livyserver_protocol = 'https'

if stack_version_formatted and check_stack_feature(StackFeature.SPARK_LIVY2, stack_version_formatted) and \
    len(livy2_hosts) > 0:
    livy2_livyserver_host = str(livy2_hosts[0])
    livy2_livyserver_port = config['configurations']['livy2-conf'][
        'livy.server.port']
    if 'livy.keystore' in config['configurations']['livy2-conf']:
        livy2_livyserver_protocol = 'https'

hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
security_enabled = config['configurations']['cluster-env']['security_enabled']
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
kinit_path_local = get_kinit_path(
    default('/configurations/kerberos-env/executable_search_paths', None))
hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hdfs_principal_name = config['configurations']['hadoop-env'][
    'hdfs_principal_name']
hdfs_site = config['configurations']['hdfs-site']
default_fs = config['configurations']['core-site']['fs.defaultFS']

# create partial functions with common arguments for every HdfsResource call
# to create hdfs directory we need to call params.HdfsResource in code
HdfsResource = functools.partial(
    HdfsResource,
    user=hdfs_user,
    hdfs_resource_ignore_file=
    "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
    security_enabled=security_enabled,
    keytab=hdfs_user_keytab,
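
As the comment above the functools.partial call says, the partial pre-binds the arguments shared by every HdfsResource call so that service code only supplies the call-specific ones. A sketch of how such a bound resource is then used (the directory path, owner, and mode below are illustrative, not taken from this example):

# Illustrative only: queue the creation of an HDFS directory, then flush the
# queued operations; this follows the common Ambari HdfsResource call pattern.
params.HdfsResource("/user/spark",             # hypothetical target directory
                    type="directory",
                    action="create_on_execute",
                    owner=params.hdfs_user,
                    mode=0o775)
params.HdfsResource(None, action="execute")    # apply all queued HDFS operations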
Example 4
        ambari_bare_jaas_principal = get_bare_principal(_ambari_principal_name)

jdk_location = config['ambariLevelParams']['jdk_location']
namenode_hosts = default("/clusterHostInfo/namenode_hosts", [])
has_namenode = not len(namenode_hosts) == 0

hdfs_user = config['configurations']['hadoop-env'][
    'hdfs_user'] if has_namenode else None
hdfs_user_keytab = config['configurations']['hadoop-env'][
    'hdfs_user_keytab'] if has_namenode else None
hdfs_principal_name = config['configurations']['hadoop-env'][
    'hdfs_principal_name'] if has_namenode else None
hdfs_site = config['configurations']['hdfs-site'] if has_namenode else None
default_fs = config['configurations']['core-site'][
    'fs.defaultFS'] if has_namenode else None
hadoop_bin_dir = stack_select.get_hadoop_dir("bin") if has_namenode else None
hadoop_conf_dir = conf_select.get_hadoop_conf_dir() if has_namenode else None
kinit_path_local = get_kinit_path(
    default('/configurations/kerberos-env/executable_search_paths', None))
dfs_type = default("/clusterLevelParams/dfs_type", "")

# params from flink-ambari-config
flink_install_dir = "/opt/flink"
flink_bin_dir = "/opt/flink"
flink_numcontainers = config['configurations']['flink-site'][
    'flink_numcontainers']
flink_numberoftaskslots = config['configurations']['flink-site'][
    'flink_numberoftaskslots']
flink_jobmanager_memory = config['configurations']['flink-site'][
    'flink_jobmanager_memory']
flink_container_memory = config['configurations']['flink-site'][
Example 5
# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
version = default("/commandParams/version", None)
stack_name = status_params.stack_name
stack_name_uppercase = stack_name.upper()
upgrade_direction = default("/commandParams/upgrade_direction", None)
agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)

stack_root = status_params.stack_root
stack_version_unformatted = status_params.stack_version_unformatted
stack_version_formatted = status_params.stack_version_formatted
version_for_stack_feature_checks = get_stack_feature_version(config)

hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
hadoop_lib_home = stack_select.get_hadoop_dir("lib")

#spark_conf
spark_conf_dir = format("{stack_root}/current/spark-client/conf")

#hadoop params
if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
  stack_version = None
  upgrade_stack = stack_select._get_upgrade_stack()
  if upgrade_stack is not None and len(upgrade_stack) == 2 and upgrade_stack[1] is not None:
    stack_version = upgrade_stack[1]

  # oozie-server or oozie-client, depending on role
  oozie_root = status_params.component_directory
Example 6
hadoop_ssl_cert_ca_verify = config['configurations']['hue-hadoop-site']['ssl_cert_ca_verify']
hadoop_conf_dir = config['configurations']['hue-hadoop-site']['hadoop_conf_dir']
# [filebrowser]
filebrowser_archive_upload_tempdir = config['configurations']['hue-hadoop-site']['filebrowser_archive_upload_tempdir']
filebrowser_show_download_button = config['configurations']['hue-hadoop-site']['filebrowser_show_download_button']
filebrowser_show_upload_button = config['configurations']['hue-hadoop-site']['filebrowser_show_upload_button']
# [jobbrowser]
jobbrowser_share_jobs = config['configurations']['hue-hadoop-site']['jobbrowser_share_jobs']
jobbrowser_disable_killing_jobs = config['configurations']['hue-hadoop-site']['jobbrowser_disable_killing_jobs']
jobbrowser_log_offset = config['configurations']['hue-hadoop-site']['jobbrowser_log_offset']
# [jobsub]
jobsub_local_data_dir = config['configurations']['hue-hadoop-site']['jobsub_sample_data_dir']
jobsub_sample_data_dir = config['configurations']['hue-hadoop-site']['jobsub_sample_data_dir']

hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
hadoop_bin_dir = stack_select.get_hadoop_dir('bin')
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hdfs_site = config['configurations']['hdfs-site']
default_fs = config['configurations']['core-site']['fs.defaultFS']
dfs_type = default("/commandParams/dfs_type", "")
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
# create partial functions with common arguments for every HdfsResource call
# to create hdfs directory we need to call params.HdfsResource in code
HdfsResource = functools.partial(
    HdfsResource,
    user=hdfs_user,
    hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
    security_enabled=security_enabled,
    keytab=hdfs_user_keytab,
Example 7
  """
    if port is not None:
        return port < 1024
    else:
        return False


# hadoop default params
mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"

# upgrades would cause these directories to have a version instead of "current"
# which would cause a lot of problems when writing out hadoop-env.sh; instead
# force the use of "current" in the hook
hdfs_user_nofile_limit = default(
    "/configurations/hadoop-env/hdfs_user_nofile_limit", "128000")
hadoop_home = stack_select.get_hadoop_dir("home")
hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")

hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
hadoop_secure_dn_user = hdfs_user
hadoop_dir = "/etc/hadoop"
versioned_stack_root = '/usr/bgtp/current'
hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir")
datanode_max_locked_memory = config['configurations']['hdfs-site'][
    'dfs.datanode.max.locked.memory']
is_datanode_max_locked_memory_set = not is_empty(
    config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory'])

#hadoop params
hdfs_log_dir_prefix = config['configurations']['hadoop-env'][
    'hdfs_log_dir_prefix']
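
The helper at the top of this example encodes the *nix rule that ports below 1024 require root, and it is the kind of check that typically drives the secure_dn_ports_are_in_use flag initialized in Example 2. A sketch of that derivation, with helper names and address values that are illustrative only:

def is_port_root_owned(port):
    # Ports below 1024 are privileged on *nix, mirroring the helper above.
    return port is not None and port < 1024

def get_port(address):
    # Pull the port out of a 'host:port' address string, if one is present.
    if address is None or ':' not in address:
        return None
    return int(address.split(':')[-1])

# e.g. with dfs_dn_http_addr = '0.0.0.0:1022' and dfs_dn_ipc_address = '0.0.0.0:8010',
# the flag becomes True because 1022 is a privileged port.
secure_dn_ports_are_in_use = (is_port_root_owned(get_port('0.0.0.0:1022')) or
                              is_port_root_owned(get_port('0.0.0.0:8010')))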
Example 8
  'NODEMANAGER' : 'hadoop-yarn-nodemanager',
  'RESOURCEMANAGER' : 'hadoop-yarn-resourcemanager',
  'YARN_CLIENT' : 'hadoop-yarn-client'
}

# server configurations
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()

architecture = get_architecture()

stack_name = status_params.stack_name
stack_root = Script.get_stack_root()
tarball_map = default("/configurations/cluster-env/tarball_map", None)

config_path = stack_select.get_hadoop_dir("conf")
config_dir = os.path.realpath(config_path)

# get the correct version to use for checking stack features
version_for_stack_feature_checks = get_stack_feature_version(config)

# This is expected to be of the form #.#.#.#
stack_version_unformatted = config['clusterLevelParams']['stack_version']
stack_version_formatted_major = format_stack_version(stack_version_unformatted)
stack_version_formatted = functions.get_stack_version('hadoop-yarn-resourcemanager')
major_stack_version = get_major_version(stack_version_formatted_major)

stack_supports_ru = check_stack_feature(StackFeature.ROLLING_UPGRADE, version_for_stack_feature_checks)
stack_supports_timeline_state_store = check_stack_feature(StackFeature.TIMELINE_STATE_STORE, version_for_stack_feature_checks)

# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade.
Example 9
from resource_management.libraries.script.script import Script
from resource_management.libraries import functions
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import format
from resource_management.libraries.functions.default import default

config = Script.get_config()
tmp_dir = Script.get_tmp_dir()

mapred_user = config['configurations']['mapred-env']['mapred_user']
yarn_user = config['configurations']['yarn-env']['yarn_user']
yarn_pid_dir_prefix = config['configurations']['yarn-env']['yarn_pid_dir_prefix']
mapred_pid_dir_prefix = config['configurations']['mapred-env']['mapred_pid_dir_prefix']
yarn_pid_dir = format("{yarn_pid_dir_prefix}/{yarn_user}")
mapred_pid_dir = format("{mapred_pid_dir_prefix}/{mapred_user}")

resourcemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-resourcemanager.pid")
nodemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-nodemanager.pid")
yarn_historyserver_pid_file_old = format("{yarn_pid_dir}/yarn-{yarn_user}-historyserver.pid")
yarn_historyserver_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-timelineserver.pid")  # *-historyserver.pid is deprecated
mapred_historyserver_pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-historyserver.pid")

hadoop_home = stack_select.get_hadoop_dir("home")
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()

hostname = config['agentLevelParams']['hostname']
kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
security_enabled = config['configurations']['cluster-env']['security_enabled']

stack_name = default("/clusterLevelParams/stack_name", None)
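
The pid-file paths defined here are what the components' status() methods poll. A minimal sketch of that pattern, assuming this module is imported as status_params and using the standard check_process_status helper, which raises ComponentIsNotRunning when the pid file is missing or stale:

# Minimal sketch of a status check built on the pid files defined above.
from resource_management.libraries.functions.check_process_status import check_process_status
from resource_management.libraries.script.script import Script


class Resourcemanager(Script):
    def status(self, env):
        import status_params
        check_process_status(status_params.resourcemanager_pid_file)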
Example 10
    """
  Returns True if port is root-owned at *nix systems
  """
    if port is not None:
        return port < 1024
    else:
        return False


# hadoop default params
mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"

# upgrades would cause these directories to have a version instead of "current"
# which would cause a lot of problems when writing out hadoop-env.sh; instead
# force the use of "current" in the hook
hadoop_home = stack_select.get_hadoop_dir("home", force_latest_on_upgrade=True)
hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec",
                                                 force_latest_on_upgrade=True)

hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
hadoop_secure_dn_user = hdfs_user
hadoop_dir = "/etc/hadoop"
versioned_stack_root = '/usr/hdp/current'
hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir")
datanode_max_locked_memory = config['configurations']['hdfs-site'][
    'dfs.datanode.max.locked.memory']
is_datanode_max_locked_memory_set = not is_empty(
    config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory'])

# HDP 2.2+ params
if Script.is_stack_greater_or_equal("2.2"):
Example 11
# stack version
version = default("/commandParams/version", None)
stack_version_unformatted = config['hostLevelParams']['stack_version']
stack_version_formatted = format_stack_version(stack_version_unformatted)

has_secure_user_auth = False
if stack_version_formatted and \
    check_stack_feature(StackFeature.ACCUMULO_KERBEROS_USER_AUTH, stack_version_formatted):
  has_secure_user_auth = True

# configuration directories
conf_dir = status_params.conf_dir
server_conf_dir = status_params.server_conf_dir

# service locations
hadoop_prefix = stack_select.get_hadoop_dir("home")
hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
zookeeper_home = format("{stack_root}/current/zookeeper-client")

# the configuration directory for HDFS/YARN/MapR is the hadoop config
# directory, which is symlinked by hadoop-client only
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()

# accumulo local directory structure
log_dir = config['configurations']['accumulo-env']['accumulo_log_dir']
client_script = format("{stack_root}/current/accumulo-client/bin/accumulo")
daemon_script = format("ACCUMULO_CONF_DIR={server_conf_dir} {client_script}")

# user and status
accumulo_user = status_params.accumulo_user
user_group = config['configurations']['cluster-env']['user_group']
Example 12
def pre_upgrade_deregister():
    """
  Runs the "hive --service hiveserver2 --deregister <version>" command to
  de-provision the server in preparation for an upgrade. This will contact
  ZooKeeper to remove the server so that clients that attempt to connect
  will be directed to other servers automatically. Once all
  clients have drained, the server will shutdown automatically; this process
  could take a very long time.
  This function will obtain the Kerberos ticket if security is enabled.
  :return:
  """
    import params

    Logger.info(
        'HiveServer2 executing "deregister" command in preparation for upgrade...'
    )

    if params.security_enabled:
        kinit_command = format(
            "{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal}; "
        )
        Execute(kinit_command, user=params.smokeuser)

    # calculate the current hive server version
    current_hiveserver_version = _get_current_hiveserver_version()
    if current_hiveserver_version is None:
        raise Fail(
            'Unable to determine the current HiveServer2 version to deregister.'
        )

    # fallback when upgrading because /usr/iop/current/hive-server2/conf/conf.server may not exist
    hive_server_conf_dir = params.hive_server_conf_dir
    if not os.path.exists(hive_server_conf_dir):
        hive_server_conf_dir = "/etc/hive/conf.server"

    # deregister
    hive_execute_path = params.execute_path
    # If upgrading, the upgrade-target hive binary should be used to call the --deregister command.
    # If downgrading, the downgrade-source hive binary should be used to call the --deregister command.
    if "upgrade" == params.upgrade_direction:
        # hive_bin
        upgrade_target_version = format_stack_version(params.version)
        if upgrade_target_version and compare_versions(upgrade_target_version,
                                                       "4.1.0.0") >= 0:
            upgrade_target_hive_bin = format('/usr/iop/{version}/hive/bin')
            if (os.pathsep + params.hive_bin) in hive_execute_path:
                hive_execute_path = hive_execute_path.replace(
                    os.pathsep + params.hive_bin,
                    os.pathsep + upgrade_target_hive_bin)
        # hadoop_bin_dir
        upgrade_target_hadoop_bin = stack_select.get_hadoop_dir(
            "bin", upgrade_stack_only=True)
        upgrade_source_hadoop_bin = params.hadoop_bin_dir
        if upgrade_target_hadoop_bin and len(
                upgrade_target_hadoop_bin) > 0 and (
                    os.pathsep +
                    upgrade_source_hadoop_bin) in hive_execute_path:
            hive_execute_path = hive_execute_path.replace(
                os.pathsep + upgrade_source_hadoop_bin,
                os.pathsep + upgrade_target_hadoop_bin)

    command = format(
        'hive --config {hive_server_conf_dir} --service hiveserver2 --deregister '
        + current_hiveserver_version)
    Execute(command, user=params.hive_user, path=hive_execute_path, tries=1)
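
For concreteness, the final format() call assembles a plain hive CLI invocation. Assuming hive_server_conf_dir resolves to /etc/hive/conf.server and the detected HiveServer2 version is, say, 1.2.1000 (a hypothetical value), the Execute() above runs roughly:

# Illustrative only; the configuration path and version string are hypothetical.
example_command = ('hive --config /etc/hive/conf.server '
                   '--service hiveserver2 --deregister 1.2.1000')
# Execute(example_command, user=params.hive_user, path=hive_execute_path, tries=1)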
Example 13
stack_supports_ranger_audit_db = check_stack_feature(
    StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
stack_supports_ranger_hive_jdbc_url_change = check_stack_feature(
    StackFeature.RANGER_HIVE_PLUGIN_JDBC_URL, version_for_stack_feature_checks)
stack_supports_atlas_hook_for_hive_interactive = check_stack_feature(
    StackFeature.HIVE_INTERACTIVE_ATLAS_HOOK_REQUIRED,
    version_for_stack_feature_checks)
stack_supports_hive_interactive_ga = check_stack_feature(
    StackFeature.HIVE_INTERACTIVE_GA_SUPPORT, version_for_stack_feature_checks)

# component ROLE directory (like hive-metastore or hive-server2-hive2)
component_directory = status_params.component_directory
component_directory_interactive = status_params.component_directory_interactive

# used to render hadoop configurations, such as writing out its own mapreduce2 configs
hadoop_home = stack_select.get_hadoop_dir("home")
hadoop_lib_home = stack_select.get_hadoop_dir("lib")

hive_bin = format('{stack_root}/current/{component_directory}/bin')
hive_cmd = os.path.join(hive_bin, "hive")
hive_schematool_ver_bin = format('{stack_root}/{version}/hive/bin')
hive_schematool_bin = format('{stack_root}/current/{component_directory}/bin')
hive_lib = format('{stack_root}/current/{component_directory}/lib')
hive_version_lib = format('{stack_root}/{version}/hive/lib')
hive_var_lib = '/var/lib/hive'
hive_user_home_dir = "/home/hive"

# starting on stacks where HSI is supported, we need to begin using the 'hive2' schematool
hive_server2_hive2_dir = None
hive_server2_hive2_lib = None
Example 14
# Elasticsearch hosts and port management
es_cluster_name = config['configurations']['metron-env']['es_cluster_name']
es_hosts = config['configurations']['metron-env']['es_hosts']
es_host_list = es_hosts.split(",")
es_http_port = config['configurations']['metron-env']['es_http_port']
es_url = ",".join([host + ":" + es_http_port for host in es_host_list])
es_http_url = es_host_list[0] + ":" + es_http_port
es_date_format = config['configurations']['metron-env']['es_date_format']

# hadoop params
stack_root = Script.get_stack_root()
# This is the cluster group named 'hadoop'. Its members are the stack service accounts, not individual end users.
# The config name 'user_group' is out of our control and a bit misleading, so it is renamed to 'hadoop_group'.
hadoop_group = config['configurations']['cluster-env']['user_group']
hadoop_home_dir = stack_select.get_hadoop_dir("home")
hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
kafka_home = os.path.join(stack_root, "current", "kafka-broker")
kafka_bin_dir = os.path.join(kafka_home, "bin")

# zookeeper
zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
has_zk_host = not len(zk_hosts) == 0
zookeeper_quorum = None
if has_zk_host:
    if 'zoo.cfg' in config['configurations'] and 'clientPort' in config['configurations']['zoo.cfg']:
        zookeeper_clientPort = config['configurations']['zoo.cfg']['clientPort']
    else:
        zookeeper_clientPort = '2181'
    zookeeper_quorum = (':' + zookeeper_clientPort + ',').join(config['clusterHostInfo']['zookeeper_hosts'])
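
The join on the last line is easy to misread: it inserts ':<clientPort>,' between hosts, so the last host comes out without a port; full versions of this params file typically append ':' + zookeeper_clientPort once more afterwards. A quick illustration with hypothetical host names:

# Hypothetical hosts and the default client port:
hosts = ['zk1.example.com', 'zk2.example.com', 'zk3.example.com']
quorum = (':' + '2181' + ',').join(hosts)
# -> 'zk1.example.com:2181,zk2.example.com:2181,zk3.example.com'
quorum += ':' + '2181'
# -> 'zk1.example.com:2181,zk2.example.com:2181,zk3.example.com:2181'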
Example 15
from resource_management.libraries.functions import default
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import format_jvm_option

from resource_management.core.system import System
from resource_management.libraries.script.script import Script
from ambari_commons.constants import AMBARI_SUDO_BINARY
from ambari_commons.os_check import OSCheck

config = Script.get_config()
sudo = AMBARI_SUDO_BINARY

stack_version_unformatted = str(config['hostLevelParams']['stack_version'])

# default hadoop params
mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"

#security params
security_enabled = config['configurations']['cluster-env']['security_enabled']

#java params
java_home = config['hostLevelParams']['java_home']

#hadoop params
hdfs_log_dir_prefix = config['configurations']['hadoop-env'][
    'hdfs_log_dir_prefix']
hadoop_pid_dir_prefix = config['configurations']['hadoop-env'][
    'hadoop_pid_dir_prefix']
hadoop_root_logger = config['configurations']['hadoop-env'][
    'hadoop_root_logger']
Example 16
solr_collection_config_dir = map_example_collection['solr_collection_sample_config_directory']
solr_collection_shards = str(map_example_collection['solr_collection_sample_shards'])
solr_collection_replicas = str(map_example_collection['solr_collection_sample_replicas'])

# Solr security
security_enabled = config['configurations']['cluster-env']['security_enabled']
kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))

# solr + HDFS
map_solr_hdfs = config['configurations']['solr-hdfs']
solr_hdfs_enable = bool(map_solr_hdfs['solr_hdfs_enable'])
solr_hdfs_prefix = '#' if not solr_hdfs_enable else ''

if solr_hdfs_enable:
    solr_hdfs_directory = map_solr_hdfs['solr_hdfs_directory']
    hadoop_bin_dir = stack_select.get_hadoop_dir('bin')
    hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
    hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
    hdfs_site = config['configurations']['hdfs-site']
    hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
    default_fs = config['configurations']['core-site']['fs.defaultFS']
    dfs_type = default('/commandParams/dfs_type', '')
    hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
    solr_hdfs_delete_write_lock_files = bool(map_solr_hdfs['solr_hdfs_delete_write_lock_files'])

    HdfsResource = functools.partial(
        HdfsResource,
        user=hdfs_user,
        hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
        security_enabled=security_enabled,
        keytab=hdfs_user_keytab,
Example 17
stack_version_unformatted = str(config['hostLevelParams']['stack_version'])

# e.g. 2.3.0.0
#hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)

# e.g. 2.3.0.0-2130
full_version = default("/commandParams/version", None)
hdp_version = full_version
stack_root = Script.get_stack_root()

hdfs_site = config['configurations']['hdfs-site']
default_fs = config['configurations']['core-site']['fs.defaultFS']
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
security_enabled = config['configurations']['cluster-env']['security_enabled']
kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
dfs_type = default("/commandParams/dfs_type", "")
splice_pid_file = "/tmp/splice-ambari-master.pid"


import functools
#create partial functions with common arguments for every HdfsResource call
#to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
HdfsResource = functools.partial(
        HdfsResource,
        user=hdfs_user,
        hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
        security_enabled = security_enabled,