Example #1
def setup_hbase_extensions():
    import params

    # HBase Custom extensions
    hbase_custom_extensions_enabled = default(
        "/configurations/hbase-site/hbase.custom-extensions.enabled", False)
    hbase_custom_extensions_owner = default(
        "/configurations/hbase-site/hbase.custom-extensions.owner",
        params.hdfs_user)
    hbase_custom_extensions_hdfs_dir = get_config_formatted_value(
        default("/configurations/hbase-site/hbase.custom-extensions.root",
                DEFAULT_HADOOP_HBASE_EXTENSION_DIR))
    hbase_custom_extensions_local_dir = "{0}/ext/hbase".format(
        Script.get_stack_root())

    impacted_components = [
        'HBASE_MASTER', 'HBASE_REGIONSERVER', 'PHOENIX_QUERY_SERVER'
    ]
    role = params.config.get('role', '')

    if role in impacted_components:
        clean_extensions(hbase_custom_extensions_local_dir)
        if hbase_custom_extensions_enabled:
            download_extensions(hbase_custom_extensions_owner,
                                params.user_group,
                                hbase_custom_extensions_hdfs_dir,
                                hbase_custom_extensions_local_dir)
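Every snippet in this collection pulls values out of the command JSON through Ambari's default() helper before falling back to a literal. The lookup behaves roughly like the sketch below; this is an illustration only, not the resource_management implementation, and default_sketch is a hypothetical name.

from resource_management.libraries.script.script import Script

def default_sketch(path, default_value):
    # Walk the '/'-separated path through the command JSON and return the
    # fallback as soon as any key is missing.
    node = Script.get_config()
    for key in path.strip('/').split('/'):
        if not isinstance(node, dict) or key not in node:
            return default_value
        node = node[key]
    return node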
Example #2
def setup_extensions():
    import params

    # Hadoop Custom extensions
    hadoop_custom_extensions_enabled = default(
        "/configurations/core-site/hadoop.custom-extensions.enabled", False)
    hadoop_custom_extensions_services = default(
        "/configurations/core-site/hadoop.custom-extensions.services", "")
    hadoop_custom_extensions_owner = default(
        "/configurations/core-site/hadoop.custom-extensions.owner",
        params.hdfs_user)
    hadoop_custom_extensions_services = [
        service.strip().upper()
        for service in hadoop_custom_extensions_services.split(",")
    ]
    hadoop_custom_extensions_services.append("YARN")
    hadoop_custom_extensions_hdfs_dir = "/iop/ext/{0}/hadoop".format(
        params.stack_version_formatted)
    hadoop_custom_extensions_local_dir = "{0}/current/ext/hadoop".format(
        Script.get_stack_root())

    if params.current_service in hadoop_custom_extensions_services:
        clean_extensions(hadoop_custom_extensions_local_dir)
        if hadoop_custom_extensions_enabled:
            download_extensions(hadoop_custom_extensions_owner,
                                params.user_group,
                                hadoop_custom_extensions_hdfs_dir,
                                hadoop_custom_extensions_local_dir)

    setup_extensions_hive()

    hbase_custom_extensions_services = []
    hbase_custom_extensions_services.append("HBASE")
    if params.current_service in hbase_custom_extensions_services:
        setup_hbase_extensions()
Example #3
def setup_extensions_hive():
    import params

    hive_custom_extensions_enabled = default(
        "/configurations/hive-site/hive.custom-extensions.enabled", False)
    hive_custom_extensions_owner = default(
        "/configurations/hive-site/hive.custom-extensions.owner",
        params.hdfs_user)
    hive_custom_extensions_hdfs_dir = DEFAULT_HADOOP_HIVE_EXTENSION_DIR.format(
        params.major_stack_version)

    hive_custom_extensions_local_dir = "{0}/current/ext/hive".format(
        Script.get_stack_root())

    impacted_components = ['HIVE_SERVER', 'HIVE_CLIENT']
    role = params.config.get('role', '')

    # Run copying for HIVE_SERVER and HIVE_CLIENT
    if params.current_service == 'HIVE' and role in impacted_components:
        clean_extensions(hive_custom_extensions_local_dir)
        if hive_custom_extensions_enabled:
            download_extensions(hive_custom_extensions_owner,
                                params.user_group,
                                hive_custom_extensions_hdfs_dir,
                                hive_custom_extensions_local_dir)
Example #4
 def kerberos_client_conf(self):
     kerberos_host = default('clusterHostInfo/krb5_master_hosts', [])
     if len(kerberos_host) > 0:
         realm = default('configurations/krb5-config/kdc.realm',
                         'EXAMPLE.COM')
         Execute('/usr/sbin/authconfig --enablekrb5 --krb5kdc="' +
                 ' '.join(kerberos_host) + '"  --krb5adminserver="' +
                 ' '.join(kerberos_host) + '"  --krb5realm="' + realm +
                 '"  --update')
Example #5
def kerberos_client_conf():
    kerberos_host = default('clusterHostInfo/krb5_master_hosts', [])
    realm = default('configurations/krb5-config/kdc.realm', 'example.com')
    kdc_hosts = default('configurations/zookeeper-env/kdc_hosts', '')
    if kdc_hosts.strip() != '':
        Execute('/usr/sbin/authconfig --enablekrb5 --krb5kdc="' + kdc_hosts +
                '"  --krb5adminserver="' + kdc_hosts + '"  --krb5realm="' +
                realm + '"  --update')
    elif len(kerberos_host) > 0:
        Execute('/usr/sbin/authconfig --enablekrb5 --krb5kdc="' +
                ' '.join(kerberos_host) + '"  --krb5adminserver="' +
                ' '.join(kerberos_host) + '"  --krb5realm="' + realm +
                '"  --update')
    else:
        raise Fail(u'The KDC address is empty. Please fill in the KDC address or install a KDC first.')
Example #6
def conf_ntp():
    lock_file = base_lock_dir + '/install_ntp'
    ntp_file = '/etc/chrony.conf'
    ntp_server_hosts = default('clusterHostInfo/ntp_server_hosts', [])
    if len(ntp_server_hosts) > 0 and not os.path.exists(lock_file):
        hostname = get_hostname()
        if hostname not in ntp_server_hosts:
            ntp_server_list = ['server  ' + server + '   iburst' for server in ntp_server_hosts]
            ntp_server = '\n '.join(ntp_server_list)
            ntp_conf = ntp_server + '''

driftfile /var/lib/chrony/drift

makestep 1.0 3

rtcsync

allow 192.168.0.0/24
allow 172.16.0.0/16
allow 10.0.0.0/8

logdir /var/log/chrony
            '''
            Execute('echo -ne "' + ntp_conf + '" > ' + ntp_file)

            Execute('systemctl enable chronyd && systemctl restart chronyd')

            Execute(" echo 1 > " + lock_file)
Example #7
  def configure(self, env, upgrade_type=None):
    import params

    # The configure command doesn't actually receive the upgrade_type from Script.py, so get it from the config dictionary
    if upgrade_type is None:
      restart_type = default("/commandParams/restart_type", "")
      if restart_type.lower() == "rolling_upgrade":
        upgrade_type = UPGRADE_TYPE_ROLLING
      elif restart_type.lower() == "nonrolling_upgrade":
        upgrade_type = UPGRADE_TYPE_NON_ROLLING

    if upgrade_type is not None and params.upgrade_direction == Direction.UPGRADE and params.version is not None:
      Logger.info(format("Configuring Oozie during upgrade type: {upgrade_type}, direction: {params.upgrade_direction}, and version {params.version}"))
      if compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
        # In order for the "/usr/hdp/current/oozie-<client/server>" point to the new version of
        # oozie, we need to create the symlinks both for server and client.
        # This is required as both need to be pointing to new installed oozie version.

        # Sets the symlink : eg: /usr/hdp/current/oozie-client -> /usr/hdp/2.3.x.y-<version>/oozie
        stack_select.select("oozie-client", params.version)
        # Sets the symlink : eg: /usr/hdp/current/oozie-server -> /usr/hdp/2.3.x.y-<version>/oozie
        stack_select.select("oozie-server", params.version)

      if compare_versions(format_stack_version(params.version), '2.3.0.0') >= 0:
        conf_select.select(params.stack_name, "oozie", params.version)

    env.set_params(params)
    oozie(is_server=True)
Example #8
def setup_hbase_extensions():
    import params

    # HBase Custom extensions
    hbase_custom_extensions_enabled = default(
        "/configurations/hbase-site/hbase.custom-extensions.enabled", False)
    hbase_custom_extensions_owner = default(
        "/configurations/hbase-site/hbase.custom-extensions.owner",
        params.hdfs_user)
    hbase_custom_extensions_hdfs_dir = "/iop/ext/{0}/hbase".format(
        params.stack_version_formatted)

    if hbase_custom_extensions_enabled:
        download_hbase_extensions(hbase_custom_extensions_owner,
                                  params.user_group,
                                  hbase_custom_extensions_hdfs_dir)
Example #9
  def configure(self, env, upgrade_type=None):
    import params

    # The configure command doesn't actually receive the upgrade_type from Script.py, so get it from the config dictionary
    if upgrade_type is None:
      restart_type = default("/commandParams/restart_type", "")
      if restart_type.lower() == "rolling_upgrade":
        upgrade_type = UPGRADE_TYPE_ROLLING
      elif restart_type.lower() == "nonrolling_upgrade":
        upgrade_type = UPGRADE_TYPE_NON_ROLLING

    if upgrade_type is not None and params.upgrade_direction == Direction.UPGRADE and params.version is not None:
      Logger.info(format("Configuring Oozie during upgrade type: {upgrade_type}, direction: {params.upgrade_direction}, and version {params.version}"))
      if compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
        # In order for the "/usr/hdp/current/oozie-<client/server>" point to the new version of
        # oozie, we need to create the symlinks both for server and client.
        # This is required as both need to be pointing to new installed oozie version.

        # Sets the symlink : eg: /usr/hdp/current/oozie-client -> /usr/hdp/2.3.x.y-<version>/oozie
        hdp_select.select("oozie-client", params.version)
        # Sets the symlink : eg: /usr/hdp/current/oozie-server -> /usr/hdp/2.3.x.y-<version>/oozie
        hdp_select.select("oozie-server", params.version)

      if compare_versions(format_hdp_stack_version(params.version), '2.3.0.0') >= 0:
        conf_select.select(params.stack_name, "oozie", params.version)

    env.set_params(params)
    oozie(is_server=True)
Example #10
    def configure(self, env, upgrade_type=None):
        import params

        # The configure command doesn't actually receive the upgrade_type from Script.py, so get it from the config dictionary
        if upgrade_type is None:
            upgrade_type = Script.get_upgrade_type(
                default("/commandParams/upgrade_type", ""))

        if upgrade_type is not None and params.upgrade_direction == Direction.UPGRADE and params.version is not None:
            Logger.info(
                format(
                    "Configuring Oozie during upgrade type: {upgrade_type}, direction: {params.upgrade_direction}, and version {params.version}"
                ))
            if params.version and check_stack_feature(
                    StackFeature.ROLLING_UPGRADE, params.version):
                # In order for the "<stack-root>/current/oozie-<client/server>" point to the new version of
                # oozie, we need to create the symlinks both for server and client.
                # This is required as both need to be pointing to new installed oozie version.

                # Sets the symlink : eg: <stack-root>/current/oozie-client -> <stack-root>/a.b.c.d-<version>/oozie
                stack_select.select("oozie-client", params.version)
                # Sets the symlink : eg: <stack-root>/current/oozie-server -> <stack-root>/a.b.c.d-<version>/oozie
                stack_select.select("oozie-server", params.version)

            if params.version and check_stack_feature(
                    StackFeature.CONFIG_VERSIONING, params.version):
                conf_select.select(params.stack_name, "oozie", params.version)

        env.set_params(params)
        oozie(is_server=True, upgrade_type=upgrade_type)
Example #11
def setup_extensions():
    """
  The goal of this method is to distribute extensions (for example jar files) from
  HDFS (/hdp/ext/{major_stack_version}/{service_name}) to all nodes which contain related
  components of service (YARN, HIVE or HBASE). Extensions should be added to HDFS by
  user manually.
  """

    import params

    # Hadoop Custom extensions
    hadoop_custom_extensions_enabled = default(
        "/configurations/core-site/hadoop.custom-extensions.enabled", False)
    hadoop_custom_extensions_services = default(
        "/configurations/core-site/hadoop.custom-extensions.services", "")
    hadoop_custom_extensions_owner = default(
        "/configurations/core-site/hadoop.custom-extensions.owner",
        params.hdfs_user)
    hadoop_custom_extensions_hdfs_dir = get_config_formatted_value(
        default(
            "/configurations/core-site/hadoop.custom-extensions.root",
            DEFAULT_HADOOP_HDFS_EXTENSION_DIR.format(
                params.major_stack_version)))
    hadoop_custom_extensions_services = [
        service.strip().upper()
        for service in hadoop_custom_extensions_services.split(",")
    ]
    hadoop_custom_extensions_services.append("YARN")

    hadoop_custom_extensions_local_dir = "{0}/current/ext/hadoop".format(
        Script.get_stack_root())

    if params.current_service in hadoop_custom_extensions_services:
        clean_extensions(hadoop_custom_extensions_local_dir)
        if hadoop_custom_extensions_enabled:
            download_extensions(hadoop_custom_extensions_owner,
                                params.user_group,
                                hadoop_custom_extensions_hdfs_dir,
                                hadoop_custom_extensions_local_dir)

    setup_extensions_hive()

    hbase_custom_extensions_services = []
    hbase_custom_extensions_services.append("HBASE")
    if params.current_service in hbase_custom_extensions_services:
        setup_hbase_extensions()
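One subtlety in the snippet above: splitting an empty services string yields [''], so the list always carries a blank entry next to "YARN". That is harmless for the membership test, but a hedged variant that drops blanks could look like this (a sketch of the earlier list comprehension, not additional code to append):

# Hypothetical tightening: skip empty entries produced by splitting "".
hadoop_custom_extensions_services = [
    service.strip().upper()
    for service in hadoop_custom_extensions_services.split(",")
    if service.strip()
]
hadoop_custom_extensions_services.append("YARN")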
Example #12
    def install(self, env):
        install_packages = default(
            '/configurations/kerberos-env/install_packages', "true")
        if install_packages:
            self.install_packages(env)
        else:
            print "Kerberos client packages are not being installed, manual installation is required."

        self.configure(env)
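Note that default() hands back the raw configuration value, so the string "false" would still be truthy in the check above. A stricter test could look like the following sketch (a hypothetical helper, not part of the original script):

def _should_install_packages():
    # Treat only a literal true (string or boolean) as true, since the config
    # value may arrive as the string "false".
    value = default('/configurations/kerberos-env/install_packages', "true")
    return str(value).lower() == "true"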
Example #13
def ldap_client_conf():
    ldap_url = ''
    basedn = default('openldap-config/ldap.domain', 'dc=example,dc=com')
    ldap_hosts = default('clusterHostInfo/openldap_master_hosts', [])
    ldap_hosts_input = default('configurations/zookeeper-env/ldap_hosts', '')
    if ldap_hosts_input.strip() != '':
        ldap_hosts = ldap_hosts_input.split(' ')
        ldap_url = ['ldap://' + item + '/' for item in ldap_hosts]
        ldap_url = ' '.join(ldap_url)
    elif len(ldap_hosts) > 0:
        ldap_url = ['ldap://' + item + '/' for item in ldap_hosts]
        ldap_url = ' '.join(ldap_url)
    if len(ldap_url) > 0:
        Execute("mkdir -p /etc/openldap/cacerts")
        Execute(
            '/usr/sbin/authconfig --enablekrb5 --enableshadow --useshadow --enablelocauthorize --enableldap --enableldapauth --ldapserver="'
            + ldap_url + '" --ldapbasedn="' + basedn + '" --update')
    else:
        raise Fail(u'The LDAP address is empty. Please fill in the LDAP address or install LDAP first.')
Example #14
    def service_check(self, env):
        import params

        # If Ambari IS managing Kerberos identities (kerberos-env/manage_identities = true), it is
        # expected that a (smoke) test principal and its associated keytab file is available for use
        # **  If not available, this service check will fail
        # **  If available, this service check will execute
        #
        # If Ambari IS NOT managing Kerberos identities (kerberos-env/manage_identities = false), the
        # smoke test principal and its associated keytab file may not be available
        # **  If not available, this service check will execute
        # **  If available, this service check will execute

        if ((params.smoke_test_principal is not None)
                and (params.smoke_test_keytab_file is not None)
                and os.path.isfile(params.smoke_test_keytab_file)):
            print "Performing kinit using %s" % params.smoke_test_principal

            ccache_file_name = HASH_ALGORITHM("{0}|{1}".format(
                params.smoke_test_principal,
                params.smoke_test_keytab_file)).hexdigest()
            ccache_file_path = "{0}{1}kerberos_service_check_cc_{2}".format(
                params.tmp_dir, os.sep, ccache_file_name)

            kinit_path_local = functions.get_kinit_path(
                default('/configurations/kerberos-env/executable_search_paths',
                        None))
            kinit_command = "{0} -c {1} -kt {2} {3}".format(
                kinit_path_local, ccache_file_path,
                params.smoke_test_keytab_file, params.smoke_test_principal)

            try:
                # kinit
                Execute(kinit_command,
                        user=params.smoke_user,
                        wait_for_finish=True,
                        tries=params.service_check_retry_count,
                        try_sleep=params.service_check_retry_period_sec)
            finally:
                File(
                    ccache_file_path,
                    # Since kinit might fail to write to the cache file for various reasons, an existence check should be done before cleanup
                    action="delete",
                )
        elif params.manage_identities:
            err_msg = Logger.filter_text(
                "Failed to execute kinit test due to principal or keytab not found or available"
            )
            raise Fail(err_msg)
        else:
            # Ambari is not managing identities so if the smoke user does not exist, indicate why....
            print "Skipping this service check since Ambari is not managing Kerberos identities and the smoke user " \
                  "credentials are not available. To execute this service check, the smoke user principal name " \
                  "and keytab file location must be set in the cluster_env and the smoke user's keytab file must" \
                  "exist in the configured location."
Example #15
def kerberos_client_conf():
    lock_file = base_lock_dir + '/install_kerberos'

    if not os.path.exists(lock_file):
        kerberos_host = default('clusterHostInfo/krb5_master_hosts', [])
        realm = default('configurations/krb5-config/kdc.realm', 'example.com')
        kdc_hosts = default('configurations/zookeeper-env/kdc_hosts', '')

        if kdc_hosts.strip() != '':
            Execute('/usr/sbin/authconfig --enablekrb5 --krb5kdc="' + kdc_hosts +
                    '"  --krb5adminserver="' + kdc_hosts + '"  --krb5realm="' +
                    realm + '"  --update')
        elif len(kerberos_host) > 0:
            shuffle(kerberos_host)
            Execute('/usr/sbin/authconfig --enablekrb5 --krb5kdc="' +
                    ' '.join(kerberos_host) + '"  --krb5adminserver="' +
                    ' '.join(kerberos_host) + '"  --krb5realm="' + realm +
                    '"  --update')

        Execute(" echo 1 > " + lock_file)
Example #16
def ldap_client_conf():
    lock_file = base_lock_dir + '/install_ldap'
    if not os.path.exists(lock_file):
        ldap_url = ''
        basedn = default('openldap-config/ldap.domain', 'dc=example,dc=com')
        ldap_hosts = default('clusterHostInfo/openldap_master_hosts', [])
        ldap_hosts_input = default('configurations/zookeeper-env/ldap_hosts',
                                   '')
        if ldap_hosts_input.strip() != '':
            ldap_hosts = ldap_hosts_input.split(' ')
            ldap_url = ['ldap://' + item + '/' for item in ldap_hosts]
        elif len(ldap_hosts) > 0:
            ldap_url = ['ldap://' + item + '/' for item in ldap_hosts]
        if len(ldap_url) > 0:
            # Shuffle the host list before joining; shuffling the joined string would raise a TypeError.
            shuffle(ldap_url)
            ldap_url = ' '.join(ldap_url)
            Execute("mkdir -p /etc/openldap/cacerts")
            Execute(
                '/usr/sbin/authconfig --enablemkhomedir --enableshadow --useshadow --enablelocauthorize --enableldap --enableldapauth --ldapserver="'
                + ldap_url + '" --ldapbasedn="' + basedn + '" --update')
        Execute("echo 'threads 1' >>/etc/nslcd.conf")
        Execute("systemctl restart nslcd nscd")
        Execute(" echo 1 > " + lock_file)
Example #17
def conf_dns():
    lock_file = base_lock_dir + '/install_dns'
    dns_file = '/etc/resolv.conf'
    # add dns record
    hostname = get_hostname()
    ip = get_ip()
    dns_hosts = default('clusterHostInfo/dns_hosts', [])
    if len(dns_hosts) > 0 and not os.path.exists(lock_file):
        # list.append() returns None, so append first and then join the list itself.
        dns_hosts.append('8.8.8.8')
        nameserver = 'nameserver ' + '\nnameserver '.join(dns_hosts)
        Execute('echo -ne "' + nameserver + '" > ' + dns_file)
        for dns_host in dns_hosts:
            Execute('curl -XPOST -d "hostname=' + hostname + '&ip=' + ip +
                    '" http://' + dns_host + ':8088/')

        Execute(" echo 1 > " + lock_file)
Example #18
def install_common_share_lib():
    import params
    share_dir = '/usr/share/java/common/'
    Directory(
        share_dir,
        owner='hdfs',
        group=params.user_group,
        create_parents=True,
        mode=0755)

    share_jar_files_conf = default(
        "/configurations/hadoop-env/common_share_jars", '').strip()
    if share_jar_files_conf != '':
        share_jar_files = share_jar_files_conf.split(',')
        for jar_file in share_jar_files:
            jar_file_path = share_dir + jar_file.strip()
            if not os.path.exists(jar_file_path):
                Execute('wget ' + download_url_base + '/share/common/' + jar_file + ' -O ' + jar_file_path,
                        user='******')
Example #19
    def configure(self, env, upgrade_type=None):
        import params

        # The configure command doesn't actually receive the upgrade_type from Script.py, so get it from the config dictionary
        if upgrade_type is None:
            upgrade_type = Script.get_upgrade_type(
                default("/commandParams/upgrade_type", ""))

        if upgrade_type is not None and params.upgrade_direction == Direction.UPGRADE and params.version is not None:
            Logger.info(
                format(
                    "Configuring Oozie during upgrade type: {upgrade_type}, direction: {params.upgrade_direction}, and version {params.version}"
                ))
            if params.version and check_stack_feature(
                    StackFeature.ROLLING_UPGRADE, params.version):
                stack_select.select_packages(params.version)

        env.set_params(params)
        oozie(is_server=True, upgrade_type=upgrade_type)
Example #20
def build_zookeeper_hosts():
    zookeeper_hosts_length = len(zookeeper_hosts_list)
    response = ''
    for i, val in enumerate(zookeeper_hosts_list):
        response += val + ':' + zk_client_port
        if (i + 1) < zookeeper_hosts_length:
            response += ','
    return response
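
# The loop above is equivalent to a single join; a hypothetical variant for
# comparison, assuming the same module-level zookeeper_hosts_list and
# zk_client_port defined below:
def build_zookeeper_hosts_joined():
    return ','.join(host + ':' + zk_client_port for host in zookeeper_hosts_list)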


config = Script.get_config()

java64_home = config['hostLevelParams']['java_home']
hostname = config['hostname']
zk_client_port = str(default('/configurations/zoo.cfg/clientPort', None))
zookeeper_hosts_list = config['clusterHostInfo']['zookeeper_hosts']
zookeeper_hosts = build_zookeeper_hosts()

map_solr_config = config['configurations']['solr-config-env']
solr_config_user = map_solr_config['solr_config_user']
solr_hdfs_home_directory = format('/user/{solr_config_user}')
solr_config_group = map_solr_config['solr_config_group']
solr_config_port = status_params.solr_config_port
solr_config_memory = map_solr_config['solr_config_memory']
solr_config_log_dir = map_solr_config['solr_config_log_dir']
solr_config_service_log_dir = map_solr_config['solr_config_service_log_dir']
solr_config_service_log_file = format('{solr_config_service_log_dir}/solr-service.log')
solr_config_conf_dir = map_solr_config['solr_config_conf_dir']
solr_config_home_dir = map_solr_config['solr_config_home_dir']
solr_stop_key = map_solr_config['solr_stop_key']
Example #21
indexing_configured_flag_file = metron_zookeeper_config_path + '/../metron_indexing_configured'
indexing_acl_configured_flag_file = metron_zookeeper_config_path + '/../metron_indexing_acl_configured'
indexing_hdfs_perm_configured_flag_file = metron_zookeeper_config_path + '/../metron_indexing_hdfs_perm_configured'
indexing_hbase_configured_flag_file = metron_zookeeper_config_path + '/../metron_indexing_hbase_configured'
indexing_hbase_acl_configured_flag_file = metron_zookeeper_config_path + '/../metron_indexing_hbase_acl_configured'

# REST
metron_rest_port = config['configurations']['metron-rest-env']['metron_rest_port']

# UI
metron_management_ui_port = config['configurations']['metron-management-ui-env']['metron_management_ui_port']

# Storm
storm_rest_addr = config['configurations']['metron-env']['storm_rest_addr']

# Zeppelin
zeppelin_server_url = config['configurations']['metron-env']['zeppelin_server_url']

# Security
stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
stack_version_formatted = format_stack_version(stack_version_unformatted)
hostname = config['hostname']
security_enabled = config['configurations']['cluster-env']['security_enabled']
kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
tmp_dir = Script.get_tmp_dir()

metron_user = config['configurations']['metron-env']['metron_user']

metron_principal_name = config['configurations']['metron-env']['metron_principal_name']
metron_keytab_path = config['configurations']['metron-env']['metron_service_keytab']
Example #22
from resource_management.libraries.functions import default
from resource_management.libraries.functions import format_jvm_option
from resource_management.libraries.functions import format
from resource_management.libraries.functions.version import format_stack_version, compare_versions
from ambari_commons.os_check import OSCheck
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.resources.hdfs_resource import HdfsResource

config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
artifact_dir = tmp_dir + "/AMBARI-artifacts"

# Global flag enabling or disabling the sysprep feature
host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)

# Whether to skip copying fast-hdfs-resource.jar to /var/lib/ambari-agent/lib/
# This is required if tarballs are going to be copied to HDFS, so set to False
sysprep_skip_copy_fast_jar_hdfs = host_sys_prepped and default("/configurations/cluster-env/sysprep_skip_copy_fast_jar_hdfs", False)

# Whether to skip setting up the unlimited key JCE policy
sysprep_skip_setup_jce = host_sys_prepped and default("/configurations/cluster-env/sysprep_skip_setup_jce", False)

stack_version_unformatted = config['hostLevelParams']['stack_version']
stack_version_formatted = format_stack_version(stack_version_unformatted)

dfs_type = default("/commandParams/dfs_type", "")
stack_root = Script.get_stack_root()
hadoop_conf_dir = "/etc/hadoop/conf"
component_list = default("/localComponents", [])
Example #23
namenode_opt_newsize = config['configurations']['hadoop-env'][
    'namenode_opt_newsize']
namenode_opt_maxnewsize = config['configurations']['hadoop-env'][
    'namenode_opt_maxnewsize']
namenode_opt_permsize = format_jvm_option(
    "/configurations/hadoop-env/namenode_opt_permsize", "128m")
namenode_opt_maxpermsize = format_jvm_option(
    "/configurations/hadoop-env/namenode_opt_maxpermsize", "256m")

jtnode_opt_newsize = "200m"
jtnode_opt_maxnewsize = "200m"
jtnode_heapsize = "1024m"
ttnode_heapsize = "1024m"

dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
mapred_pid_dir_prefix = default(
    "/configurations/mapred-env/mapred_pid_dir_prefix",
    "/var/run/hadoop-mapreduce")
mapred_log_dir_prefix = default(
    "/configurations/mapred-env/mapred_log_dir_prefix",
    "/var/log/hadoop-mapreduce")

#users and groups
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
user_group = config['configurations']['cluster-env']['user_group']

namenode_host = default("/clusterHostInfo/namenode_host", [])
has_namenode = not len(namenode_host) == 0

if has_namenode:
    hadoop_conf_dir = conf_select.get_hadoop_conf_dir(
        force_latest_on_upgrade=True)
Example #24

from ambari_commons.constants import AMBARI_SUDO_BINARY
from ambari_commons.str_utils import cbool, cint
from resource_management.libraries.functions.version import format_stack_version, compare_versions
from resource_management.core.system import System
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions import default, format

config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
sudo = AMBARI_SUDO_BINARY

stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
agent_stack_retry_on_unavailability = cbool(
    default("/hostLevelParams/agent_stack_retry_on_unavailability", None))
agent_stack_retry_count = cint(
    default("/hostLevelParams/agent_stack_retry_count", None))
stack_version_formatted = format_stack_version(stack_version_unformatted)

#users and groups
hbase_user = config['configurations']['hbase-env']['hbase_user']
smoke_user = config['configurations']['cluster-env']['smokeuser']
gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
gmond_user = config['configurations']['ganglia-env']["gmond_user"]
tez_user = config['configurations']['tez-env']["tez_user"]

user_group = config['configurations']['cluster-env']['user_group']
proxyuser_group = default("/configurations/hadoop-env/proxyuser_group",
                          "users")
Example #25
import os

from ambari_commons.constants import AMBARI_SUDO_BINARY
from resource_management.libraries.script import Script
from resource_management.libraries.script.script import get_config_lock_file
from resource_management.libraries.functions import default
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import format_jvm_option
from resource_management.libraries.functions.version import format_stack_version, get_major_version
from string import lower

config = Script.get_config()
tmp_dir = Script.get_tmp_dir()

dfs_type = default("/commandParams/dfs_type", "")

is_parallel_execution_enabled = int(
    default("/agentConfigParams/agent/parallel_execution", 0)) == 1
host_sys_prepped = default("/ambariLevelParams/host_sys_prepped", False)

sudo = AMBARI_SUDO_BINARY

stack_version_unformatted = config['clusterLevelParams']['stack_version']
stack_version_formatted = format_stack_version(stack_version_unformatted)
major_stack_version = get_major_version(stack_version_formatted)

# service name
service_name = config['serviceName']

# logsearch configuration
Example #26
indexing_hbase_acl_configured_flag_file = metron_zookeeper_config_path + '/../metron_indexing_hbase_acl_configured'

# REST
metron_rest_port = config['configurations']['metron-rest-env'][
    'metron_rest_port']

# UI
metron_management_ui_port = config['configurations'][
    'metron-management-ui-env']['metron_management_ui_port']

# Storm
storm_rest_addr = config['configurations']['metron-env']['storm_rest_addr']

# Zeppelin
zeppelin_server_url = config['configurations']['metron-env'][
    'zeppelin_server_url']

# Security
stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
stack_version_formatted = format_stack_version(stack_version_unformatted)
hostname = config['hostname']
security_enabled = config['configurations']['cluster-env']['security_enabled']
kinit_path_local = get_kinit_path(
    default('/configurations/kerberos-env/executable_search_paths', None))
tmp_dir = Script.get_tmp_dir()

metron_principal_name = config['configurations']['metron-env'][
    'metron_principal_name']
metron_keytab_path = config['configurations']['metron-env'][
    'metron_service_keytab']
Example #27
from resource_management.libraries.script import Script
from resource_management.libraries.functions import default
from resource_management.libraries.functions import format
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import hdp_select
from resource_management.libraries.functions import format_jvm_option
from resource_management.libraries.functions.version import format_hdp_stack_version
from resource_management.libraries.functions.version import compare_versions
from ambari_commons.os_check import OSCheck


config = Script.get_config()
tmp_dir = Script.get_tmp_dir()

artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
jce_location = config['hostLevelParams']['jdk_location']
jdk_name = default("/hostLevelParams/jdk_name", None)
java_home = config['hostLevelParams']['java_home']
java_version = int(config['hostLevelParams']['java_version'])

ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]

stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)

security_enabled = config['configurations']['cluster-env']['security_enabled']
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']

# Some datanode settings
dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None)
Example #28
#security params
security_enabled = config['configurations']['cluster-env']['security_enabled']

#users and groups
has_hadoop_env = 'hadoop-env' in config['configurations']
mapred_user = config['configurations']['mapred-env']['mapred_user']
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
yarn_user = config['configurations']['yarn-env']['yarn_user']

user_group = config['configurations']['cluster-env']['user_group']

#hosts
hostname = config["hostname"]
ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
rm_host = default("/clusterHostInfo/rm_host", [])
slave_hosts = default("/clusterHostInfo/slave_hosts", [])
oozie_servers = default("/clusterHostInfo/oozie_server", [])
hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
hs_host = default("/clusterHostInfo/hs_host", [])
jtnode_host = default("/clusterHostInfo/jtnode_host", [])
namenode_host = default("/clusterHostInfo/namenode_host", [])
zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])

has_namenode = not len(namenode_host) == 0
has_resourcemanager = not len(rm_host) == 0
has_slaves = not len(slave_hosts) == 0
Example #29
from resource_management.libraries.functions.version import compare_versions
from resource_management.libraries.resources import HdfsDirectory

import status_params
import itertools
import os

# server configurations
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
sudo = AMBARI_SUDO_BINARY

hostname = config["hostname"]

# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
version = default("/commandParams/version", None)
stack_name = default("/hostLevelParams/stack_name", None)
upgrade_direction = default("/commandParams/upgrade_direction", None)

stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)

#hadoop params
if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
  # start out assuming client libraries
  hadoop_bin_dir = "/usr/hdp/current/hadoop-client/bin"
  hadoop_lib_home = "/usr/hdp/current/hadoop-client/lib"

  # if this is a server action, then use the server binaries; smoke tests
  # use the client binaries
  server_role_dir_mapping = { 'OOZIE_SERVER' : 'oozie-server',
Example #30
from ambari_commons.constants import AMBARI_SUDO_BINARY
from resource_management.libraries.script import Script
from resource_management.libraries.functions import default
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import hdp_select
from resource_management.libraries.functions import format_jvm_option
from resource_management.libraries.functions.version import format_hdp_stack_version

config = Script.get_config()
sudo = AMBARI_SUDO_BINARY

stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)

# current host stack version
current_version = default("/hostLevelParams/current_version", None)

# default hadoop params
mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
hadoop_libexec_dir = hdp_select.get_hadoop_dir("libexec")
hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"

# HDP 2.2+ params
if Script.is_hdp_stack_greater_or_equal("2.2"):
  mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"

  # not supported in HDP 2.2+
  hadoop_conf_empty_dir = None

versioned_hdp_root = '/usr/hdp/current'
Example #31
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import default
from resource_management.libraries.functions import format_jvm_option
from resource_management.libraries.functions import format
from resource_management.libraries.functions.version import format_stack_version, compare_versions
from ambari_commons.os_check import OSCheck
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.resources.hdfs_resource import HdfsResource

config = Script.get_config()

# Whether to skip copying fast-hdfs-resource.jar to /var/lib/ambari-agent/lib/
# This is required if tarballs are going to be copied to HDFS, so set to False
sysprep_skip_copy_fast_jar_hdfs = default(
    "/configurations/cluster-env/sysprep_skip_copy_fast_jar_hdfs", False)

stack_version_unformatted = config['hostLevelParams']['stack_version']
stack_version_formatted = format_stack_version(stack_version_unformatted)

dfs_type = default("/commandParams/dfs_type", "")
hadoop_conf_dir = "/etc/hadoop/conf"
component_list = default("/localComponents", [])

hdfs_tmp_dir = config['configurations']['hadoop-env']['hdfs_tmp_dir']

hadoop_metrics2_properties_content = config['configurations'][
    'hadoop-metrics2.properties']['content']

# hadoop default params
mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
Example #32
from resource_management.libraries.functions.constants import Direction
from resource_management.libraries.functions import default
from resource_management.libraries.script.script import Script

# server configurations
config = Script.get_config()

# upgrade params
stack_name = default("/hostLevelParams/stack_name", None)
upgrade_direction = default("/commandParams/upgrade_direction", Direction.UPGRADE)
stack_version_unformatted = str(config['hostLevelParams']['stack_version'])

flume_conf_dir = '/etc/flume/conf'
if Script.is_hdp_stack_greater_or_equal("2.2"):
  flume_conf_dir = '/usr/hdp/current/flume-server/conf'

flume_user = '******'
flume_group = 'flume'
if 'flume-env' in config['configurations'] and 'flume_user' in config['configurations']['flume-env']:
  flume_user = config['configurations']['flume-env']['flume_user']
Example #33
from ambari_commons.constants import AMBARI_SUDO_BINARY
from ambari_commons.constants import LOGFEEDER_CONF_DIR
from resource_management.libraries.script import Script
from resource_management.libraries.script.script import get_config_lock_file
from resource_management.libraries.functions import default
from resource_management.libraries.functions import format_jvm_option
from resource_management.libraries.functions.version import format_stack_version, get_major_version
from string import lower

config = Script.get_config()
tmp_dir = Script.get_tmp_dir()

versioned_stack_root = Script.get_stack_root()

dfs_type = default("/clusterLevelParams/dfs_type", "")

is_parallel_execution_enabled = int(
    default("/agentConfigParams/agent/parallel_execution", 0)) == 1
host_sys_prepped = default("/ambariLevelParams/host_sys_prepped", False)

sudo = AMBARI_SUDO_BINARY

stack_version_unformatted = config['clusterLevelParams']['stack_version']
stack_version_formatted = format_stack_version(stack_version_unformatted)
major_stack_version = get_major_version(stack_version_formatted)

hadoop_home = versioned_stack_root + '/hadoop'
hadoop_libexec_dir = hadoop_home + "/libexec"
hadoop_lib_home = hadoop_home + '/lib'
# service name
Example #34
indexing_hbase_acl_configured_flag_file = metron_zookeeper_config_path + '/../metron_indexing_hbase_acl_configured'

# Elasticsearch
elasticsearch_template_installed_flag_file = metron_zookeeper_config_path + '/../metron_elasticsearch_template_installed_flag_file'

# REST
metron_rest_port = config['configurations']['metron-rest-env']['metron_rest_port']

# UI
metron_management_ui_port = config['configurations']['metron-management-ui-env']['metron_management_ui_port']
metron_alerts_ui_port = config['configurations']['metron-alerts-ui-env']['metron_alerts_ui_port']

# Storm
storm_rest_addr = config['configurations']['metron-env']['storm_rest_addr']

# Zeppelin
zeppelin_server_url = config['configurations']['metron-env']['zeppelin_server_url']

# Security
stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
stack_version_formatted = format_stack_version(stack_version_unformatted)
hostname = config['hostname']
security_enabled = config['configurations']['cluster-env']['security_enabled']
kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
tmp_dir = Script.get_tmp_dir()

metron_user = config['configurations']['metron-env']['metron_user']

metron_principal_name = config['configurations']['metron-env']['metron_principal_name']
metron_keytab_path = config['configurations']['metron-env']['metron_service_keytab']
Example #35
def build_zookeeper_hosts():
    zookeeper_hosts_length = len(zookeeper_hosts_list)
    response = ''
    for i, val in enumerate(zookeeper_hosts_list):
        response += val + ':' + zk_client_port
        if (i + 1) < zookeeper_hosts_length:
            response += ','
    return response


config = Script.get_config()

java64_home = config['hostLevelParams']['java_home']
hostname = config['hostname']
zk_client_port = str(default('/configurations/zoo.cfg/clientPort', None))
zookeeper_hosts_list = config['clusterHostInfo']['zookeeper_hosts']
zookeeper_hosts = build_zookeeper_hosts()

map_solr_config = config['configurations']['solr-config-env']
solr_config_user = map_solr_config['solr_config_user']
solr_hdfs_home_directory = format('/user/{solr_config_user}')
solr_config_group = map_solr_config['solr_config_group']
solr_config_port = status_params.solr_config_port
solr_config_memory = map_solr_config['solr_config_memory']
solr_config_log_dir = map_solr_config['solr_config_log_dir']
solr_config_service_log_dir = map_solr_config['solr_config_service_log_dir']
solr_config_service_log_file = format(
    '{solr_config_service_log_dir}/solr-service.log')
solr_config_conf_dir = map_solr_config['solr_config_conf_dir']
solr_config_data_dir = map_solr_config['solr_config_data_dir']
Example #36
  # deprecated rhel jsvc_path
  jsvc_path = "/usr/libexec/bigtop-utils"
else:
  jsvc_path = "/usr/lib/bigtop-utils"

hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")

jtnode_opt_newsize = "200m"
jtnode_opt_maxnewsize = "200m"
jtnode_heapsize =  "1024m"
ttnode_heapsize = "1024m"

dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")

#users and groups
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
user_group = config['configurations']['cluster-env']['user_group']

namenode_host = default("/clusterHostInfo/namenode_host", [])
has_namenode = not len(namenode_host) == 0

if has_namenode:
  hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
Example #37

from ambari_commons.constants import AMBARI_SUDO_BINARY
from resource_management.libraries.script import Script
from resource_management.libraries.functions import default
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import hdp_select
from resource_management.libraries.functions import format_jvm_option
from resource_management.libraries.functions.version import format_hdp_stack_version

config = Script.get_config()

dfs_type = default("/commandParams/dfs_type", "")

sudo = AMBARI_SUDO_BINARY

stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)

# current host stack version
current_version = default("/hostLevelParams/current_version", None)

# default hadoop params
mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
hadoop_libexec_dir = hdp_select.get_hadoop_dir("libexec")
hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"

# HDP 2.2+ params
Example #38
current_service = config['serviceName']

#security params
security_enabled = config['configurations']['cluster-env']['security_enabled']

#users and groups
mapred_user = config['configurations']['mapred-env']['mapred_user']
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
yarn_user = config['configurations']['yarn-env']['yarn_user']

user_group = config['configurations']['cluster-env']['user_group']

#hosts
hostname = config["hostname"]
ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
rm_host = default("/clusterHostInfo/rm_host", [])
slave_hosts = default("/clusterHostInfo/slave_hosts", [])
oozie_servers = default("/clusterHostInfo/oozie_server", [])
hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
hs_host = default("/clusterHostInfo/hs_host", [])
jtnode_host = default("/clusterHostInfo/jtnode_host", [])
namenode_host = default("/clusterHostInfo/namenode_host", [])
zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])

has_namenode = not len(namenode_host) == 0
has_resourcemanager = not len(rm_host) == 0
has_slaves = not len(slave_hosts) == 0
Example #39
from resource_management.libraries.script import Script
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions import default, format

config = Script.get_config()

pid_dir = config['configurations']['storm-env']['storm_pid_dir']
pid_nimbus = format("{pid_dir}/nimbus.pid")
pid_supervisor = format("{pid_dir}/supervisor.pid")
pid_drpc = format("{pid_dir}/drpc.pid")
pid_ui = format("{pid_dir}/ui.pid")
pid_logviewer = format("{pid_dir}/logviewer.pid")
pid_rest_api = format("{pid_dir}/restapi.pid")
pid_files = {"logviewer":pid_logviewer,
             "ui": pid_ui,
             "nimbus": pid_nimbus,
             "supervisor": pid_supervisor,
             "drpc": pid_drpc,
             "rest_api": pid_rest_api}

# Security related/required params
hostname = config['hostname']
security_enabled = config['configurations']['cluster-env']['security_enabled']
kinit_path_local = get_kinit_path()
tmp_dir = Script.get_tmp_dir()
conf_dir = "/etc/storm/conf"
storm_user = config['configurations']['storm-env']['storm_user']
storm_ui_principal = default('/configurations/storm-env/storm_ui_principal_name', None)
storm_ui_keytab = default('/configurations/storm-env/storm_ui_keytab', None)
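These pid paths are what a status command typically feeds into the process check; a minimal sketch, assuming the standard check_process_status helper from resource_management:

from resource_management.libraries.functions.check_process_status import check_process_status

def status_sketch(component):
    # Raises ComponentIsNotRunning when the pid file is missing or stale.
    check_process_status(pid_files[component])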
Example #40
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
sudo = AMBARI_SUDO_BINARY

stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)

#users and groups
hbase_user = config['configurations']['hbase-env']['hbase_user']
smoke_user =  config['configurations']['cluster-env']['smokeuser']
gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
gmond_user = config['configurations']['ganglia-env']["gmond_user"]
tez_user = config['configurations']['tez-env']["tez_user"]

user_group = config['configurations']['cluster-env']['user_group']
proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")

hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']

# repo templates
repo_rhel_suse =  config['configurations']['cluster-env']['repo_suse_rhel_template']
repo_ubuntu =  config['configurations']['cluster-env']['repo_ubuntu_template']

#hosts
hostname = config["hostname"]
ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
rm_host = default("/clusterHostInfo/rm_host", [])
slave_hosts = default("/clusterHostInfo/slave_hosts", [])
oozie_servers = default("/clusterHostInfo/oozie_server", [])
hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
Example #41
# for use with <stack-root>/current/<component>
SERVER_ROLE_DIRECTORY_MAP = {'FLINK': 'flink'}

#component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "FLINK_SERVICE")

config = Script.get_config()
stack_root = Script.get_stack_root()
stack_version_unformatted = str(config['clusterLevelParams']['stack_version'])
stack_version_formatted = format_stack_version(stack_version_unformatted)

pid_dir = config['configurations']['flink-env']['flink_pid_dir']
pid_file = format("{pid_dir}/flink.pid")

pid_files = {"flink": pid_file}

# Security related/required params
hostname = config['agentLevelParams']['hostname']
security_enabled = config['configurations']['cluster-env']['security_enabled']
kinit_path_local = get_kinit_path(
    default('/configurations/kerberos-env/executable_search_paths', None))
tmp_dir = Script.get_tmp_dir()

flink_component_home_dir = "/opt/flink"
conf_dir = "/opt/flink/conf"

flink_user = config['configurations']['flink-env']['flink_user']
flink_ui_principal = default('/configurations/flink-env/flink_principal_name',
                             None)
flink_ui_keytab = default('/configurations/flink-env/flink_keytab', None)

stack_name = default("/clusterLevelParams/stack_name", None)
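A typical consumer of flink_ui_principal and flink_ui_keytab is a kinit before calling secured endpoints; a sketch under the assumption that Execute and format are imported as in the other examples:

if security_enabled and flink_ui_principal and flink_ui_keytab:
    # Hypothetical usage: obtain a ticket as the Flink service user.
    Execute(format("{kinit_path_local} -kt {flink_ui_keytab} {flink_ui_principal}"),
            user=flink_user)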
Example #42
metron_random_access_indexing_topology = 'random_access_indexing'
indexing_input_topic = config['configurations']['metron-indexing-env']['indexing_input_topic']
indexing_configured_flag_file = metron_zookeeper_config_path + '/../metron_indexing_configured'
indexing_acl_configured_flag_file = metron_zookeeper_config_path + '/../metron_indexing_acl_configured'
indexing_hdfs_perm_configured_flag_file = metron_zookeeper_config_path + '/../metron_indexing_hdfs_perm_configured'
indexing_hbase_configured_flag_file = metron_zookeeper_config_path + '/../metron_indexing_hbase_configured'
indexing_hbase_acl_configured_flag_file = metron_zookeeper_config_path + '/../metron_indexing_hbase_acl_configured'

# Elasticsearch
elasticsearch_template_installed_flag_file = metron_zookeeper_config_path + '/../metron_elasticsearch_template_installed_flag_file'

# Solr
solr_schema_installed_flag_file = metron_zookeeper_config_path + '/../metron_solr_schema_installed_flag_file'

# REST
metron_rest_host = default("/clusterHostInfo/metron_rest_hosts", [hostname])[0]
metron_rest_port = config['configurations']['metron-rest-env']['metron_rest_port']
rest_kafka_configured_flag_file = metron_zookeeper_config_path + '/../metron_rest_kafka_configured'
rest_kafka_acl_configured_flag_file = metron_zookeeper_config_path + '/../metron_rest_kafka_acl_configured'
rest_hbase_configured_flag_file = metron_zookeeper_config_path + '/../metron_rest_hbase_configured'
rest_hbase_acl_configured_flag_file = metron_zookeeper_config_path + '/../metron_rest_hbase_acl_configured'
user_settings_hbase_table = config['configurations']['metron-rest-env']['user_settings_hbase_table']
user_settings_hbase_cf = config['configurations']['metron-rest-env']['user_settings_hbase_cf']

# Alerts UI
metron_alerts_ui_host = default("/clusterHostInfo/metron_alerts_ui_hosts", [hostname])[0]
metron_alerts_ui_port = config['configurations']['metron-alerts-ui-env']['metron_alerts_ui_port']

# Management UI
metron_management_ui_host = default("/clusterHostInfo/metron_management_ui_hosts", [hostname])[0]
metron_management_ui_port = config['configurations']['metron-management-ui-env']['metron_management_ui_port']
Example #43
from resource_management.libraries.functions.expect import expect
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.stack_features import get_stack_feature_version
from resource_management.libraries.functions.get_architecture import get_architecture
from ambari_commons.constants import AMBARI_SUDO_BINARY
from resource_management.libraries.functions.namenode_ha_utils import get_properties_for_all_nameservices, namenode_federation_enabled

config = Script.get_config()
tmp_dir = Script.get_tmp_dir()

stack_root = Script.get_stack_root()

architecture = get_architecture()

dfs_type = default("/clusterLevelParams/dfs_type", "")

artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
jdk_name = default("/ambariLevelParams/jdk_name", None)
java_home = config['ambariLevelParams']['java_home']
java_version = expect("/ambariLevelParams/java_version", int)
jdk_location = config['ambariLevelParams']['jdk_location']

hadoop_custom_extensions_enabled = default(
    "/configurations/core-site/hadoop.custom-extensions.enabled", False)

sudo = AMBARI_SUDO_BINARY

ambari_server_hostname = config['ambariLevelParams']['ambari_server_host']

stack_version_unformatted = config['clusterLevelParams']['stack_version']
Example #44
  pid_supervisor = format("{pid_dir}/supervisor.pid")
  pid_drpc = format("{pid_dir}/drpc.pid")
  pid_ui = format("{pid_dir}/ui.pid")
  pid_logviewer = format("{pid_dir}/logviewer.pid")
  pid_rest_api = format("{pid_dir}/restapi.pid")

  pid_files = {
    "logviewer":pid_logviewer,
    "ui": pid_ui,
    "nimbus": pid_nimbus,
    "supervisor": pid_supervisor,
    "drpc": pid_drpc,
    "rest_api": pid_rest_api
  }

  # Security related/required params
  hostname = config['hostname']
  security_enabled = config['configurations']['cluster-env']['security_enabled']
  kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
  tmp_dir = Script.get_tmp_dir()

  storm_component_home_dir = "/usr/lib/storm"
  conf_dir = "/etc/storm/conf"
  if Script.is_hdp_stack_greater_or_equal("2.2"):
    storm_component_home_dir = format("/usr/hdp/current/{component_directory}")
    conf_dir = format("/usr/hdp/current/{component_directory}/conf")

  storm_user = config['configurations']['storm-env']['storm_user']
  storm_ui_principal = default('/configurations/storm-env/storm_ui_principal_name', None)
  storm_ui_keytab = default('/configurations/storm-env/storm_ui_keytab', None)