Example #1
def install_lzo_if_needed():
    """
  Install lzo package if {#should_install_lzo} is true
  """
    if not should_install_lzo():
        return

    lzo_packages = get_lzo_packages()

    config = Script.get_config()
    agent_stack_retry_on_unavailability = config['hostLevelParams'][
        'agent_stack_retry_on_unavailability']
    agent_stack_retry_count = expect(
        "/hostLevelParams/agent_stack_retry_count", int)

    Package(lzo_packages,
            retry_on_repo_unavailability=agent_stack_retry_on_unavailability,
            retry_count=agent_stack_retry_count)
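
All of these snippets read typed values out of the command JSON returned by Script.get_config(), either by plain dictionary access (config['hostLevelParams'][...]) or via expect("/hostLevelParams/...", int), which resolves a slash-separated path and casts the result, optionally falling back to a default. A minimal self-contained sketch of that lookup pattern (expect_like and sample_config are hypothetical stand-ins, not the real resource_management implementation):

def expect_like(config, path, expected_type, default_value=None):
    # Walk the nested command dictionary along a slash-separated path,
    # cast the final value to the expected type, and return the default
    # when any key along the path is missing.
    value = config
    for key in path.strip("/").split("/"):
        if not isinstance(value, dict) or key not in value:
            return default_value
        value = value[key]
    return expected_type(value)

sample_config = {"hostLevelParams": {"agent_stack_retry_count": "5"}}
retry_count = expect_like(sample_config, "/hostLevelParams/agent_stack_retry_count", int)  # -> 5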
Example #2
File: lzo_utils.py  Project: vion1/ambari
def install_lzo_if_needed():
  """
  Install lzo package if {#should_install_lzo} is true
  """
  if not should_install_lzo():
    return

  # If the user has just accepted the GPL license, the GPL repository may not be present yet.
  Script.repository_util.create_repo_files()

  lzo_packages = get_lzo_packages()

  config = Script.get_config()
  agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
  agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)

  Package(lzo_packages,
          retry_on_repo_unavailability=agent_stack_retry_on_unavailability,
          retry_count=agent_stack_retry_count
  )
Example #3
def install_lzo_if_needed():
  """
  Install lzo package if {#should_install_lzo} is true
  """
  if not should_install_lzo():
    return

  if skip_package_operations():
    Logger.info("Skipping LZO package installation as host is sys prepped")
    return

  # If the user has just accepted the GPL license, the GPL repository may not be present yet.
  Script.repository_util.create_repo_files()

  lzo_packages = get_lzo_packages()

  config = Script.get_config()
  agent_stack_retry_on_unavailability = config['ambariLevelParams']['agent_stack_retry_on_unavailability']
  agent_stack_retry_count = expect("/ambariLevelParams/agent_stack_retry_count", int)

  Package(lzo_packages,
          retry_on_repo_unavailability=agent_stack_retry_on_unavailability,
          retry_count=agent_stack_retry_count
  )
Example #4
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions.expect import expect
from ambari_commons.ambari_metrics_helper import select_metric_collector_hosts_from_hostnames
from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs, generate_ranger_service_config
from resource_management.libraries.functions.constants import Direction
from resource_management.libraries.functions.version import get_major_version

# server configurations
config = Script.get_config()
exec_tmp_dir = Script.get_tmp_dir()
sudo = AMBARI_SUDO_BINARY

stack_name = status_params.stack_name
agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
version = default("/commandParams/version", None)
component_directory = status_params.component_directory
etc_prefix_dir = "/etc/hbase"

stack_version_unformatted = status_params.stack_version_unformatted
stack_version_formatted = status_params.stack_version_formatted
major_stack_version = get_major_version(stack_version_formatted)
stack_root = status_params.stack_root

# get the correct version to use for checking stack features
version_for_stack_feature_checks = get_stack_feature_version(config)

stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
Example #5
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()

stack_name = status_params.stack_name
stack_root = Script.get_stack_root()
upgrade_direction = default("/commandParams/upgrade_direction", None)
rolling_restart = default("/commandParams/rolling_restart", False)
rolling_restart_safemode_exit_timeout = default(
    "/configurations/cluster-env/namenode_rolling_restart_safemode_exit_timeout",
    None)
stack_version_unformatted = config['hostLevelParams']['stack_version']
stack_version_formatted = format_stack_version(stack_version_unformatted)
agent_stack_retry_on_unavailability = config['hostLevelParams'][
    'agent_stack_retry_on_unavailability']
agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count",
                                 int)

# there is a stack upgrade which has not yet been finalized; it's currently suspended
upgrade_suspended = default("roleParams/upgrade_suspended", False)

# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade
version = default("/commandParams/version", None)

# The desired role is only available during a Non-Rolling Upgrade in HA.
# The server calculates which of the two NameNodes will be the active one and which the standby,
# since they are started using different commands.
desired_namenode_role = default("/commandParams/desired_namenode_role", None)

# get the correct version to use for checking stack features
version_for_stack_feature_checks = get_stack_feature_version(config)
Example #6
kafka_user_nproc_limit = default(
    "/configurations/kafka-env/kafka_user_nproc_limit", None)

kafka_user = config['configurations']['kafka-env']['kafka_user']
kafka_log_dir = config['configurations']['kafka-env']['kafka_log_dir']
kafka_pid_dir = status_params.kafka_pid_dir
kafka_pid_file = kafka_pid_dir + "/kafka.pid"
# These paths are hardcoded in the Kafka bash process lifecycle, over which we have no control
kafka_managed_pid_dir = "/var/run/kafka"
kafka_managed_log_dir = "/var/log/kafka"
user_group = config['configurations']['cluster-env']['user_group']
java64_home = config['hostLevelParams']['java_home']
kafka_env_sh_template = config['configurations']['kafka-env']['content']
kafka_hosts = config['clusterHostInfo']['kafka_broker_hosts']
kafka_hosts.sort()
zk_session_timeout = expect(
    "/configurations/kafka-broker/zookeeper.session.timeout.ms", int)

zookeeper_hosts = config['clusterHostInfo']['zookeeper_hosts']
zookeeper_hosts.sort()

if (('kafka-log4j' in config['configurations'])
        and ('content' in config['configurations']['kafka-log4j'])):
    log4j_props = config['configurations']['kafka-log4j']['content']
else:
    log4j_props = None

metric_collector_host = ""
metric_collector_port = ""

ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
has_metric_collector = not len(ams_collector_hosts) == 0
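
Note that the property name in the zk_session_timeout lookup above keeps its dots; only the slashes separate nesting levels in the path. With the hypothetical expect_like sketch from Example #1, the same lookup would be:

broker_config = {"configurations": {"kafka-broker": {"zookeeper.session.timeout.ms": "30000"}}}
zk_session_timeout = expect_like(broker_config, "/configurations/kafka-broker/zookeeper.session.timeout.ms", int)  # -> 30000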
Example #7
  if i == 1 or (i > 1 and is_atlas_ha_enabled is False):
    additional_props["atlas.server.ha.enabled"] = "false"
  elif i > 1:
    additional_props["atlas.server.ha.enabled"] = "true"

  return additional_props
  
# server configurations
config = Script.get_config()
exec_tmp_dir = Script.get_tmp_dir()
stack_root = Script.get_stack_root()

# Needed since this is an Atlas Hook service.
cluster_name = config['clusterName']

java_version = expect("/hostLevelParams/java_version", int)

zk_root = default('/configurations/application-properties/atlas.server.ha.zookeeper.zkroot', '/apache_atlas')
stack_supports_zk_security = check_stack_feature(StackFeature.SECURE_ZOOKEEPER, version_for_stack_feature_checks)
atlas_kafka_group_id = default('/configurations/application-properties/atlas.kafka.hook.group.id', None)

if security_enabled:
  _hostname_lowercase = config['hostname'].lower()
  _atlas_principal_name = config['configurations']['application-properties']['atlas.authentication.principal']
  atlas_jaas_principal = _atlas_principal_name.replace('_HOST',_hostname_lowercase)
  atlas_keytab_path = config['configurations']['application-properties']['atlas.authentication.keytab']

# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade
version = default("/commandParams/version", None)

# stack version
Example #8
smokeuser = config['configurations']['cluster-env']['smokeuser']
hbase_root_dir = config['configurations']['ams-hbase-site']['hbase.rootdir']
hbase_pid_dir = status_params.hbase_pid_dir

is_hbase_distributed = config['configurations']['ams-hbase-site']['hbase.cluster.distributed']
is_local_fs_rootdir = hbase_root_dir.startswith('file://')

# security is disabled for embedded mode, when HBase is backed by file
security_enabled = False if not is_hbase_distributed else config['configurations']['cluster-env']['security_enabled']

# this is "hadoop-metrics.properties" for 1.x stacks
metric_prop_file_name = "hadoop-metrics2-hbase.properties"

# not supporting 32 bit jdk.
java64_home = config['hostLevelParams']['java_home']
java_version = expect("/hostLevelParams/java_version", int)

metrics_collector_heapsize = default('/configurations/ams-env/metrics_collector_heapsize', "512")
metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
skip_disk_metrics_patterns = default("/configurations/ams-env/timeline.metrics.skip.disk.metrics.patterns", None)

hbase_log_dir = config['configurations']['ams-hbase-env']['hbase_log_dir']
hbase_classpath_additional = default("/configurations/ams-hbase-env/hbase_classpath_additional", None)
master_heapsize = config['configurations']['ams-hbase-env']['hbase_master_heapsize']
regionserver_heapsize = config['configurations']['ams-hbase-env']['hbase_regionserver_heapsize']

# Check if hbase java options already have appended "m". If Yes, remove the trailing m.
metrics_collector_heapsize = check_append_heap_property(str(metrics_collector_heapsize), "m")
master_heapsize = check_append_heap_property(str(master_heapsize), "m")
regionserver_heapsize = check_append_heap_property(str(regionserver_heapsize), "m")
Example #9
"""

from ambari_commons.constants import AMBARI_SUDO_BINARY
from resource_management.libraries.functions.version import format_stack_version, compare_versions
from resource_management.core.system import System
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions import default, format
from resource_management.libraries.functions.expect import expect

config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
sudo = AMBARI_SUDO_BINARY

stack_version_unformatted = config['clusterLevelParams']['stack_version']
agent_stack_retry_on_unavailability = config['ambariLevelParams']['agent_stack_retry_on_unavailability']
agent_stack_retry_count = expect("/ambariLevelParams/agent_stack_retry_count", int)
stack_version_formatted = format_stack_version(stack_version_unformatted)

#users and groups
hbase_user = config['configurations']['hbase-env']['hbase_user']
smoke_user =  config['configurations']['cluster-env']['smokeuser']
gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
gmond_user = config['configurations']['ganglia-env']["gmond_user"]
tez_user = config['configurations']['tez-env']["tez_user"]

user_group = config['configurations']['cluster-env']['user_group']
proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")

hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']

# repo templates
Example #10
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements.  See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.  The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License.  You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ambari_commons import OSCheck
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.expect import expect

if OSCheck.is_windows_family():
    pass
else:
    from params_linux import *

java_home = config['hostLevelParams']['java_home']
java_version = expect("/hostLevelParams/java_version", int)

host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
Example #11
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions.expect import expect
from ambari_commons.ambari_metrics_helper import select_metric_collector_hosts_from_hostnames
from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs, generate_ranger_service_config
from resource_management.libraries.functions.constants import Direction
from resource_management.libraries.functions.version import get_major_version

# server configurations
config = Script.get_config()
exec_tmp_dir = Script.get_tmp_dir()
sudo = AMBARI_SUDO_BINARY

stack_name = status_params.stack_name
agent_stack_retry_on_unavailability = config['ambariLevelParams']['agent_stack_retry_on_unavailability']
agent_stack_retry_count = expect("/ambariLevelParams/agent_stack_retry_count", int)
version = default("/commandParams/version", None)
component_directory = status_params.component_directory
etc_prefix_dir = "/etc/hbase"

stack_version_unformatted = status_params.stack_version_unformatted
stack_version_formatted = status_params.stack_version_formatted
major_stack_version = get_major_version(stack_version_formatted)
stack_root = status_params.stack_root

# get the correct version to use for checking stack features
version_for_stack_feature_checks = get_stack_feature_version(config)

stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
Example #12
    default('/configurations/kerberos-env/executable_search_paths', None))
yarn_http_policy = config['configurations']['yarn-site']['yarn.http.policy']
yarn_https_on = (yarn_http_policy.upper() == 'HTTPS_ONLY')
rm_hosts = config['clusterHostInfo']['resourcemanager_hosts']
rm_host = rm_hosts[0]
rm_port = config['configurations']['yarn-site'][
    'yarn.resourcemanager.webapp.address'].split(':')[-1]
rm_https_port = default(
    '/configurations/yarn-site/yarn.resourcemanager.webapp.https.address',
    ":8090").split(':')[-1]

java64_home = config['ambariLevelParams']['java_home']
java_exec = format("{java64_home}/bin/java")
hadoop_ssl_enabled = default("/configurations/core-site/hadoop.ssl.enabled",
                             False)
java_version = expect("/ambariLevelParams/java_version", int)

yarn_heapsize = config['configurations']['yarn-env']['yarn_heapsize']
resourcemanager_heapsize = config['configurations']['yarn-env'][
    'resourcemanager_heapsize']
nodemanager_heapsize = config['configurations']['yarn-env'][
    'nodemanager_heapsize']
apptimelineserver_heapsize = default(
    "/configurations/yarn-env/apptimelineserver_heapsize", 1024)
ats_leveldb_dir = config['configurations']['yarn-site'][
    'yarn.timeline-service.leveldb-timeline-store.path']
ats_leveldb_lock_file = os.path.join(ats_leveldb_dir,
                                     "leveldb-timeline-store.ldb", "LOCK")
yarn_log_dir_prefix = config['configurations']['yarn-env'][
    'yarn_log_dir_prefix']
yarn_pid_dir_prefix = status_params.yarn_pid_dir_prefix
Example #13
hadoop_bin_dir = "/usr/bin"
daemon_script = "/usr/lib/ams-hbase/bin/hbase-daemon.sh"
region_mover = "/usr/lib/ams-hbase/bin/region_mover.rb"
region_drainer = "/usr/lib/ams-hbase/bin/draining_servers.rb"
hbase_cmd = "/usr/lib/ams-hbase/bin/hbase"

hadoop_conf_dir = '/etc/hadoop'
hbase_conf_dir = "/etc/ams-hbase/conf"

limits_conf_dir = "/etc/security/limits.d"
sudo = AMBARI_SUDO_BINARY

dfs_type = default("/clusterLevelParams/dfs_type", "")

hbase_regionserver_shutdown_timeout = expect(
    '/configurations/ams-hbase-env/hbase_regionserver_shutdown_timeout', int,
    30)

grafana_pid_file = format("{ams_grafana_pid_dir}/grafana-server.pid")
grafana_process_exists_cmd = as_user(
    format("test -f {grafana_pid_file} && ps -p `cat {grafana_pid_file}`"),
    ams_user)

mount_table_content = None
if 'viewfs-mount-table' in config['configurations']:
    xml_inclusion_file_name = 'viewfs-mount-table.xml'
    mount_table = config['configurations']['viewfs-mount-table']

    if 'content' in mount_table and mount_table['content'].strip():
        mount_table_content = mount_table['content']
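
The hbase_regionserver_shutdown_timeout lookup above uses the three-argument form, where the last argument acts as a default when the path is absent from the command JSON. With the hypothetical expect_like sketch from Example #1:

shutdown_timeout = expect_like({}, "/configurations/ams-hbase-env/hbase_regionserver_shutdown_timeout", int, 30)  # -> 30 when unset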
Example #14
hbase_javaopts_properties = config['configurations'][
    'hbase-javaopts-properties']['content']

hbase_javaopts_properties = str(hbase_javaopts_properties)
if hbase_javaopts_properties.find('-Diop.version') == -1:
    iop_full_version = format_stack_version(version)
    hbase_javaopts_properties = hbase_javaopts_properties + ' -Diop.version=' + str(
        iop_full_version)

regionserver_heapsize = ensure_unit_for_memory(
    config['configurations']['hbase-env']['hbase_regionserver_heapsize'])
regionserver_xmn_max = config['configurations']['hbase-env'][
    'hbase_regionserver_xmn_max']
regionserver_xmn_percent = expect(
    "/configurations/hbase-env/hbase_regionserver_xmn_ratio",
    float)  #AMBARI-15614
regionserver_xmn_size = calc_xmn_from_xms(regionserver_heapsize,
                                          regionserver_xmn_percent,
                                          regionserver_xmn_max)

phoenix_hosts = default('/clusterHostInfo/phoenix_query_server_hosts', [])
phoenix_enabled = default('/configurations/hbase-env/phoenix_sql_enabled',
                          False)
has_phoenix = len(phoenix_hosts) > 0

if not has_phoenix and not phoenix_enabled:
    exclude_packages = ['phoenix*']
else:
    exclude_packages = []
Example #15
# Needed since this writes out the Atlas Hive Hook config file.
cluster_name = config['clusterName']
serviceName = config['serviceName']
role = config['role']

hostname = config["hostname"]

# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
version = default("/commandParams/version", None)
stack_name = status_params.stack_name
stack_name_uppercase = stack_name.upper()
upgrade_direction = default("/commandParams/upgrade_direction", None)
agent_stack_retry_on_unavailability = config['hostLevelParams'][
    'agent_stack_retry_on_unavailability']
agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count",
                                 int)

stack_root = status_params.stack_root

# The source stack will be present during a cross-stack upgrade.
# E.g., BigInsights-4.2.5 or HDP-2.6
source_stack = default("/commandParams/source_stack", None)
if source_stack is None:
    source_stack = upgrade_summary.get_source_stack("OOZIE")

# This variable name is important, do not change
source_stack_name = get_stack_name(source_stack)

stack_version_unformatted = status_params.stack_version_unformatted
stack_version_formatted = status_params.stack_version_formatted
major_stack_version = get_major_version(stack_version_formatted)
Example #16
from ambari_commons.constants import AMBARI_SUDO_BINARY
from resource_management.libraries.functions.namenode_ha_utils import get_properties_for_all_nameservices, namenode_federation_enabled

config = Script.get_config()
tmp_dir = Script.get_tmp_dir()

stack_root = Script.get_stack_root()

architecture = get_architecture()

dfs_type = default("/clusterLevelParams/dfs_type", "")

artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
jdk_name = default("/ambariLevelParams/jdk_name", None)
java_home = config['ambariLevelParams']['java_home']
java_version = expect("/ambariLevelParams/java_version", int)
jdk_location = config['ambariLevelParams']['jdk_location']

hadoop_custom_extensions_enabled = default(
    "/configurations/core-site/hadoop.custom-extensions.enabled", False)

sudo = AMBARI_SUDO_BINARY

ambari_server_hostname = config['ambariLevelParams']['ambari_server_host']

stack_version_unformatted = config['clusterLevelParams']['stack_version']
stack_version_formatted = format_stack_version(stack_version_unformatted)

upgrade_type = Script.get_upgrade_type(
    default("/commandParams/upgrade_type", ""))
version = default("/commandParams/version", None)
Example #17
major_stack_version = get_major_version(stack_version_formatted)

#hadoop params
if rpm_version is not None:
    #RPM versioning support
    rpm_version = default("/configurations/hadoop-env/rpm_version", None)

hadoop_native_lib = format("/usr/lib/ams-hbase/lib/hadoop-native")
hadoop_bin_dir = "/usr/bin"
daemon_script = "/usr/lib/ams-hbase/bin/hbase-daemon.sh"
region_mover = "/usr/lib/ams-hbase/bin/region_mover.rb"
region_drainer = "/usr/lib/ams-hbase/bin/draining_servers.rb"
hbase_cmd = "/usr/lib/ams-hbase/bin/hbase"

hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hbase_conf_dir = "/etc/ams-hbase/conf"

limits_conf_dir = "/etc/security/limits.d"
sudo = AMBARI_SUDO_BINARY

dfs_type = default("/clusterLevelParams/dfs_type", "")

hbase_regionserver_shutdown_timeout = expect(
    '/configurations/ams-hbase-env/hbase_regionserver_shutdown_timeout', int,
    30)

grafana_pid_file = format("{ams_grafana_pid_dir}/grafana-server.pid")
grafana_process_exists_cmd = as_user(
    format("test -f {grafana_pid_file} && ps -p `cat {grafana_pid_file}`"),
    ams_user)