Example #1
def install_repos():
    import params

    template = "repo_suse_rhel.j2" if System.get_instance().os_family in ["suse", "redhat"] else "repo_ubuntu.j2"
    _alter_repo("create", params.repo_info, template)
    if params.service_repo_info:
        _alter_repo("create", params.service_repo_info, template)
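Every example on this page hinges on System.get_instance(), which returns a process-wide singleton describing the host OS. As a minimal sketch of that accessor pattern (a hypothetical simplification for orientation, not the library's actual implementation):

class System(object):
    _instance = None

    @classmethod
    def get_instance(cls):
        # Lazily create and cache one System object per process, so every
        # caller shares the same OS-detection result.
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

    @property
    def os_family(self):
        # The real library maps the detected distribution to a family name
        # such as "redhat", "suse", "ubuntu", or "debian" (values inferred
        # from the comparisons in the examples on this page).
        return "redhat"  # placeholder for the sketch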
Example #2
 def get_serivice_params(self):
     self.system = System.get_instance()
     if OSCheck.is_suse_family() or OSCheck.is_ubuntu_family():
         self.service_name = "apache2"
         self.httpd_conf_dir = '/etc/apache2'
     else:
         self.service_name = "httpd"
         self.httpd_conf_dir = '/etc/httpd/conf'
Example #3
def install_repos():
    import params
    template = "repo_suse_rhel.j2" if System.get_instance().os_family in [
        "suse", "redhat"
    ] else "repo_debian.j2"
    _alter_repo("create", params.repo_info, template)
    if params.service_repo_info:
        _alter_repo("create", params.service_repo_info, template)
Example #4
 def get_serivice_params(self):
     self.system = System.get_instance()
     if self.system.os_family in ["suse", "ubuntu"]:
         self.service_name = "apache2"
         self.httpd_conf_dir = '/etc/apache2'
     else:
         self.service_name = "httpd"
         self.httpd_conf_dir = '/etc/httpd/conf'
Example #5
 def get_serivice_params(self):
   self.system = System.get_instance()
   if OSCheck.is_suse_family() or OSCheck.is_ubuntu_family():
     self.service_name = "apache2"
     self.httpd_conf_dir = '/etc/apache2'
   else:
     self.service_name = "httpd"
     self.httpd_conf_dir = '/etc/httpd/conf'
Example #6
 def __package_exists(self, pkg):
     """
 Low level function to check if a rpm is installed
 """
     if System.get_instance().os_family == "suse":
         return not runLocalCmd("zypper search " + pkg)
     else:
         return not runLocalCmd("yum list installed | egrep -i ^" + pkg)
Example #7
 def get_serivice_params(self):
   self.system = System.get_instance()
   if self.system.os_family in ["suse","ubuntu"]:
     self.service_name = "apache2"
     self.httpd_conf_dir = '/etc/apache2'
   else:
     self.service_name = "httpd"
     self.httpd_conf_dir = '/etc/httpd/conf'
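Examples #2/#5 and #4/#7 branch on the same condition in two styles: OSCheck convenience predicates versus an explicit os_family membership test. A side-by-side sketch (the OSCheck import path is an assumption; the *_family() helpers presumably also cover derivative distributions within each family, while the list test matches only the literal family strings):

from resource_management.core.system import System
from ambari_commons.os_check import OSCheck  # import path is an assumption

# Literal membership test, as in Examples #4 and #7:
if System.get_instance().os_family in ["suse", "ubuntu"]:
    service_name = "apache2"

# Predicate style, as in Examples #2 and #5:
if OSCheck.is_suse_family() or OSCheck.is_ubuntu_family():
    service_name = "apache2"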
Example #8
    def oozie_smoke_shell_file(file_name, prepare_hdfs_file_name):
        import params

        File(format("{tmp_dir}/{file_name}"),
             content=StaticFile(file_name),
             mode=0755)
        File(format("{tmp_dir}/{prepare_hdfs_file_name}"),
             content=StaticFile(prepare_hdfs_file_name),
             mode=0755)

        os_family = System.get_instance().os_family
        oozie_examples_dir = glob.glob(params.oozie_examples_regex)[0]

        Execute(format(
            "{tmp_dir}/{prepare_hdfs_file_name} {conf_dir} {oozie_examples_dir} {hadoop_conf_dir} "
        ),
                tries=3,
                try_sleep=5,
                logoutput=True)

        examples_dir = format('/user/{smokeuser}/examples')
        params.HdfsResource(examples_dir,
                            action="delete_on_execute",
                            type="directory")
        params.HdfsResource(examples_dir,
                            action="create_on_execute",
                            type="directory",
                            source=format("{oozie_examples_dir}/examples"),
                            owner=params.smokeuser,
                            group=params.user_group)

        input_data_dir = format('/user/{smokeuser}/input-data')
        params.HdfsResource(input_data_dir,
                            action="delete_on_execute",
                            type="directory")
        params.HdfsResource(
            input_data_dir,
            action="create_on_execute",
            type="directory",
            source=format("{oozie_examples_dir}/examples/input-data"),
            owner=params.smokeuser,
            group=params.user_group)
        params.HdfsResource(None, action="execute")

        if params.security_enabled:
            sh_cmd = format(
                "{tmp_dir}/{file_name} {os_family} {oozie_lib_dir} {conf_dir} {oozie_bin_dir} {oozie_examples_dir} {hadoop_conf_dir} {hadoop_bin_dir} {smokeuser} {security_enabled} {smokeuser_keytab} {kinit_path_local} {smokeuser_principal}"
            )
        else:
            sh_cmd = format(
                "{tmp_dir}/{file_name} {os_family} {oozie_lib_dir} {conf_dir} {oozie_bin_dir} {oozie_examples_dir} {hadoop_conf_dir} {hadoop_bin_dir} {smokeuser} {security_enabled}"
            )

        Execute(sh_cmd,
                path=params.execute_path,
                tries=3,
                try_sleep=5,
                logoutput=True)
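Note that format() here is resource_management's helper, not str.format(): it resolves {name} placeholders from the surrounding scope and configured params, which is why tmp_dir, smokeuser, and the rest need no explicit arguments. The same interpolation spelled out with the standard library (values are hypothetical):

tmp_dir = "/var/lib/ambari-agent/tmp"  # hypothetical value
file_name = "oozieSmoke2.sh"           # hypothetical value

path = "{tmp_dir}/{file_name}".format(tmp_dir=tmp_dir, file_name=file_name)
# path == "/var/lib/ambari-agent/tmp/oozieSmoke2.sh"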
Example #9
File: common.py Project: fkpp/ambari
def __set_osparams():
  """
  Updates parameters in sysctl.conf and limits.conf required by HAWQ.
  """
  # Create a temp scratchpad directory
  utils.create_dir_as_hawq_user(hawq_constants.hawq_tmp_dir)

  # Suse doesn't support loading values from files in /etc/sysctl.d,
  # so we have to edit the sysctl file directly
  if System.get_instance().os_family == "suse":
    # Update /etc/sysctl.conf
    __update_sysctl_file_suse()
  else:
    # Update /etc/sysctl.d/hawq.conf
    __update_sysctl_file()

  __update_limits_file()
Example #10
def __set_osparams():
  """
  Updates parameters in sysctl.conf and limits.conf required by HAWQ.
  """
  # Create a temp scratchpad directory
  utils.create_dir_as_hawq_user(hawq_constants.hawq_tmp_dir)

  # Suse doesn't support loading values from files in /etc/sysctl.d,
  # so we have to edit the sysctl file directly
  if System.get_instance().os_family == "suse":
    # Update /etc/sysctl.conf
    __update_sysctl_file_suse()
  else:
    # Update /etc/sysctl.d/hawq.conf
    __update_sysctl_file()

  __update_limits_file()
Example #11
 def reset(self, basedir, test_mode):
   self.system = System.get_instance()
   self.config = AttributeDictionary()
   self.resources = {}
   self.resource_list = []
   self.delayed_actions = set()
   self.test_mode = test_mode
   self.update_config({
     # current time
     'date': datetime.now(),
      # where files rewritten while executing File resources are backed up
      'backup.path': '/tmp/resource_management/backup',
      # prefix for those backup files
      'backup.prefix': datetime.now().strftime("%Y%m%d%H%M%S"),
      # dir containing the templates and files dirs
      'basedir': basedir,
     # variables, which can be used in templates
     'params': {},
   })
Example #12
def oozie_smoke_shell_file(file_name):
  import params

  File( format("{tmp_dir}/{file_name}"),
    content = StaticFile(file_name),
    mode = 0755
  )

  os_family = System.get_instance().os_family

  if params.security_enabled:
    sh_cmd = format("{tmp_dir}/{file_name} {os_family} {oozie_lib_dir} {conf_dir} {oozie_bin_dir} {hadoop_conf_dir} {hadoop_bin_dir} {smokeuser} {security_enabled} {smokeuser_keytab} {kinit_path_local} {smokeuser_principal}")
  else:
    sh_cmd = format("{tmp_dir}/{file_name} {os_family} {oozie_lib_dir} {conf_dir} {oozie_bin_dir} {hadoop_conf_dir} {hadoop_bin_dir} {smokeuser} {security_enabled}")

  Execute( format("{tmp_dir}/{file_name}"),
    command   = sh_cmd,
    path      = params.execute_path,
    tries     = 3,
    try_sleep = 5,
    logoutput = True
  )
Example #13
    def oozie_smoke_shell_file(file_name):
        import params

        File(format("{tmp_dir}/{file_name}"),
             content=StaticFile(file_name),
             mode=0755)

        os_family = System.get_instance().os_family

        if params.security_enabled:
            sh_cmd = format(
                "{tmp_dir}/{file_name} {os_family} {oozie_lib_dir} {conf_dir} {oozie_bin_dir} {hadoop_conf_dir} {hadoop_bin_dir} {smokeuser} {security_enabled} {smokeuser_keytab} {kinit_path_local} {smokeuser_principal}"
            )
        else:
            sh_cmd = format(
                "{tmp_dir}/{file_name} {os_family} {oozie_lib_dir} {conf_dir} {oozie_bin_dir} {hadoop_conf_dir} {hadoop_bin_dir} {smokeuser} {security_enabled}"
            )

        Execute(format("{tmp_dir}/{file_name}"),
                command=sh_cmd,
                path=params.execute_path,
                tries=3,
                try_sleep=5,
                logoutput=True)
Example #14
File: params.py Project: duxia/ambari
    "namenode_host": [("HDPNameNode", 8661)],
    "jtnode_host": [("HDPJobTracker", 8662)],
    "hbase_master_hosts": [("HDPHBaseMaster", 8663)],
    "rm_host": [("HDPResourceManager", 8664)],
    "hs_host": [("HDPHistoryServer", 8666)],
}

ganglia_clusters = [("HDPSlaves", 8660)]

for key in ganglia_cluster_names:
    property_name = format("/clusterHostInfo/{key}")
    hosts = set(default(property_name, []))
    if hosts:
        for x in ganglia_cluster_names[key]:
            ganglia_clusters.append(x)


ganglia_apache_config_file = "/etc/apache2/conf.d/ganglia.conf"
ganglia_web_path = "/var/www/html/ganglia"
if System.get_instance().os_family == "suse":
    rrd_py_path = "/srv/www/cgi-bin"
    dwoo_path = "/var/lib/ganglia-web/dwoo"
    web_user = "******"
    # for upgrade purposes as path to ganglia was changed
    if not os.path.exists(ganglia_web_path):
        ganglia_web_path = "/srv/www/htdocs/ganglia"
else:
    rrd_py_path = "/var/www/cgi-bin"
    dwoo_path = "/var/lib/ganglia/dwoo"
    web_user = "******"
Example #15
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
#
##############################################################################
from resource_management import *
from resource_management.core.system import System
import kavecommon as kc

config = Script.get_config()
tos = kc.detect_linux_version()

if System.get_instance().os_family == "suse" or System.get_instance(
).os_family == "ubuntu":
    daemon_name = 'mysql'
    status_daemon_name = daemon_name
# elif System.get_instance().os_family == "redhat7":
elif tos.lower() in ["centos7"]:
    daemon_name = 'mariadb'
    status_daemon_name = 'mysqld'
else:
    daemon_name = 'mysqld'
    status_daemon_name = daemon_name
Example #16
"""

from resource_management import *
from resource_management.core.system import System
import os

config = Script.get_config()

#java params
artifact_dir = "/tmp/HDP-artifacts/"
jdk_name = default("/hostLevelParams/jdk_name", None) # None when jdk is already installed by user
jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
jce_location = config['hostLevelParams']['jdk_location']
jdk_location = config['hostLevelParams']['jdk_location']
java_home = config['hostLevelParams']['java_home']
if System.get_instance().os_family == "suse":
  jsvc_path = "/usr/lib/bigtop-utils"
else:
  jsvc_path = "/usr/libexec/bigtop-utils"
#security params
_authentication = config['configurations']['core-site']['hadoop.security.authentication']
security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
#hadoop params
hadoop_conf_dir = "/etc/hadoop/conf"

#hadoop-env.sh

java_home = config['hostLevelParams']['java_home']
if System.get_instance().os_family == "suse":
  jsvc_path = "/usr/lib/bigtop-utils"
else:
Example #17
File: params.py Project: indoos/ambari
ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password'][0]

if 'rca_enabled' in config['configurations']['global']:
  rca_enabled =  config['configurations']['global']['rca_enabled']
else:
  rca_enabled = False
rca_disabled_prefix = "###"
if rca_enabled == True:
  rca_prefix = ""
else:
  rca_prefix = rca_disabled_prefix

#hadoop-env.sh
java_home = config['hostLevelParams']['java_home']

if config['hostLevelParams']['stack_version'] == '2.0.6' and System.get_instance().os_family != "suse":
  # deprecated rhel jsvc_path
  jsvc_path = "/usr/libexec/bigtop-utils"
else:
  jsvc_path = "/usr/lib/bigtop-utils"

hadoop_heapsize = config['configurations']['global']['hadoop_heapsize']
namenode_heapsize = config['configurations']['global']['namenode_heapsize']
namenode_opt_newsize =  config['configurations']['global']['namenode_opt_newsize']
namenode_opt_maxnewsize =  config['configurations']['global']['namenode_opt_maxnewsize']

jtnode_opt_newsize = default("jtnode_opt_newsize","200m")
jtnode_opt_maxnewsize = default("jtnode_opt_maxnewsize","200m")
jtnode_heapsize =  default("jtnode_heapsize","1024m")
ttnode_heapsize = "1024m"
Example #18
File: params.py Project: renchuanrc/ambari
                           "/etc/ganglia/hdp")
ganglia_dir = "/etc/ganglia"
ganglia_runtime_dir = config['configurations']['ganglia-env'][
    "ganglia_runtime_dir"]
ganglia_shell_cmds_dir = "/usr/libexec/hdp/ganglia"

gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
gmond_user = config['configurations']['ganglia-env']["gmond_user"]

gmond_app_str = default("/configurations/hadoop-env/enabled_app_servers", None)
gmond_apps = [] if gmond_app_str is None else gmond_app_str.split(',')
gmond_apps = [x.strip() for x in gmond_apps]
gmond_allowed_apps = ["Application1", "Application2", "Application3"]
gmond_apps = set(gmond_apps) & set(gmond_allowed_apps)

if System.get_instance().os_family == "ubuntu":
    gmond_service_name = "ganglia-monitor"
    modules_dir = "/usr/lib/ganglia"
else:
    gmond_service_name = "gmond"
    modules_dir = "/usr/lib64/ganglia"

webserver_group = "apache"
rrdcached_base_dir = config['configurations']['ganglia-env'][
    "rrdcached_base_dir"]
rrdcached_timeout = default("/configurations/ganglia-env/rrdcached_timeout",
                            3600)
rrdcached_flush_timeout = default(
    "/configurations/ganglia-env/rrdcached_flush_timeout", 7200)
rrdcached_delay = default("/configurations/ganglia-env/rrdcached_delay", 1800)
rrdcached_write_threads = default(
Example #19
  mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
  hadoop_libexec_dir = "/usr/lib/hadoop/libexec"

hadoop_conf_dir = "/etc/hadoop/conf"
hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
versioned_stack_root = '/usr/bigtop/current'
#security params
security_enabled = config['configurations']['cluster-env']['security_enabled']
#java params
java_home = config['hostLevelParams']['java_home']
#hadoop params
hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']

if str(config['hostLevelParams']['stack_version']).startswith('2.0') and System.get_instance().os_family != "suse":
  # deprecated rhel jsvc_path
  jsvc_path = "/usr/libexec/bigtop-utils"
else:
  jsvc_path = "/usr/lib/bigtop-utils"

hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")

jtnode_opt_newsize = "200m"
jtnode_opt_maxnewsize = "200m"
jtnode_heapsize =  "1024m"
Example #20
gmetad_user = config["configurations"]["ganglia-env"]["gmetad_user"]
gmond_user = config["configurations"]["ganglia-env"]["gmond_user"]

gmond_add_clusters_str = default("/configurations/ganglia-env/additional_clusters", None)
if gmond_add_clusters_str and gmond_add_clusters_str.isspace():
    gmond_add_clusters_str = None

gmond_app_strs = [] if gmond_add_clusters_str is None else gmond_add_clusters_str.split(",")
gmond_apps = []

for x in gmond_app_strs:
    a, b = x.strip().split(":")
    gmond_apps.append((a.strip(), b.strip()))

if System.get_instance().os_family == "ubuntu":
    gmond_service_name = "ganglia-monitor"
    modules_dir = "/usr/lib/ganglia"
else:
    gmond_service_name = "gmond"
    modules_dir = "/usr/lib64/ganglia"

webserver_group = "apache"
rrdcached_base_dir = config["configurations"]["ganglia-env"]["rrdcached_base_dir"]
rrdcached_timeout = default("/configurations/ganglia-env/rrdcached_timeout", 3600)
rrdcached_flush_timeout = default("/configurations/ganglia-env/rrdcached_flush_timeout", 7200)
rrdcached_delay = default("/configurations/ganglia-env/rrdcached_delay", 1800)
rrdcached_write_threads = default("/configurations/ganglia-env/rrdcached_write_threads", 4)

ganglia_server_host = config["clusterHostInfo"]["ganglia_server_host"][0]
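A stand-in run of the additional_clusters parsing above (the input string is hypothetical; the real one comes from ganglia-env):

gmond_add_clusters_str = "HDPKafka:8671, HDPStorm:8672"  # hypothetical
gmond_apps = []
for x in gmond_add_clusters_str.split(","):
    a, b = x.strip().split(":")
    gmond_apps.append((a.strip(), b.strip()))
# gmond_apps == [("HDPKafka", "8671"), ("HDPStorm", "8672")]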
Example #21
ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password'][0]

if has_namenode and 'rca_enabled' in config['configurations']['hadoop-env']:
  rca_enabled =  config['configurations']['hadoop-env']['rca_enabled']
else:
  rca_enabled = False
rca_disabled_prefix = "###"
if rca_enabled == True:
  rca_prefix = ""
else:
  rca_prefix = rca_disabled_prefix

#hadoop-env.sh
java_home = config['hostLevelParams']['java_home']

if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.0') >= 0 and compare_versions(hdp_stack_version, '2.1') < 0 and System.get_instance().os_family != "suse":
  # deprecated rhel jsvc_path
  jsvc_path = "/usr/libexec/bigtop-utils"
else:
  jsvc_path = "/usr/lib/bigtop-utils"

hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")

jtnode_opt_newsize = "200m"
jtnode_opt_maxnewsize = "200m"
jtnode_heapsize =  "1024m"
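The jsvc_path condition above selects stack versions in the half-open range [2.0, 2.1). A plain-Python stand-in for compare_versions (assumed behavior: compare dotted version strings numerically, returning -1/0/1 like cmp()):

def compare_versions(a, b):
    # Piecewise numeric comparison of dotted version strings (assumption).
    pa = [int(p) for p in a.split(".")]
    pb = [int(p) for p in b.split(".")]
    return (pa > pb) - (pa < pb)

hdp_stack_version = "2.0.6"  # hypothetical
deprecated_rhel_path = (compare_versions(hdp_stack_version, '2.0') >= 0
                        and compare_versions(hdp_stack_version, '2.1') < 0)
# deprecated_rhel_path == True -> jsvc_path = "/usr/libexec/bigtop-utils"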
Example #22
#
# Copyright 2016 KPMG Advisory N.V. (unless otherwise stated)
#
# Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
#
##############################################################################
from resource_management import *
from resource_management.core.system import System
import kavecommon as kc

config = Script.get_config()
tos = kc.detect_linux_version()

if System.get_instance().os_family == "suse" or System.get_instance().os_family == "ubuntu":
    daemon_name = 'mysql'
# elif System.get_instance().os_family == "redhat7":
elif tos.lower() in ["centos7"]:
    daemon_name = 'mariadb'
else:
    daemon_name = 'mysqld'
Example #23
                           "/etc/ganglia/hdp")
ganglia_dir = "/etc/ganglia"
ganglia_runtime_dir = config['configurations']['ganglia-env'][
    "ganglia_runtime_dir"]
ganglia_shell_cmds_dir = "/usr/libexec/hdp/ganglia"

gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
gmond_user = config['configurations']['ganglia-env']["gmond_user"]

gmond_app_str = default("/configurations/hadoop-env/enabled_app_servers", None)
gmond_apps = [] if gmond_app_str is None else gmond_app_str.split(',')
gmond_apps = [x.strip() for x in gmond_apps]
gmond_allowed_apps = ["Application1", "Application2", "Application3"]
gmond_apps = set(gmond_apps) & set(gmond_allowed_apps)

if System.get_instance().os_family == "debian":
    gmond_service_name = "ganglia-monitor"
    modules_dir = "/usr/lib/ganglia"
else:
    gmond_service_name = "gmond"
    modules_dir = "/usr/lib64/ganglia"

webserver_group = "apache"
rrdcached_base_dir = config['configurations']['ganglia-env'][
    "rrdcached_base_dir"]
rrdcached_timeout = default("/configurations/ganglia-env/rrdcached_timeout",
                            3600)
rrdcached_flush_timeout = default(
    "/configurations/ganglia-env/rrdcached_flush_timeout", 7200)
rrdcached_delay = default("/configurations/ganglia-env/rrdcached_delay", 1800)
rrdcached_write_threads = default(
Example #24
  def oozie_smoke_shell_file(file_name, prepare_hdfs_file_name):
    import params

    File(format("{tmp_dir}/{file_name}"),
         content=StaticFile(file_name),
         mode=0755
    )
    File(format("{tmp_dir}/{prepare_hdfs_file_name}"),
         content=StaticFile(prepare_hdfs_file_name),
         mode=0755
    )

    os_family = System.get_instance().os_family
    oozie_examples_dir_regex_matches = glob.glob(params.oozie_examples_regex)
    if not oozie_examples_dir_regex_matches:
      raise Fail(format(NO_DOCS_FOLDER_MESSAGE))
    oozie_examples_dir = oozie_examples_dir_regex_matches[0]

    Execute((format("{tmp_dir}/{prepare_hdfs_file_name}"), params.conf_dir, oozie_examples_dir, params.hadoop_conf_dir, params.yarn_resourcemanager_address, params.fs_root, params.service_check_queue_name, params.service_check_job_name),
            tries=3,
            try_sleep=5,
            logoutput=True
    )

    params.HdfsResource(format("/user/{smokeuser}"),
        type="directory",
        action="create_on_execute",
        owner=params.smokeuser,
        mode=params.smoke_hdfs_user_mode,
        )

    examples_dir = format('/user/{smokeuser}/examples')
    params.HdfsResource(examples_dir,
                        action = "delete_on_execute",
                        type = "directory"
    )
    params.HdfsResource(examples_dir,
      action = "create_on_execute",
      type = "directory",
      source = format("{oozie_examples_dir}/examples"),
      owner = params.smokeuser,
      group = params.user_group
    )

    input_data_dir = format('/user/{smokeuser}/input-data')
    params.HdfsResource(input_data_dir,
                        action = "delete_on_execute",
                        type = "directory"
    )
    params.HdfsResource(input_data_dir,
      action = "create_on_execute",
      type = "directory",
      source = format("{oozie_examples_dir}/examples/input-data"),
      owner = params.smokeuser,
      group = params.user_group
    )
    params.HdfsResource(None, action="execute")

    if params.security_enabled:
      sh_cmd = format(
        "{tmp_dir}/{file_name} {os_family} {oozie_lib_dir} {conf_dir} {oozie_bin_dir} {oozie_base_url} {oozie_examples_dir} {hadoop_conf_dir} {hadoop_bin_dir} {smokeuser} {service_check_job_name} {security_enabled} {smokeuser_keytab} {kinit_path_local} {smokeuser_principal}")
    else:
      sh_cmd = format(
        "{tmp_dir}/{file_name} {os_family} {oozie_lib_dir} {conf_dir} {oozie_bin_dir} {oozie_base_url} {oozie_examples_dir} {hadoop_conf_dir} {hadoop_bin_dir} {smokeuser} {service_check_job_name} {security_enabled}")

    Execute(sh_cmd,
            path=params.execute_path,
            tries=3,
            try_sleep=5,
            logoutput=True
    )
Example #25
  def oozie_smoke_shell_file(file_name, prepare_hdfs_file_name):
    import params

    File(format("{tmp_dir}/{file_name}"),
         content=StaticFile(file_name),
         mode=0755
    )
    File(format("{tmp_dir}/{prepare_hdfs_file_name}"),
         content=StaticFile(prepare_hdfs_file_name),
         mode=0755
    )

    os_family = System.get_instance().os_family
    oozie_examples_dir = glob.glob(params.oozie_examples_regex)[0]

    Execute(format("{tmp_dir}/{prepare_hdfs_file_name} {conf_dir} {oozie_examples_dir} {hadoop_conf_dir} "),
            tries=3,
            try_sleep=5,
            logoutput=True
    )

    examples_dir = format('/user/{smokeuser}/examples')
    params.HdfsResource(examples_dir,
                        action = "delete_on_execute",
                        type = "directory"
    )
    params.HdfsResource(examples_dir,
      action = "create_on_execute",
      type = "directory",
      source = format("{oozie_examples_dir}/examples"),
      owner = params.smokeuser,
      group = params.user_group
    )

    input_data_dir = format('/user/{smokeuser}/input-data')
    params.HdfsResource(input_data_dir,
                        action = "delete_on_execute",
                        type = "directory"
    )
    params.HdfsResource(input_data_dir,
      action = "create_on_execute",
      type = "directory",
      source = format("{oozie_examples_dir}/examples/input-data"),
      owner = params.smokeuser,
      group = params.user_group
    )
    params.HdfsResource(None, action="execute")

    if params.security_enabled:
      sh_cmd = format(
        "{tmp_dir}/{file_name} {os_family} {oozie_lib_dir} {conf_dir} {oozie_bin_dir} {oozie_base_url} {oozie_examples_dir} {hadoop_conf_dir} {hadoop_bin_dir} {smokeuser} {security_enabled} {smokeuser_keytab} {kinit_path_local} {smokeuser_principal}")
    else:
      sh_cmd = format(
        "{tmp_dir}/{file_name} {os_family} {oozie_lib_dir} {conf_dir} {oozie_bin_dir} {oozie_base_url} {oozie_examples_dir} {hadoop_conf_dir} {hadoop_bin_dir} {smokeuser} {security_enabled}")

    Execute(sh_cmd,
            path=params.execute_path,
            tries=3,
            try_sleep=5,
            logoutput=True
    )
Example #26
File: params.py Project: wbear2/ambari
user_group = config['configurations']['global']["user_group"]
ganglia_conf_dir = default("/configurations/global/ganglia_conf_dir", "/etc/ganglia/hdp")
ganglia_dir = "/etc/ganglia"
ganglia_runtime_dir = config['configurations']['global']["ganglia_runtime_dir"]
ganglia_shell_cmds_dir = "/usr/libexec/hdp/ganglia"

gmetad_user = config['configurations']['global']["gmetad_user"]
gmond_user = config['configurations']['global']["gmond_user"]

gmond_app_str = default("/configurations/global/enabled_app_servers", None)
gmond_apps = [] if gmond_app_str is None else gmond_app_str.split(',')
gmond_apps = [x.strip() for x in gmond_apps]
gmond_allowed_apps = ["Application1", "Application2", "Application3"]
gmond_apps = set(gmond_apps) & set(gmond_allowed_apps)

if System.get_instance().os_family == "debian":
  gmond_service_name = "ganglia-monitor"
  modules_dir = "/usr/lib/ganglia"
else:
  gmond_service_name = "gmond"
  modules_dir = "/usr/lib64/ganglia"

webserver_group = "apache"
rrdcached_base_dir = config['configurations']['global']["rrdcached_base_dir"]
rrdcached_timeout = default("/configurations/global/rrdcached_timeout", 3600)
rrdcached_delay = default("/configurations/global/rrdcached_delay", 1800)
rrdcached_write_threads = default("/configurations/global/rrdcached_write_threads", 10)

ganglia_server_host = config["clusterHostInfo"]["ganglia_server_host"][0]

hostname = config["hostname"]
Example #27
    rca_enabled = config['configurations']['hadoop-env']['rca_enabled']
else:
    rca_enabled = False
rca_disabled_prefix = "###"
if rca_enabled == True:
    rca_prefix = ""
else:
    rca_prefix = rca_disabled_prefix

#hadoop-env.sh
java_home = config['hostLevelParams']['java_home']

if hdp_stack_version != "" and compare_versions(
        hdp_stack_version, '2.0') >= 0 and compare_versions(
            hdp_stack_version,
            '2.1') < 0 and System.get_instance().os_family != "suse":
    # deprecated rhel jsvc_path
    jsvc_path = "/usr/libexec/bigtop-utils"
else:
    jsvc_path = "/usr/lib/bigtop-utils"

hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
namenode_opt_newsize = config['configurations']['hadoop-env'][
    'namenode_opt_newsize']
namenode_opt_maxnewsize = config['configurations']['hadoop-env'][
    'namenode_opt_maxnewsize']
namenode_opt_permsize = format_jvm_option(
    "/configurations/hadoop-env/namenode_opt_permsize", "128m")
namenode_opt_maxpermsize = format_jvm_option(
    "/configurations/hadoop-env/namenode_opt_maxpermsize", "256m")
Example #28
File: params.py Project: mbrukman/ambari
security_enabled = config['configurations']['cluster-env']['security_enabled']
#java params
java_home = config['hostLevelParams']['java_home']
#hadoop params
hdfs_log_dir_prefix = config['configurations']['hadoop-env'][
    'hdfs_log_dir_prefix']
hadoop_pid_dir_prefix = config['configurations']['hadoop-env'][
    'hadoop_pid_dir_prefix']
hadoop_root_logger = config['configurations']['hadoop-env'][
    'hadoop_root_logger']

#hadoop-env.sh
java_home = config['hostLevelParams']['java_home']

if str(config['hostLevelParams']['stack_version']).startswith(
        '2.0') and System.get_instance().os_family != "suse":
    # deprecated rhel jsvc_path
    jsvc_path = "/usr/libexec/bigtop-utils"
else:
    jsvc_path = "/usr/lib/bigtop-utils"

hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
namenode_opt_newsize = config['configurations']['hadoop-env'][
    'namenode_opt_newsize']
namenode_opt_maxnewsize = config['configurations']['hadoop-env'][
    'namenode_opt_maxnewsize']

jtnode_opt_newsize = "200m"
jtnode_opt_maxnewsize = "200m"
jtnode_heapsize = "1024m"
Example #29
File: params.py Project: duxia/ambari
ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password'][0]

if has_namenode and 'rca_enabled' in config['configurations']['hadoop-env']:
  rca_enabled =  config['configurations']['hadoop-env']['rca_enabled']
else:
  rca_enabled = False
rca_disabled_prefix = "###"
if rca_enabled == True:
  rca_prefix = ""
else:
  rca_prefix = rca_disabled_prefix

#hadoop-env.sh
java_home = config['hostLevelParams']['java_home']

if str(config['hostLevelParams']['stack_version']).startswith('2.0') and System.get_instance().os_family != "suse":
  # deprecated rhel jsvc_path
  jsvc_path = "/usr/libexec/bigtop-utils"
else:
  jsvc_path = "/usr/lib/bigtop-utils"

hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")

jtnode_opt_newsize = "200m"
jtnode_opt_maxnewsize = "200m"
jtnode_heapsize =  "1024m"
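Note the difference from Example #21: startswith('2.0') matches any version string beginning with those characters rather than bounding a numeric range, which happens to coincide for the 2.0.x line:

print(str("2.0.6").startswith('2.0'))  # True  -> "/usr/libexec/bigtop-utils"
print(str("2.1.0").startswith('2.0'))  # False -> "/usr/lib/bigtop-utils"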