Example #1
def check_indexer_parameters():
    """
    Ensure that all required parameters have been defined for the chosen
    indexer: either Solr or Elasticsearch.
    """
    missing = []
    config = Script.get_config()
    indexer = config['configurations']['metron-indexing-env']['ra_indexing_writer']
    Logger.info('Checking parameters for indexer = ' + indexer)

    if indexer == 'Solr':
      # check for all required solr parameters
      if not config['configurations']['metron-env']['solr_zookeeper_url']:
        missing.append("metron-env/solr_zookeeper_url")

    else:
      # check for all required elasticsearch parameters
      if not config['configurations']['metron-env']['es_cluster_name']:
        missing.append("metron-env/es_cluster_name")
      if not config['configurations']['metron-env']['es_hosts']:
        missing.append("metron-env/es_hosts")
      if not config['configurations']['metron-env']['es_date_format']:
        missing.append("metron-env/es_date_format")

    if len(missing) > 0:
      raise Fail("Missing required indexing parameters(s): indexer={0}, missing={1}".format(indexer, missing))
Example #2
  def actionexecute(self, env):
    config = Script.get_config()

    version = default('/commandParams/version', None)
    stack_name = default('/hostLevelParams/stack_name', "")

    if not version:
      raise Fail("Value is required for '/commandParams/version'")
  
    # TODO: handle other OS families besides RedHat?
    if OSCheck.is_redhat_family():
      cmd = ('/usr/bin/yum', 'clean', 'all')
      code, out = shell.call(cmd, sudo=True)

    min_ver = format_hdp_stack_version("2.2")
    real_ver = format_hdp_stack_version(version)
    if stack_name == "HDP":
      if compare_versions(real_ver, min_ver) >= 0:
        cmd = ('hdp-select', 'set', 'all', version)
        code, out = shell.call(cmd, sudo=True)

      if compare_versions(real_ver, format_hdp_stack_version("2.3")) >= 0:
        # back up the old conf dir and symlink /etc/[component]/conf to /usr/hdp/current/[component]
        for k, v in conf_select.PACKAGE_DIRS.iteritems():
          for dir_def in v:
            link_config(dir_def['conf_dir'], dir_def['current_dir'])
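
As a reading aid, the version helpers used above behave roughly as follows; this is a hedged illustration with made-up version strings, not a specification of the library.

# format_hdp_stack_version normalizes a stack string into a reliably
# comparable form; compare_versions returns a negative, zero, or positive
# number like cmp().
min_ver = format_hdp_stack_version("2.2")
real_ver = format_hdp_stack_version("2.3.0.0")
if compare_versions(real_ver, min_ver) >= 0:
    pass  # 2.3.0.0 is at or above 2.2, so both upgrade branches above run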
Example #3
def should_install_falcon_atlas_hook():
  config = Script.get_config()
  stack_version_unformatted = config['hostLevelParams']['stack_version']
  stack_version_formatted = format_stack_version(stack_version_unformatted)
  if check_stack_feature(StackFeature.FALCON_ATLAS_SUPPORT_2_3, stack_version_formatted) \
      or check_stack_feature(StackFeature.FALCON_ATLAS_SUPPORT, stack_version_formatted):
    return _has_applicable_local_component(config, ['FALCON_SERVER'])
  return False
Example #4
def should_install_mysql():
  config = Script.get_config()
  hive_database = config['configurations']['hive-env']['hive_database']
  hive_use_existing_db = hive_database.startswith('Existing')

  if hive_use_existing_db:
    return False
  return _has_applicable_local_component(config, "MYSQL_SERVER")
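
This example, like many that follow, delegates to a helper named _has_applicable_local_component instead of checking config['role'] directly. Its source is not shown on this page; the sketch below is an assumption about its shape (a membership test against the host's localComponents list from the command JSON), not the actual Ambari implementation.

def _has_applicable_local_component(config, components):
    # Hypothetical sketch: report whether any of the requested component
    # names is installed on this host according to the command JSON.
    if isinstance(components, basestring):
        components = [components]  # this example passes a bare string
    if 'localComponents' not in config:
        return False
    return len(set(config['localComponents']) & set(components)) > 0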
Example #5
  def actionexecute(self, env):
    resolve_ambari_config()

    # Parse parameters from command json file.
    config = Script.get_config()

    host_name = socket.gethostname()
    version = default('/roleParams/version', None)

    # These 2 variables are optional
    service_package_folder = default('/roleParams/service_package_folder', None)
    hooks_folder = default('/roleParams/hooks_folder', None)

    tasks = json.loads(config['roleParams']['tasks'])
    if tasks:
      for t in tasks:
        task = ExecuteTask(t)
        Logger.info(str(task))

        # If a (script, function) pair exists, it overrides the command.
        if task.script and task.function and service_package_folder and hooks_folder:
          file_cache = FileCache(agent_config)
          command_paths = {"commandParams":
                                 {"service_package_folder": service_package_folder,
                                  "hooks_folder": hooks_folder
                                 }
                              }
          server_url_prefix = default('/hostLevelParams/jdk_location', "")
          base_dir = file_cache.get_service_base_dir(command_paths, server_url_prefix)
          script_path = os.path.join(base_dir, task.script)
          if not os.path.exists(script_path):
            message = "Script %s does not exist" % str(script_path)
            raise Fail(message)

          # Notice that the script_path is now the fully qualified path, and the
          # same command-#.json file is used.
          # Also, the python wrapper is used, since it sets up the correct environment variables
          command_params = ["/usr/bin/ambari-python-wrap",
                            script_path,
                            task.function,
                            self.command_data_file,
                            self.basedir,
                            self.stroutfile,
                            self.logging_level,
                            Script.get_tmp_dir()]

          task.command = " ".join(command_params)
          # Replace redundant whitespace to make the unit tests easier to validate
          task.command = re.sub(r"\s+", " ", task.command).strip()

        if task.command:
          task.command = replace_variables(task.command, host_name, version)
          code, out = shell.call(task.command)
          Logger.info("Command: %s\nCode: %s, Out: %s" % (task.command, str(code), str(out)))
          if code != 0:
            raise Fail(out)
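
Note the error handling here: shell.call returns the exit code and output, and the code raises Fail by hand. Example #29 below performs the same step with shell.checked_call, which raises on a non-zero exit on its own; a minimal equivalent of the guard above would be:

# checked_call raises Fail itself on a non-zero exit code, replacing the
# manual code/out inspection.
shell.checked_call(task.command, logoutput=True)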
Example #6
def default(name, default_value):
  subdicts = filter(None, name.split('/'))

  curr_dict = Script.get_config()
  if not curr_dict:
    return default_value
  for x in subdicts:
    if x in curr_dict:
      curr_dict = curr_dict[x]
    else:
      return default_value

  return curr_dict
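
A short usage sketch for default(); the paths appear in other examples on this page, and the fallback values are illustrative.

# Walks the nested config dict one path segment at a time and returns the
# second argument as soon as any segment is missing.
version = default('/commandParams/version', None)
retry_count = default('/hostLevelParams/agent_stack_retry_count', 5)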
Example #7
File: default.py  Project: duxia/ambari
def default(name, default_value):
  subdicts = filter(None, name.split('/'))

  curr_dict = Script.get_config()
  for x in subdicts:
    if x in curr_dict:
      curr_dict = curr_dict[x]
    else:
      if not isinstance(default_value, UnknownConfiguration):
        Logger.debug("Cannot find configuration: '%s'. Using '%s' value as default" % (name, default_value))
      return default_value

  return curr_dict
Example #8
  def remove_hdp_21(self, env):
    """
    During Express Upgrade from HDP 2.1 to any higher version (HDP 2.2 or 2.3), the HDP 2.1 bits must be uninstalled.
    This is because /usr/bin/hadoop used to be a shell script in HDP 2.1, but in HDP 2.3 it is
    a symlink to /usr/hdp/current/hadoop-client/bin/hadoop
    so both versions cannot coexist.
    """
    Logger.info("Attempting to remove bits for HDP 2.1")
    config = Script.get_config()

    packages_to_remove = ["zookeeper", "hadoop", "hadoop-lzo", "hadoop-hdfs", "hadoop-libhdfs", "hadoop-yarn", "hadoop-client", "hadoop-mapreduce", "hive", "hive-hcatalog", "hive-jdbc", "hive-webhcat", "hcatalog", "webhcat-tar-hive", "webhcat-tar-pig", "oozie", "oozie-client", "pig", "sqoop", "tez" "falcon", "storm", "flume", "hbase", "phoenix"]
    packages_to_remove.reverse()
    Logger.info("Packages to remove: {0}".format(" ".join(packages_to_remove)))

    for name in packages_to_remove:
      Logger.info("Attempting to remove {0}".format(name))
      Package(name, action="remove")
Example #9
def get_not_managed_resources():
  """
  Returns a list of not managed hdfs paths.
  The result contains all paths from hostLevelParams/not_managed_hdfs_path_list
  except config values from cluster-env/managed_hdfs_resource_property_names
  """
  config = Script.get_config()
  not_managed_hdfs_path_list = json.loads(config['hostLevelParams']['not_managed_hdfs_path_list'])[:]
  managed_hdfs_resource_property_names = config['configurations']['cluster-env']['managed_hdfs_resource_property_names']
  managed_hdfs_resource_property_list = filter(None, [property.strip() for property in managed_hdfs_resource_property_names.split(',')])

  for property_name in managed_hdfs_resource_property_list:
    property_value = default('/configurations/' + property_name, None)

    if property_value is None:
      Logger.warning(("Property {0} from cluster-env/managed_hdfs_resource_property_names not found in configurations. "
                     "Management of this DFS resource will not be forced.").format(property_name))
    else:
      while property_value in not_managed_hdfs_path_list:
        not_managed_hdfs_path_list.remove(property_value)

  return not_managed_hdfs_path_list
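
A hedged walk-through with made-up values, including a hypothetical property name, to show the intended input/output relationship:

# hostLevelParams/not_managed_hdfs_path_list = '["/tmp", "/app-logs"]'
# cluster-env/managed_hdfs_resource_property_names =
#     'yarn-site/yarn.nodemanager.remote-app-log-dir'   (hypothetical name)
# configurations/yarn-site/yarn.nodemanager.remote-app-log-dir = '/app-logs'
# => get_not_managed_resources() returns ['/tmp'], because the managed
#    property's value is removed from the not-managed list.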
Example #10
def expect(name, expected_type, default_value=None):
  """
  Expect configuration to be of certain type. If it is not, give a reasonable error message to user.
  
  Optionally if the configuration is not found default_value for it can be returned.
  """
  subdicts = filter(None, name.split('/'))

  curr_dict = Script.get_config()
  for x in subdicts:
    if x in curr_dict:
      curr_dict = curr_dict[x]
    else:
      if default_value is not None:  # honor falsy defaults such as 0 or False
        return default_value
      # name the missing leaf key (subdicts[-1]); indexing curr_dict here would fail
      return UnknownConfiguration(subdicts[-1])
  value = curr_dict
  
  if expected_type == bool:
    if isinstance(value, bool):
      return value
    elif isinstance(value, basestring):
      if value.lower() == "true":
        value = True
      elif value.lower() == "false":
        value = False
      else:
        raise Fail("Configuration {0} expected to be boolean (true or false), but found '{1}'".format(name, value))
    else:
      type_name = type(value).__name__
      raise Fail("Configuration {0} expected to be boolean (true or false), but found instance of unknown type '{1}'".format(name, type_name))
  elif expected_type in [int, long, float]:
    try:
      value = expected_type(value)
    except (ValueError, TypeError):
      raise Fail("Configuration {0} expected to be number, but found '{1}'".format(name, value))
  return value
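
A short usage sketch for expect(); the first path appears in Example #11 below, the second mirrors the security_enabled lookup in Example #35, and the fallback value is illustrative.

# Returns the value coerced to the expected type, or raises Fail with a
# readable message when the value cannot be read as that type.
retry_count = expect("/hostLevelParams/agent_stack_retry_count", int, default_value=5)
security_enabled = expect("/configurations/cluster-env/security_enabled", bool)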
Example #11
import status_params

from ambari_commons.constants import AMBARI_SUDO_BINARY
from resource_management.libraries.script import Script
from resource_management.libraries.functions.default import default
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.stack_features import get_stack_feature_version
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.expect import expect
from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster
from resource_management.libraries.functions import is_empty
from ambari_commons.ambari_metrics_helper import select_metric_collector_hosts_from_hostnames
from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs

# server configurations
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
stack_root = status_params.stack_root
sudo = AMBARI_SUDO_BINARY

# Needed since this is an Atlas Hook service.
cluster_name = config['clusterName']

stack_name = status_params.stack_name
upgrade_direction = default("/commandParams/upgrade_direction", None)
version = default("/commandParams/version", None)

agent_stack_retry_on_unavailability = config['hostLevelParams'][
    'agent_stack_retry_on_unavailability']
agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count",
                                 int)
Example #12
def should_install_kerberos_server():
    config = Script.get_config()
    return 'role' in config and config['role'] != "KERBEROS_CLIENT"
Example #13
def should_install_logsearch_portal():
    config = Script.get_config()
    return 'role' in config and config['role'] == "LOGSEARCH_SERVER"
Example #14
def should_install_logsearch_solr_client():
    config = Script.get_config()
    return 'role' in config and (config['role'] == "LOGSEARCH_SOLR_CLIENT"
                                 or config['role'] == 'ATLAS_SERVER'
                                 or config['role'] == 'RANGER_ADMIN')
Example #15
def should_install_infra_solr_client():
  config = Script.get_config()
  return _has_applicable_local_component(config, ['INFRA_SOLR_CLIENT', 'ATLAS_SERVER', 'RANGER_ADMIN'])
Example #16
def should_install_ams_grafana():
  config = Script.get_config()
  return _has_applicable_local_component(config, ["METRICS_GRAFANA"])
Example #17
def should_install_ams_collector():
    config = Script.get_config()
    return 'role' in config and config['role'] == "METRICS_COLLECTOR"
Example #18
def should_install_ams_grafana():
    config = Script.get_config()
    return _has_applicable_local_component(config, ["METRICS_GRAFANA"])
Example #19
def should_install_ams_collector():
    config = Script.get_config()
    return _has_applicable_local_component(config, ["METRICS_COLLECTOR"])
Example #20
def should_install_rpcbind():
    config = Script.get_config()
    return _has_applicable_local_component(config, ["NFS_GATEWAY"])
Example #21
def should_install_ranger_tagsync():
  config = Script.get_config()
  ranger_tagsync_hosts = default("/clusterHostInfo/ranger_tagsync_hosts", [])
  has_ranger_tagsync = len(ranger_tagsync_hosts) > 0

  return has_ranger_tagsync
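
Unlike the role-based checks above, this variant consults clusterHostInfo, where the server publishes a <component>_hosts list per component; the test is therefore cluster-wide rather than host-local. A hedged illustration with a made-up host:

# Illustrative command-JSON fragment:
#   clusterHostInfo: { "ranger_tagsync_hosts": ["host1.example.com"] }
# default() returns that list, so the non-empty check means "the cluster has
# at least one Ranger Tagsync instance somewhere", not "on this host".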
Example #22
def should_install_rpcbind():
  config = Script.get_config()
  return _has_applicable_local_component(config, ["NFS_GATEWAY"])
Example #23
def should_install_logsearch_portal():
  config = Script.get_config()
  return _has_applicable_local_component(config, ["LOGSEARCH_SERVER"])
Example #24
def should_install_ranger_tagsync():
    config = Script.get_config()
    ranger_tagsync_hosts = default("/clusterHostInfo/ranger_tagsync_hosts", [])
    has_ranger_tagsync = len(ranger_tagsync_hosts) > 0

    return has_ranger_tagsync
Example #25
def should_install_rpcbind():
    config = Script.get_config()
    return 'role' in config and config['role'] == "NFS_GATEWAY"
Example #26
def should_install_ams_collector():
  config = Script.get_config()
  return _has_applicable_local_component(config, ["METRICS_COLLECTOR"])
Example #27
def should_install_ams_grafana():
    config = Script.get_config()
    return 'role' in config and config['role'] == "METRICS_GRAFANA"
Example #28
def should_install_logsearch_solr():
    config = Script.get_config()
    return 'role' in config and config['role'] == "LOGSEARCH_SOLR"
Example #29
    def actionexecute(self, env):
        resolve_ambari_config()

        # Parse parameters from command json file.
        config = Script.get_config()

        host_name = socket.gethostname()
        version = default('/roleParams/version', None)

        # These 2 variables are optional
        service_package_folder = default('/roleParams/service_package_folder',
                                         None)
        hooks_folder = default('/roleParams/hooks_folder', None)

        tasks = json.loads(config['roleParams']['tasks'])
        if tasks:
            for t in tasks:
                task = ExecuteTask(t)
                Logger.info(str(task))

                # If a (script, function) pair exists, it overrides the command.
                if task.script and task.function:
                    file_cache = FileCache(agent_config)

                    server_url_prefix = default(
                        '/hostLevelParams/jdk_location', "")

                    if service_package_folder and hooks_folder:
                        command_paths = {
                            "commandParams": {
                                "service_package_folder":
                                service_package_folder,
                                "hooks_folder": hooks_folder
                            }
                        }

                        base_dir = file_cache.get_service_base_dir(
                            command_paths, server_url_prefix)
                    else:
                        base_dir = file_cache.get_custom_actions_base_dir(
                            server_url_prefix)

                    script_path = os.path.join(base_dir, task.script)
                    if not os.path.exists(script_path):
                        message = "Script %s does not exist" % str(script_path)
                        raise Fail(message)

                    # Notice that the script_path is now the fully qualified path, and the
                    # same command-#.json file is used.
                    # Also, the python wrapper is used, since it sets up the correct environment variables
                    command_params = [
                        "/usr/bin/ambari-python-wrap", script_path,
                        task.function, self.command_data_file, self.basedir,
                        self.stroutfile, self.logging_level,
                        Script.get_tmp_dir()
                    ]

                    task.command = "source /var/lib/ambari-agent/ambari-env.sh ; " + " ".join(
                        command_params)
                    # Replace redundant whitespace to make the unit tests easier to validate
                    task.command = re.sub(r"\s+", " ", task.command).strip()

                if task.command:
                    task.command = replace_variables(task.command, host_name,
                                                     version)
                    shell.checked_call(task.command,
                                       logoutput=True,
                                       quiet=True)
Example #30
def should_install_infra_solr():
  config = Script.get_config()
  return _has_applicable_local_component(config, ["INFRA_SOLR"])
Example #31
def should_install_infra_solr():
    config = Script.get_config()
    return _has_applicable_local_component(config, ["INFRA_SOLR"])
Example #32
def should_install_lzo():
  config = Script.get_config()
  io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
  lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
  return lzo_enabled
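
For reference, a hedged illustration of the property this inspects; the codec list is an example value, not taken from this page.

# io.compression.codecs holds a comma-separated list of codec classes, e.g.
#   org.apache.hadoop.io.compress.GzipCodec,com.hadoop.compression.lzo.LzoCodec
# The lowercased substring test above flags LZO whenever any
# com.hadoop.compression.lzo codec appears in that list.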
Example #33
def should_install_infra_solr_client():
    config = Script.get_config()
    return _has_applicable_local_component(config, [
        'INFRA_SOLR_CLIENT', 'ATLAS_SERVER', 'RANGER_ADMIN', 'LOGSEARCH_SERVER'
    ])
Example #34
def should_install_logsearch_portal():
    config = Script.get_config()
    return _has_applicable_local_component(config, ["LOGSEARCH_SERVER"])
Example #35
import re

import ambari_simplejson as json # simplejson is much faster than the Python 2.6 json module and provides the same function set.

from resource_management.libraries.script import Script
from resource_management.libraries.functions import default
from resource_management.libraries.functions import format
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import hdp_select
from resource_management.libraries.functions import format_jvm_option
from resource_management.libraries.functions.version import format_hdp_stack_version
from resource_management.libraries.functions.version import compare_versions
from ambari_commons.os_check import OSCheck


config = Script.get_config()
tmp_dir = Script.get_tmp_dir()

artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
jce_location = config['hostLevelParams']['jdk_location']
jdk_name = default("/hostLevelParams/jdk_name", None)
java_home = config['hostLevelParams']['java_home']
java_version = int(config['hostLevelParams']['java_version'])

ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]

stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)

security_enabled = config['configurations']['cluster-env']['security_enabled']
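
A params.py module like this one is evaluated once at import time; service scripts then read the computed values with a plain import, as Example #36 below does with import params. A minimal usage sketch, assuming this file sits next to the script as params.py:

# In a service script (hypothetical layout):
import params

print params.java_home          # resolved from hostLevelParams/java_home
print params.hdp_stack_version  # normalized stack version string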
Example #36
    def start(self, env):
        import params
        self.configure(env)
        print "start mongodb"

        import socket
        current_host_name = socket.getfqdn(socket.gethostname())

        config = Script.get_config()
        shard_prefix = params.shard_prefix

        db_hosts = config['clusterHostInfo']['mongodb_hosts']

        auth_pattern = ''
        print params.auth
        if params.auth:
            print 'add keyFile'
            # add keyfile
            keyfile_path = '/etc/security/'
            keyfile_name = keyfile_path + 'mongodb-keyfile'
            auth_pattern = ' --keyFile ' + keyfile_name
            if current_host_name == db_hosts[0]:
                Execute(format('openssl rand -base64 741 > {keyfile_name}'),
                        logoutput=True)
                Execute(format('chmod 600 {keyfile_name}'), logoutput=True)
                for index, item in enumerate(db_hosts, start=1):
                    Execute(format(
                        'scp {keyfile_name} root@{item}:{keyfile_path}'),
                            logoutput=True)

        len_host = len(db_hosts)
        len_port = len(params.db_ports)

        if len(params.node_group) > 0:
            db_hosts = self.getdbhosts(db_hosts, params.node_group)
        # start shard service
        for index, item in enumerate(db_hosts, start=0):
            if item == current_host_name:
                # foreach db_ports
                for index_p, p in enumerate(params.db_ports, start=0):
                    # rm mongo_*.sock
                    Execute(format('rm -rf /tmp/mongodb-{p}.sock'),
                            logoutput=True)
                    # get shard_name
                    shard_name = shard_prefix + str(
                        (index - index_p) % len_host)
                    # pid_file_name = params.shard_prefix + str((index-index_p)%len_host)
                    # pid_file_name deliberately differs from the log name, which makes status checks easier
                    pid_file_name = params.shard_prefix + str(index_p)
                    # get db_path
                    db_path = params.db_path + '/' + shard_name

                    if os.path.exists(db_path):
                        print "File exists"
                    else:
                        Execute(format('mkdir -p {db_path}'), logoutput=True)
                    log_file = params.log_path + '/' + shard_name + '.log'
                    pid_file = params.pid_db_path + '/' + pid_file_name + '.pid'
                    Execute(format(
                        'mongod -f /etc/mongod.conf --shardsvr  -replSet {shard_name} -port {p} -dbpath {db_path} -oplogSize 100 -logpath {log_file} -pidfilepath {pid_file} {auth_pattern} '
                    ),
                            logoutput=True)

        sleep(5)
        print 'waited 5s for all mongod instances to start'

        if params.node_group == '':
            members = ''

            index = db_hosts.index(current_host_name)
            shard_name = shard_prefix + str(index)

            current_index = 0
            current_shard = index
            while (current_index < len_port):
                current_host = db_hosts[current_shard]
                current_port = params.db_ports[current_index]
                members = members + '{_id:' + format(
                    '{current_index},host:"{current_host}:{current_port}"')
                if current_index == 0:
                    members = members + ',priority:2'
                members = members + '},'
                current_index = current_index + 1
                current_shard = (current_shard + 1) % len(db_hosts)

            replica_param = 'rs.initiate( {_id:' + format(
                '"{shard_name}",version: 1,members:') + '[' + members + ']})'

            cmd = format(
                'mongo --host {current_host_name} --port 27017 <<EOF \n{replica_param} \nEOF\n'
            )
            File('/var/run/mongo_config.sh', content=cmd, mode=0755)
            Execute('su - mongodb /var/run/mongo_config.sh', logoutput=True)
        else:

            groups = params.node_group.split(';')

            members = ''

            index = db_hosts.index(current_host_name)
            shard_name = shard_prefix + str(index)

            current_index = 0
            current_shard = index
            while (current_index < len_port):
                current_host = db_hosts[current_shard]
                current_port = params.db_ports[current_index]
                members = members + '{_id:' + format(
                    '{current_index},host:"{current_host}:{current_port}"')
                if current_index == 0:
                    members = members + ',priority:2'
                members = members + '},'
                current_index = current_index + 1
                current_shard = (current_shard + 1) % len(db_hosts)

            # if len(groups) > 1 and current_host_name in groups[-1]:
            #    replica_param ='rs.initiate( {_id:'+format('"{shard_name}",version: 1,members:') + '[' + members + ']})'
            # else:
            replica_param = 'rs.initiate( {_id:' + format(
                '"{shard_name}",version: 1,members:'
            ) + '[' + members + ']},{force:1})'

            cmd = format(
                'mongo --host {current_host_name} --port 27017 <<EOF \n{replica_param} \nEOF\n'
            )
            File('/var/run/mongo_config.sh', content=cmd, mode=0755)
            Execute('/var/run/mongo_config.sh', logoutput=True)
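
To make the string assembly easier to follow, here is a hedged illustration of what replica_param expands to in the no-node-group branch, with made-up hosts and ports:

# With db_hosts = ['hostA', 'hostB'] and db_ports = ['27017', '27018'],
# running on hostA (index 0) produces:
#   rs.initiate( {_id:"shard0",version: 1,members:[{_id:0,host:"hostA:27017",priority:2},{_id:1,host:"hostB:27018"},]})
# The trailing comma before ']' is tolerated by the mongo shell's
# JavaScript parser.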