def actionexecute(self, env):
    resolve_ambari_config()

    # Parse parameters from command json file.
    config = Script.get_config()

    host_name = socket.gethostname()
    version = default('/roleParams/version', None)

    # These 2 variables are optional
    service_package_folder = default('/roleParams/service_package_folder', None)
    hooks_folder = default('/roleParams/hooks_folder', None)

    tasks = json.loads(config['roleParams']['tasks'])
    if tasks:
      for t in tasks:
        task = ExecuteTask(t)
        Logger.info(str(task))

        # If a (script, function) pair exists, it overrides the command.
        if task.script and task.function and service_package_folder and hooks_folder:
          file_cache = FileCache(agent_config)
          command_paths = {"commandParams":
                                 {"service_package_folder": service_package_folder,
                                  "hooks_folder": hooks_folder
                                 }
                              }
          server_url_prefix = default('/hostLevelParams/jdk_location', "")
          base_dir = file_cache.get_service_base_dir(command_paths, server_url_prefix)
          script_path = os.path.join(base_dir, task.script)
          if not os.path.exists(script_path):
            message = "Script %s does not exist" % str(script_path)
            raise Fail(message)

          # Notice that the script_path is now the fully qualified path, and the
          # same command-#.json file is used.
          # Also, the python wrapper is used, since it sets up the correct environment variables
          command_params = ["/usr/bin/ambari-python-wrap",
                            script_path,
                            task.function,
                            self.command_data_file,
                            self.basedir,
                            self.stroutfile,
                            self.logging_level,
                            Script.get_tmp_dir()]

          task.command = " ".join(command_params)
          # Collapse redundant whitespace to make the unit tests easier to validate
          task.command = re.sub(r"\s+", " ", task.command).strip()

        if task.command:
          task.command = replace_variables(task.command, host_name, version)
          code, out = shell.call(task.command)
          Logger.info("Command: %s\nCode: %s, Out: %s" % (task.command, str(code), str(out)))
          if code != 0:
            raise Fail(out)
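replace_variables is defined elsewhere in this custom action. A rough, hypothetical sketch of the substitution it performs (the placeholder tokens below are assumptions, not taken from the source):

def replace_variables_sketch(cmd, host_name, version):
  # Hypothetical placeholders; the real helper may use different tokens.
  cmd = cmd.replace("{{host_name}}", host_name)
  if version is not None:
    cmd = cmd.replace("{{version}}", version)
  return cmd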
Example #2
  def test_struct_out(self):
   from resource_management.libraries.script import Script

   configs_path = os.path.join(RMFTestCase._getSrcFolder(),
     "test/python/stacks", self.STACK_VERSION, "configs")

   script = Script()
   script.stroutfile = os.path.join(configs_path, "structured-out-status.json")
   script.load_structured_out()

   self.assertFalse("version" in script.structuredOut)
Example #3
def falcon(type, action = None):
  import params

  if action == 'config':
    env = Environment.get_instance()
    # These 2 parameters are used in ../templates/client.properties.j2
    env.config.params["falcon_host"] = params.falcon_host
    env.config.params["falcon_port"] = params.falcon_port
    File(os.path.join(params.falcon_conf_dir, 'falcon-env.sh'),
      content = InlineTemplate(params.falcon_env_sh_template))

    File(os.path.join(params.falcon_conf_dir, 'client.properties'),
      content = Template('client.properties.j2'))

    PropertiesFile(os.path.join(params.falcon_conf_dir, 'runtime.properties'),
      properties = params.falcon_runtime_properties)

    PropertiesFile(os.path.join(params.falcon_conf_dir, 'startup.properties'),
      properties = params.falcon_startup_properties)

  if type == 'server':
    ServiceConfig(params.falcon_win_service_name,
      action = "change_user",
      username = params.falcon_user,
      password = Script.get_password(params.falcon_user))

    if action == 'start':
      Service(params.falcon_win_service_name, action = "start")

    if action == 'stop':
      Service(params.falcon_win_service_name, action = "stop")
  def actionexecute(self, env):
    config = Script.get_config()

    version = default('/commandParams/version', None)
    stack_name = default('/hostLevelParams/stack_name', "")

    if not version:
      raise Fail("Value is required for '/commandParams/version'")

    # TODO: handle other OS families besides Red Hat?
    if OSCheck.is_redhat_family():
      cmd = ('/usr/bin/yum', 'clean', 'all')
      code, out = shell.call(cmd, sudo=True)

    min_ver = format_hdp_stack_version("2.2")
    real_ver = format_hdp_stack_version(version)
    if stack_name == "HDP":
      if compare_versions(real_ver, min_ver) >= 0:
        cmd = ('hdp-select', 'set', 'all', version)
        code, out = shell.call(cmd, sudo=True)

      if compare_versions(real_ver, format_hdp_stack_version("2.3")) >= 0:
        # backup the old and symlink /etc/[component]/conf to /usr/hdp/current/[component]
        for k, v in conf_select.PACKAGE_DIRS.iteritems():
          for dir_def in v:
            link_config(dir_def['conf_dir'], dir_def['current_dir'])
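link_config comes from a shared helper that is not shown in this listing. As an illustration of what converting a conf directory to a symlink involves, a minimal standalone sketch (not Ambari's actual implementation):

import os
import shutil

def link_config_sketch(conf_dir, current_dir):
  # Back up /etc/<component>/conf once, then replace it with a symlink
  # to the versioned directory.
  if os.path.islink(conf_dir):
    return  # already converted
  backup_dir = conf_dir + ".backup"
  if os.path.isdir(conf_dir):
    if not os.path.isdir(backup_dir):
      shutil.copytree(conf_dir, backup_dir)
    shutil.rmtree(conf_dir)
  os.symlink(current_dir, conf_dir)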
def check_indexer_parameters():
    """
    Ensure that all required parameters have been defined for the chosen
    indexer, either Solr or Elasticsearch.
    """
    missing = []
    config = Script.get_config()
    indexer = config['configurations']['metron-indexing-env']['ra_indexing_writer']
    Logger.info('Checking parameters for indexer = ' + indexer)

    if indexer == 'Solr':
      # check for all required solr parameters
      if not config['configurations']['metron-env']['solr_zookeeper_url']:
        missing.append("metron-env/solr_zookeeper_url")

    else:
      # check for all required elasticsearch parameters
      if not config['configurations']['metron-env']['es_cluster_name']:
        missing.append("metron-env/es_cluster_name")
      if not config['configurations']['metron-env']['es_hosts']:
        missing.append("metron-env/es_hosts")
      if not config['configurations']['metron-env']['es_date_format']:
        missing.append("metron-env/es_date_format")

    if len(missing) > 0:
      raise Fail("Missing required indexing parameters(s): indexer={0}, missing={1}".format(indexer, missing))
  def pre_upgrade_restart(self, env, upgrade_type=None):
    import params
    env.set_params(params)

    if Script.is_hdp_stack_greater_or_equal('2.3.0.0'):
      conf_select.select(params.stack_name, "hadoop", params.version)
      hdp_select.select("hadoop-hdfs-nfs3", params.version)
def should_install_mysql():
  config = Script.get_config()
  hive_database = config['configurations']['hive-env']['hive_database']
  hive_use_existing_db = hive_database.startswith('Existing')

  if hive_use_existing_db:
    return False
  return _has_applicable_local_component(config, "MYSQL_SERVER")
def should_install_falcon_atlas_hook():
  config = Script.get_config()
  stack_version_unformatted = config['hostLevelParams']['stack_version']
  stack_version_formatted = format_stack_version(stack_version_unformatted)
  if check_stack_feature(StackFeature.FALCON_ATLAS_SUPPORT_2_3, stack_version_formatted) \
      or check_stack_feature(StackFeature.FALCON_ATLAS_SUPPORT, stack_version_formatted):
    return _has_applicable_local_component(config, ['FALCON_SERVER'])
  return False
  def pre_rolling_restart(self, env):
    import params
    env.set_params(params)

    if Script.is_hdp_stack_greater_or_equal("2.3"):
      # phoenix uses hbase configs
      conf_select.select(params.stack_name, "hbase", params.version)
      hdp_select.select("phoenix-server", params.version)
Example #10
  def test_install_packages(self, package_provider_mock):
    no_packages_config = {
      'hostLevelParams' : {
        'repo_info' : "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]",
        'agent_stack_retry_count': '5',
        'agent_stack_retry_on_unavailability': 'false'
      }
    }
    empty_config = {
      'hostLevelParams' : {
        'package_list' : '',
        'repo_info' : "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]",
        'agent_stack_retry_count': '5',
        'agent_stack_retry_on_unavailability': 'false'
      }
    }
    dummy_config = {
      'hostLevelParams' : {
        'package_list' : "[{\"type\":\"rpm\",\"name\":\"hbase\", \"condition\": \"\"},"
                         "{\"type\":\"rpm\",\"name\":\"yet-another-package\", \"condition\": \"\"}]",
        'repo_info' : "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]",
        'service_repo_info' : "[{\"mirrorsList\":\"abc\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]",
        'agent_stack_retry_count': '5',
        'agent_stack_retry_on_unavailability': 'false'
      }
    }

    # Testing config without any keys
    with Environment(".", test_mode=True) as env:
      script = Script()
      Script.config = no_packages_config
      script.install_packages(env)
    resource_dump = pprint.pformat(env.resource_list)
    self.assertEquals(resource_dump, "[]")

    # Testing empty package list
    with Environment(".", test_mode=True) as env:
      script = Script()
      Script.config = empty_config
      script.install_packages(env)
    resource_dump = pprint.pformat(env.resource_list)
    self.assertEquals(resource_dump, "[]")

    # Testing installing of a list of packages
    with Environment(".", test_mode=True) as env:
      script = Script()
      Script.config = dummy_config
      script.install_packages("env")
    resource_dump = pprint.pformat(env.resource_list)
    self.assertEqual(resource_dump, '[Package[\'hbase\'], Package[\'yet-another-package\']]')
Example #11
    def pre_rolling_restart(self, env):
        Logger.info("Executing Metastore Rolling Upgrade pre-restart")
        import params

        env.set_params(params)

        if Script.is_hdp_stack_greater_or_equal("2.3"):
            self.upgrade_schema(env)

        if params.version and compare_versions(format_hdp_stack_version(params.version), "2.2.0.0") >= 0:
            conf_select.select(params.stack_name, "hive", params.version)
            hdp_select.select("hive-metastore", params.version)
Example #12
def default(name, default_value):
  subdicts = filter(None, name.split('/'))

  curr_dict = Script.get_config()
  if not curr_dict:
    return default_value
  for x in subdicts:
    if x in curr_dict:
      curr_dict = curr_dict[x]
    else:
      return default_value

  return curr_dict
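The traversal above is easy to verify in isolation. A self-contained rerun of the same logic against a plain dict (the sample values are made up):

sample_config = {
  'commandParams': {'version': '2.2.0.0-2041'},
  'roleParams': {}
}

def walk(config, name, default_value):
  # Same key-by-key descent as default(), minus the Script dependency.
  curr = config
  for key in filter(None, name.split('/')):
    if isinstance(curr, dict) and key in curr:
      curr = curr[key]
    else:
      return default_value
  return curr

print(walk(sample_config, '/commandParams/version', None))  # 2.2.0.0-2041
print(walk(sample_config, '/roleParams/tasks', '[]'))       # []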
  def pre_rolling_restart(self, env):
    import params
    env.set_params(params)

    # this function should not execute if the version can't be determined or
    # is not at least HDP 2.2.0.0
    if Script.is_hdp_stack_less_than("2.2"):
      return

    Logger.info("Executing Falcon Server Rolling Upgrade pre-restart")
    conf_select.select(params.stack_name, "falcon", params.version)
    hdp_select.select("falcon-server", params.version)
    falcon_server_upgrade.pre_start_restore()
Example #14
def default(name, default_value):
  subdicts = filter(None, name.split('/'))

  curr_dict = Script.get_config()
  for x in subdicts:
    if x in curr_dict:
      curr_dict = curr_dict[x]
    else:
      if not isinstance(default_value, UnknownConfiguration):
        Logger.debug("Cannot find configuration: '%s'. Using '%s' value as default" % (name, default_value))
      return default_value

  return curr_dict
Example #15
  def test_structured_out(self, open_mock):
    script = Script()
    script.stroutfile = ''
    self.assertEqual(Script.structuredOut, {})

    script.put_structured_out({"1": "1"})
    self.assertEqual(Script.structuredOut, {"1": "1"})
    self.assertTrue(open_mock.called)

    script.put_structured_out({"2": "2"})
    self.assertEqual(open_mock.call_count, 2)
    self.assertEqual(Script.structuredOut, {"1": "1", "2": "2"})

    # Overriding an existing key
    script.put_structured_out({"1": "3"})
    self.assertEqual(open_mock.call_count, 3)
    self.assertEqual(Script.structuredOut, {"1": "3", "2": "2"})
  def pre_upgrade_restart(self, env, upgrade_type=None):
    Logger.info("Executing Metastore Stack Upgrade pre-restart")
    import params

    env.set_params(params)

    is_stack_hdp_23 = Script.is_hdp_stack_greater_or_equal("2.3")
    is_upgrade = params.upgrade_direction == Direction.UPGRADE

    if is_stack_hdp_23 and is_upgrade:
      self.upgrade_schema(env)

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      conf_select.select(params.stack_name, "hive", params.version)
      hdp_select.select("hive-metastore", params.version)
Example #17
    def test_install_packages(self, package_provider_mock):
        no_packages_config = {
            "hostLevelParams": {
                "repo_info": '[{"baseUrl":"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0","osType":"centos6","repoId":"HDP-2.0._","repoName":"HDP","defaultBaseUrl":"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0"}]'
            }
        }
        empty_config = {
            "hostLevelParams": {
                "package_list": "",
                "repo_info": '[{"baseUrl":"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0","osType":"centos6","repoId":"HDP-2.0._","repoName":"HDP","defaultBaseUrl":"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0"}]',
            }
        }
        dummy_config = {
            "hostLevelParams": {
                "package_list": '[{"type":"rpm","name":"hbase"},' '{"type":"rpm","name":"yet-another-package"}]',
                "repo_info": '[{"baseUrl":"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0","osType":"centos6","repoId":"HDP-2.0._","repoName":"HDP","defaultBaseUrl":"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0"}]',
                "service_repo_info": '[{"mirrorsList":"abc","osType":"centos6","repoId":"HDP-2.0._","repoName":"HDP","defaultBaseUrl":"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0"}]',
            }
        }

        # Testing config without any keys
        with Environment(".", test_mode=True) as env:
            script = Script()
            Script.config = no_packages_config
            script.install_packages(env)
        resource_dump = pprint.pformat(env.resource_list)
        self.assertEquals(resource_dump, "[]")

        # Testing empty package list
        with Environment(".", test_mode=True) as env:
            script = Script()
            Script.config = empty_config
            script.install_packages(env)
        resource_dump = pprint.pformat(env.resource_list)
        self.assertEquals(resource_dump, "[]")

        # Testing installing of a list of packages
        with Environment(".", test_mode=True) as env:
            Script.config = dummy_config
            script.install_packages("env")
        resource_dump = pprint.pformat(env.resource_list)
        self.assertEqual(
            resource_dump,
            "[Repository['HDP-2.0._'],\n Repository['HDP-2.0._'],\n Package['hbase'],\n Package['yet-another-package']]",
        )
Example #18
  def test_set_version(self, get_stack_to_component_mock):
    good_config = {
      'hostLevelParams': {
        'stack_name': "HDP",
        'stack_version': "2.2"
      },
      'commandParams': {
        'version': "2.2.0.0-2041"
      }
    }
    get_stack_to_component_mock.return_value = {"HDP": "kafka-broker"}

    # Testing default workflow
    with Environment(".", test_mode=True) as env:
      script = Script()
      Script.config = good_config
      script.set_version()
    resource_dump = pprint.pformat(env.resource_list)
    self.assertEquals(resource_dump, '[u"Execute[\'(\'/usr/bin/hdp-select\', \'set\', \'kafka-broker\', \'2.2.0.0-2041\')\']"]')

    # Component does not provide mapping
    get_stack_to_component_mock.return_value = {}

    with Environment(".", test_mode=True) as env:
      script = Script()
      Script.config = good_config
      script.set_version()
    resource_dump = pprint.pformat(env.resource_list)
    self.assertEqual(resource_dump, '[]')

    # Component provided mapping, but configuration is not complete (testing fallback)
    get_stack_to_component_mock.return_value = {"HDP": "kafka-broker"}
    bad_config = {}

    with Environment(".", test_mode=True) as env:
      script = Script()
      Script.config = bad_config
      script.set_version()
    resource_dump = pprint.pformat(env.resource_list)
    self.assertEqual(resource_dump, '[]')
  def remove_hdp_21(self, env):
    """
    During Express Upgrade from HDP 2.1 to any higher version (HDP 2.2 or 2.3), the HDP 2.1 bits must be uninstalled.
    This is because /usr/bin/hadoop used to be a shell script in HDP 2.1, but in HDP 2.3 it is
    a symlink to /usr/hdp/current/hadoop-client/bin/hadoop
    so both versions cannot coexist.
    """
    Logger.info("Attempting to remove bits for HDP 2.1")
    config = Script.get_config()

    packages_to_remove = ["zookeeper", "hadoop", "hadoop-lzo", "hadoop-hdfs", "hadoop-libhdfs", "hadoop-yarn", "hadoop-client", "hadoop-mapreduce", "hive", "hive-hcatalog", "hive-jdbc", "hive-webhcat", "hcatalog", "webhcat-tar-hive", "webhcat-tar-pig", "oozie", "oozie-client", "pig", "sqoop", "tez" "falcon", "storm", "flume", "hbase", "phoenix"]
    packages_to_remove.reverse()
    Logger.info("Packages to remove: {0}".format(" ".join(packages_to_remove)))

    for name in packages_to_remove:
      Logger.info("Attempting to remove {0}".format(name))
      Package(name, action="remove")
def link_configs(struct_out_file):
  """
  Links configs, only on a fresh install of HDP-2.3 and higher
  """

  if not Script.is_hdp_stack_greater_or_equal("2.3"):
    Logger.info("Can only link configs for HDP-2.3 and higher.")
    return

  json_version = load_version(struct_out_file)

  if not json_version:
    Logger.info("Could not load 'version' from {0}".format(struct_out_file))
    return

  for k, v in conf_select.PACKAGE_DIRS.iteritems():
    conf_select.convert_conf_directories_to_symlinks(k, json_version, v)
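conf_select.PACKAGE_DIRS maps each package name to a list of conf-directory definitions; the loop above relies on their 'conf_dir' and 'current_dir' keys. An abbreviated, illustrative entry (paths assumed for an HDP install):

PACKAGE_DIRS_SAMPLE = {
  "hadoop": [
    {"conf_dir": "/etc/hadoop/conf",
     "current_dir": "/usr/hdp/current/hadoop-client/conf"}
  ]
}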
def get_not_managed_resources():
  """
  Returns the list of HDFS paths that are not managed by Ambari.
  The result contains all paths from hostLevelParams/not_managed_hdfs_path_list,
  minus any paths named by the config values listed in
  cluster-env/managed_hdfs_resource_property_names
  """
  config = Script.get_config()
  not_managed_hdfs_path_list = json.loads(config['hostLevelParams']['not_managed_hdfs_path_list'])[:]
  managed_hdfs_resource_property_names = config['configurations']['cluster-env']['managed_hdfs_resource_property_names']
  managed_hdfs_resource_property_list = filter(None, [p.strip() for p in managed_hdfs_resource_property_names.split(',')])

  for property_name in managed_hdfs_resource_property_list:
    property_value = default('/configurations/' + property_name, None)

    if property_value is None:
      Logger.warning(("Property {0} from cluster-env/managed_hdfs_resource_property_names not found in configurations. "
                     "Management of this DFS resource will not be forced.").format(property_name))
    else:
      while property_value in not_managed_hdfs_path_list:
        not_managed_hdfs_path_list.remove(property_value)

  return not_managed_hdfs_path_list
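A quick worked example of the subtraction (all values hypothetical):

# hostLevelParams/not_managed_hdfs_path_list:
#     '["/tmp", "/mr-history/done"]'
# cluster-env/managed_hdfs_resource_property_names:
#     'mapred-site/mapreduce.jobhistory.done-dir'
# configurations/mapred-site/mapreduce.jobhistory.done-dir:
#     '/mr-history/done'
#
# The property resolves to '/mr-history/done', so that path is stripped and
# get_not_managed_resources() returns ['/tmp'].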
Example #22
def expect(name, expected_type, default_value=None):
  """
  Expect the configuration to be of a certain type. If it is not, raise a
  reasonable error message for the user.

  Optionally, if the configuration is not found, default_value is returned.
  """
  subdicts = filter(None, name.split('/'))

  curr_dict = Script.get_config()
  for x in subdicts:
    if x in curr_dict:
      curr_dict = curr_dict[x]
    else:
      if default_value:
        return default_value
      # Report the name of the missing key rather than indexing the dict.
      return UnknownConfiguration(subdicts[-1])
  value = curr_dict
  
  if expected_type == bool:
    if isinstance(value, bool):
      return value
    elif isinstance(value, basestring):
      # value is a string here, so it cannot be None.
      if value.lower() == "true":
        value = True
      elif value.lower() == "false":
        value = False
      else:
        raise Fail("Configuration {0} expected to be boolean (true or false), but found '{1}'".format(name, value))
    else:
      type_name = type(value).__name__
      raise Fail("Configuration {0} expected to be boolean (true or false), but found instance of unknown type '{1}'".format(name, type_name))
  elif expected_type in [int, long, float]:
    try:
      value = expected_type(value)
    except (ValueError, TypeError):
      raise Fail("Configuration {0} expected to be number, but found '{1}'".format(name, value))
  return value
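Typical call sites look like the following; the paths are illustrative (compare the java_version lookup in the last example of this listing):

java_version = expect("/ambariLevelParams/java_version", int)
retry_count = expect("/hostLevelParams/agent_stack_retry_count", int, default_value=5)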
Example #23
import functools
import os

from resource_management.libraries.functions import format
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.script import Script
from resource_management.libraries.functions.version import format_stack_version

import status_params

# server configurations
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()

install_dir = config['configurations']['metron-env']['install_dir']
download_url = config['configurations']['metron-env']['download_url']
filename = download_url.split('/')[-1]
version_dir = filename.replace('.tar.gz', '').replace('.tgz', '')

hostname = config['hostname']
metron_home = status_params.metron_home
metron_version = config['configurations']['metron-env']['metron_version']

parsers = status_params.parsers
parser_error_topic = config['configurations']['metron-parsers-env'][
    'parser_error_topic']
geoip_hdfs_dir = "/apps/metron/geo/default/"
def setup_ranger_admin(upgrade_type=None):
    import params

    if upgrade_type is None:
        upgrade_type = Script.get_upgrade_type(
            default("/commandParams/upgrade_type", ""))

    ranger_home = params.ranger_home
    ranger_conf = params.ranger_conf

    Directory(ranger_conf,
              owner=params.unix_user,
              group=params.unix_group,
              create_parents=True)

    copy_jdbc_connector(ranger_home)

    File(
        format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
        content=DownloadSource(
            format("{jdk_location}/{check_db_connection_jar_name}")),
        mode=0644,
    )

    cp = format("{check_db_connection_jar}")
    if params.db_flavor.lower() == 'sqla':
        cp = cp + os.pathsep + format("{ranger_home}/ews/lib/sajdbc4.jar")
    else:
        cp = cp + os.pathsep + format("{driver_curl_target}")
    cp = cp + os.pathsep + format("{ranger_home}/ews/lib/*")

    db_connection_check_command = format(
        "{java_home}/bin/java -cp {cp} org.apache.ambari.server.DBConnectionVerification '{ranger_jdbc_connection_url}' {ranger_db_user} {ranger_db_password!p} {ranger_jdbc_driver}"
    )

    env_dict = {}
    if params.db_flavor.lower() == 'sqla':
        env_dict = {'LD_LIBRARY_PATH': params.ld_lib_path}

    Execute(db_connection_check_command,
            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
            tries=5,
            try_sleep=10,
            environment=env_dict)

    Execute(
        ('ln', '-sf', format('{ranger_home}/ews/webapp/WEB-INF/classes/conf'),
         format('{ranger_home}/conf')),
        not_if=format("ls {ranger_home}/conf"),
        only_if=format("ls {ranger_home}/ews/webapp/WEB-INF/classes/conf"),
        sudo=True)

    if upgrade_type is not None:
        src_file = format(
            '{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/ranger-admin-default-site.xml'
        )
        dst_file = format('{ranger_home}/conf/ranger-admin-default-site.xml')
        Execute(('cp', '-f', src_file, dst_file), sudo=True)

        src_file = format(
            '{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/security-applicationContext.xml'
        )
        dst_file = format('{ranger_home}/conf/security-applicationContext.xml')

        Execute(('cp', '-f', src_file, dst_file), sudo=True)

    Directory(
        format('{ranger_home}/'),
        owner=params.unix_user,
        group=params.unix_group,
        recursive_ownership=True,
    )

    Directory(params.ranger_pid_dir,
              mode=0755,
              owner=params.unix_user,
              group=params.user_group,
              cd_access="a",
              create_parents=True)

    if params.stack_supports_pid:
        File(
            format('{ranger_conf}/ranger-admin-env-piddir.sh'),
            content=format(
                "export RANGER_PID_DIR_PATH={ranger_pid_dir}\nexport RANGER_USER={unix_user}"
            ),
            owner=params.unix_user,
            group=params.unix_group,
            mode=0755)

    Directory(params.admin_log_dir,
              owner=params.unix_user,
              group=params.unix_group,
              create_parents=True,
              cd_access='a',
              mode=0755)

    File(format('{ranger_conf}/ranger-admin-env-logdir.sh'),
         content=format("export RANGER_ADMIN_LOG_DIR={admin_log_dir}"),
         owner=params.unix_user,
         group=params.unix_group,
         mode=0755)

    if os.path.isfile(params.ranger_admin_default_file):
        File(params.ranger_admin_default_file,
             owner=params.unix_user,
             group=params.unix_group)
    else:
        Logger.warning(
            'Required file {0} does not exist, copying the file to {1} path'.
            format(params.ranger_admin_default_file, ranger_conf))
        src_file = format(
            '{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/ranger-admin-default-site.xml'
        )
        dst_file = format('{ranger_home}/conf/ranger-admin-default-site.xml')
        Execute(('cp', '-f', src_file, dst_file), sudo=True)
        File(params.ranger_admin_default_file,
             owner=params.unix_user,
             group=params.unix_group)

    if os.path.isfile(params.security_app_context_file):
        File(params.security_app_context_file,
             owner=params.unix_user,
             group=params.unix_group)
    else:
        Logger.warning(
            'Required file {0} does not exist, copying the file to {1} path'.
            format(params.security_app_context_file, ranger_conf))
        src_file = format(
            '{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/security-applicationContext.xml'
        )
        dst_file = format('{ranger_home}/conf/security-applicationContext.xml')
        Execute(('cp', '-f', src_file, dst_file), sudo=True)
        File(params.security_app_context_file,
             owner=params.unix_user,
             group=params.unix_group)

    if upgrade_type is not None and params.stack_supports_config_versioning:
        if os.path.islink('/usr/bin/ranger-admin'):
            Link('/usr/bin/ranger-admin', action="delete")

        Link('/usr/bin/ranger-admin',
             to=format('{ranger_home}/ews/ranger-admin-services.sh'))

    if default(
            "/configurations/ranger-admin-site/ranger.authentication.method",
            "") == 'PAM':
        d = '/etc/pam.d'
        if os.path.isdir(d):
            if os.path.isfile(os.path.join(d, 'ranger-admin')):
                Logger.info('ranger-admin PAM file already exists.')
            else:
                File(format('{d}/ranger-admin'),
                     content=Template('ranger_admin_pam.j2'),
                     owner=params.unix_user,
                     group=params.unix_group,
                     mode=0644)
            if os.path.isfile(os.path.join(d, 'ranger-remote')):
                Logger.info('ranger-remote PAM file already exists.')
            else:
                File(format('{d}/ranger-remote'),
                     content=Template('ranger_remote_pam.j2'),
                     owner=params.unix_user,
                     group=params.unix_group,
                     mode=0644)
        else:
            Logger.error(
                "Unable to use PAM authentication, /etc/pam.d/ directory does not exist."
            )

    Execute(('ln', '-sf', format('{ranger_home}/ews/ranger-admin-services.sh'),
             '/usr/bin/ranger-admin'),
            not_if=format("ls /usr/bin/ranger-admin"),
            only_if=format("ls {ranger_home}/ews/ranger-admin-services.sh"),
            sudo=True)

    # remove plain-text password from xml configs

    ranger_admin_site_copy = {}
    ranger_admin_site_copy.update(
        params.config['configurations']['ranger-admin-site'])
    for prop in params.ranger_admin_password_properties:
        if prop in ranger_admin_site_copy:
            ranger_admin_site_copy[prop] = "_"

    XmlConfig("ranger-admin-site.xml",
              conf_dir=ranger_conf,
              configurations=ranger_admin_site_copy,
              configuration_attributes=params.config['configurationAttributes']
              ['ranger-admin-site'],
              owner=params.unix_user,
              group=params.unix_group,
              mode=0644)

    Directory(
        os.path.join(ranger_conf, 'ranger_jaas'),
        mode=0700,
        owner=params.unix_user,
        group=params.unix_group,
    )

    if params.stack_supports_ranger_log4j:
        File(format('{ranger_home}/ews/webapp/WEB-INF/log4j.properties'),
             owner=params.unix_user,
             group=params.unix_group,
             content=InlineTemplate(params.admin_log4j),
             mode=0644)

    do_keystore_setup(upgrade_type=upgrade_type)

    create_core_site_xml(ranger_conf)

    if params.stack_supports_ranger_kerberos and params.security_enabled:
        if params.is_hbase_ha_enabled and params.ranger_hbase_plugin_enabled:
            XmlConfig(
                "hbase-site.xml",
                conf_dir=ranger_conf,
                configurations=params.config['configurations']['hbase-site'],
                configuration_attributes=params.
                config['configurationAttributes']['hbase-site'],
                owner=params.unix_user,
                group=params.unix_group,
                mode=0644)

        if params.is_namenode_ha_enabled and params.ranger_hdfs_plugin_enabled:
            XmlConfig(
                "hdfs-site.xml",
                conf_dir=ranger_conf,
                configurations=params.config['configurations']['hdfs-site'],
                configuration_attributes=params.
                config['configurationAttributes']['hdfs-site'],
                owner=params.unix_user,
                group=params.unix_group,
                mode=0644)
Example #25
from resource_management.libraries.script import Script
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions import default, format

config = Script.get_config()
tmp_dir = Script.get_tmp_dir()


# script
check_status_script = "{0}/checkStatus.sh".format(tmp_dir)

# process names
proc_nimbus_name = "com.tencent.jstorm.daemon.nimbus.NimbusServer"
proc_supervisor_name = "com.tencent.jstorm.daemon.supervisor.Supervisor"
proc_ui_server_name = "com.tencent.jstorm.ui.core.UIServer"

Example #26
from resource_management.libraries.functions.is_empty import is_empty
from resource_management.libraries.functions.constants import Direction
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.stack_features import get_stack_feature_version
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.get_bare_principal import get_bare_principal

# a map of the Ambari role to the component name
# for use with <stack-root>/current/<component>
SERVER_ROLE_DIRECTORY_MAP = {
    'RANGER_ADMIN': 'ranger-admin',
    'RANGER_USERSYNC': 'ranger-usersync',
    'RANGER_TAGSYNC': 'ranger-tagsync'
}

component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP,
                                                     "RANGER_ADMIN")

config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
stack_root = Script.get_stack_root()

stack_name = default("/hostLevelParams/stack_name", None)
version = default("/commandParams/version", None)

stack_version_unformatted = config['hostLevelParams']['stack_version']
stack_version_formatted = format_stack_version(stack_version_unformatted)
major_stack_version = get_major_version(stack_version_formatted)

upgrade_marker_file = format("{tmp_dir}/rangeradmin_ru.inprogress")

xml_configurations_supported = config['configurations']['ranger-env']['xml_configurations_supported']
Example #27
#!/usr/bin/env python

from resource_management.libraries.script import Script
from resource_management.libraries.functions import default
import functools
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.resources import HdfsResource
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources

# server configurations
config = Script.get_config()
stack_root = Script.get_stack_root()

wormhole_user = config['configurations']['wormhole-env']['wormhole_user']
log_dir = config['configurations']['wormhole-env']['log_dir']
pid_dir = config['configurations']['wormhole-env']['pid_dir']

hostname = config['agentLevelParams']['hostname']
java64_home = config['ambariLevelParams']['java_home']

install_dir = stack_root + '/wormhole'
download_url = config['configurations']['wormhole-env']['download_url']
filename = download_url.split('/')[-1]
version_dir = filename.replace('.tar.gz',
                               '').replace('.tgz', '').replace('.tar.bz2', '')

conf_content = config['configurations']['wormhole-env']['conf_content']
cluster_id = config['configurations']['wormhole-env']['cluster_id']
db_url = config['configurations']['wormhole-env']['db_url']
cluster_name = config['clusterName']
flink_home = stack_root + '/flink'
Example #28
"""

"""

from resource_management.libraries.script import Script
from resource_management.libraries.functions import format


app = 'unloadCron'
app_exec = format('{app}.pl')

app_root = '/var/lib/unload'
app_sbin = format('{app_root}/sbin')
app_bin  = format('{app_root}/bin')
app_start = 'unload_start.sh'

config = Script.get_config()

base_dir = config['configurations']['unload-env']['base.dir']
user = config['configurations']['unload-env']['unload_user']
group = config['configurations']['unload-env']['user_group']

concurrency = config['configurations']['unload-site']['concurrency']
throttle = config['configurations']['unload-site']['throttle']
garbage_seconds = config['configurations']['unload-site']['garbage.seconds']
Example #29
    def actionexecute(self, env):
        resolve_ambari_config()

        # Parse parameters from command json file.
        config = Script.get_config()

        host_name = socket.gethostname()
        version = default('/roleParams/version', None)

        # These 2 variables are optional
        service_package_folder = default('/roleParams/service_package_folder',
                                         None)
        hooks_folder = default('/roleParams/hooks_folder', None)

        tasks = json.loads(config['roleParams']['tasks'])
        if tasks:
            for t in tasks:
                task = ExecuteTask(t)
                Logger.info(str(task))

                # If a (script, function) pair exists, it overrides the command.
                if task.script and task.function:
                    file_cache = FileCache(agent_config)

                    server_url_prefix = default(
                        '/hostLevelParams/jdk_location', "")

                    if service_package_folder and hooks_folder:
                        command_paths = {
                            "commandParams": {
                                "service_package_folder":
                                service_package_folder,
                                "hooks_folder": hooks_folder
                            }
                        }

                        base_dir = file_cache.get_service_base_dir(
                            command_paths, server_url_prefix)
                    else:
                        base_dir = file_cache.get_custom_actions_base_dir(
                            server_url_prefix)

                    script_path = os.path.join(base_dir, task.script)
                    if not os.path.exists(script_path):
                        message = "Script %s does not exist" % str(script_path)
                        raise Fail(message)

                    # Notice that the script_path is now the fully qualified path, and the
                    # same command-#.json file is used.
                    # Also, the python wrapper is used, since it sets up the correct environment variables
                    command_params = [
                        "/usr/bin/ambari-python-wrap", script_path,
                        task.function, self.command_data_file, self.basedir,
                        self.stroutfile, self.logging_level,
                        Script.get_tmp_dir()
                    ]

                    task.command = "source /var/lib/ambari-agent/ambari-env.sh ; " + " ".join(
                        command_params)
                    # Collapse redundant whitespace to make the unit tests easier to validate
                    task.command = re.sub(r"\s+", " ", task.command).strip()

                if task.command:
                    task.command = replace_variables(task.command, host_name,
                                                     version)
                    shell.checked_call(task.command,
                                       logoutput=True,
                                       quiet=True)
Example #30

import os

from ambari_commons.constants import AMBARI_SUDO_BINARY
from resource_management.libraries.script import Script
from resource_management.libraries.functions import default
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import hdp_select
from resource_management.libraries.functions import format_jvm_option
from resource_management.libraries.functions.version import format_hdp_stack_version

config = Script.get_config()
tmp_dir = Script.get_tmp_dir()

dfs_type = default("/commandParams/dfs_type", "")

is_parallel_execution_enabled = int(
    default("/agentConfigParams/agent/parallel_execution", 0)) == 1

sudo = AMBARI_SUDO_BINARY

stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)

# current host stack version
current_version = default("/hostLevelParams/current_version", None)
Example #31
#!/usr/bin/env python

from resource_management.libraries.script import Script

config = Script.get_config()

airflow_user = "******"
airflow_group = "airflow"
airflow_home = config['configurations']['airflow-core-site']['airflow_home']

dirs = [
    airflow_home, config['configurations']['airflow-core-site']['dags_folder'],
    config['configurations']['airflow-core-site']['base_log_folder'],
    config['configurations']['airflow-core-site']['plugins_folder'],
    config['configurations']['airflow-scheduler-site']
    ['child_process_log_directory']
]
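A plausible follow-up in the service scripts, sketched here rather than taken from this source, is to materialize those directories with the Directory resource:

from resource_management.core.resources.system import Directory

for airflow_dir in dirs:
  # Create each Airflow directory with the configured owner and group.
  Directory(airflow_dir,
            owner=airflow_user,
            group=airflow_group,
            create_parents=True,
            mode=0755)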
Example #32
from resource_management.libraries.script import Script
from resource_management.libraries.functions.version import format_stack_version, get_major_version
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.stack_features import get_stack_feature_version
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.get_bare_principal import get_bare_principal
from resource_management.libraries.functions.is_empty import is_empty
from resource_management.libraries.functions.setup_ranger_plugin_xml import generate_ranger_service_config
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import get_kinit_path
from resource_management.core.exceptions import Fail

config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
stack_root = Script.get_stack_root()

stack_name = default("/clusterLevelParams/stack_name", None)
version = default("/commandParams/version", None)
upgrade_direction = default("/commandParams/upgrade_direction", None)

stack_version_unformatted = config['clusterLevelParams']['stack_version']
stack_version_formatted = format_stack_version(stack_version_unformatted)
major_stack_version = get_major_version(stack_version_formatted)

# get the correct version to use for checking stack features
version_for_stack_feature_checks = get_stack_feature_version(config)

stack_supports_config_versioning = check_stack_feature(StackFeature.CONFIG_VERSIONING, version_for_stack_feature_checks)
Example #33
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.is_empty import is_empty
from resource_management.libraries.functions.constants import Direction
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions import StackFeature

# a map of the Ambari role to the component name
# for use with <stack-root>/current/<component>
SERVER_ROLE_DIRECTORY_MAP = {
  'RANGER_ADMIN' : 'ranger-admin',
  'RANGER_USERSYNC' : 'ranger-usersync',
  'RANGER_TAGSYNC' : 'ranger-tagsync'
}

component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "RANGER_ADMIN")

config  = Script.get_config()
tmp_dir = Script.get_tmp_dir()
stack_root = Script.get_stack_root()

stack_name = default("/hostLevelParams/stack_name", None)
version = default("/commandParams/version", None)
host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)

stack_version_unformatted = config['hostLevelParams']['stack_version']
stack_version_formatted = format_stack_version(stack_version_unformatted)

upgrade_marker_file = format("{tmp_dir}/rangeradmin_ru.inprogress")

xml_configurations_supported = config['configurations']['ranger-env']['xml_configurations_supported']
Example #34

from ambari_commons.constants import AMBARI_SUDO_BINARY
from resource_management.libraries.script import Script
from resource_management.libraries.functions import default
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import hdp_select
from resource_management.libraries.functions import format_jvm_option
from resource_management.libraries.functions.version import format_hdp_stack_version

from resource_management.core.system import System
from ambari_commons.os_check import OSCheck

config = Script.get_config()
sudo = AMBARI_SUDO_BINARY

stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)

# default hadoop params
mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
hadoop_libexec_dir = hdp_select.get_hadoop_dir("libexec")
hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"

# HDP 2.2+ params
if Script.is_hdp_stack_greater_or_equal("2.2"):
  mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"

  # not supported in HDP 2.2+
  hadoop_conf_empty_dir = None
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import format
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.script import Script
from resource_management.libraries.functions.version import format_stack_version
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions import StackFeature

import status_params

# server configurations
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()

hdp_version = default("/commandParams/version", None)

hostname = config['hostname']
metron_home = status_params.metron_home
metron_apps_hdfs_dir = config['configurations']['metron-env']['metron_apps_hdfs_dir']

parsers = status_params.parsers
parser_error_topic = config['configurations']['metron-parsers-env']['parser_error_topic']
geoip_hdfs_dir = metron_apps_hdfs_dir + "/geo/default/"
asn_hdfs_dir = metron_apps_hdfs_dir + "/asn/default/"
hbase_coprocessor_local_dir = format("{metron_home}/coprocessor")
hbase_coprocessor_hdfs_dir = metron_apps_hdfs_dir + "/coprocessor"
metron_user = status_params.metron_user

from resource_management.libraries.functions.version import format_hdp_stack_version
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.get_kinit_path import get_kinit_path
from resource_management.libraries.script import Script

# a map of the Ambari role to the component name
# for use with /usr/hdp/current/<component>
SERVER_ROLE_DIRECTORY_MAP = {
  'SQOOP' : 'sqoop-client'
}

component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "SQOOP")

config = Script.get_config()
ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]

stack_name = default("/hostLevelParams/stack_name", None)

stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)

# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
version = default("/commandParams/version", None)

# default hadoop params
sqoop_conf_dir = "/usr/lib/sqoop/conf"
sqoop_lib = "/usr/lib/sqoop/lib"
Example #37
def _call_command(command, logoutput=False, cwd=None, env=None, wait_for_finish=True, timeout=None, user=None):
  # TODO implement timeout, wait_for_finish
  Logger.info("Executing %s" % (command))
  if user:
    domain, username = UserHelper.parse_user_name(user, ".")

    proc_token = OpenProcessToken(GetCurrentProcess(), TOKEN_QUERY | TOKEN_ADJUST_PRIVILEGES)

    old_states = []

    privileges = [
      SE_ASSIGNPRIMARYTOKEN_NAME,
      SE_INCREASE_QUOTA_NAME,
    ]

    for priv in privileges:
      old_states.append(QueryPrivilegeState(proc_token, priv))
      AdjustPrivilege(proc_token, priv)
      QueryPrivilegeState(proc_token, priv)

    user_token = LogonUser(username, domain, Script.get_password(user), win32con.LOGON32_LOGON_SERVICE,
                           win32con.LOGON32_PROVIDER_DEFAULT)
    env_token = DuplicateTokenEx(user_token, SecurityIdentification, TOKEN_QUERY, TokenPrimary)
    # getting updated environment for impersonated user and merge it with custom env
    current_env = CreateEnvironmentBlock(env_token, False)
    current_env = _merge_env(current_env, env)

    si = STARTUPINFO()
    out_handle, err_handle, out_file, err_file = _create_tmp_files(current_env)
    ok, si.hStdInput = _safe_duplicate_handle(GetStdHandle(STD_INPUT_HANDLE))
    if not ok:
      raise Exception("Unable to create StdInput for child process")
    ok, si.hStdOutput = _safe_duplicate_handle(out_handle)
    if not ok:
      raise Exception("Unable to create StdOut for child process")
    ok, si.hStdError = _safe_duplicate_handle(err_handle)
    if not ok:
      raise Exception("Unable to create StdErr for child process")

    Logger.debug("Redirecting stdout to '{0}', stderr to '{1}'".format(out_file.name, err_file.name))

    si.dwFlags = win32con.STARTF_USESTDHANDLES
    si.lpDesktop = ""

    try:
      info = CreateProcessAsUser(user_token, None, command, None, None, 1, win32con.CREATE_NO_WINDOW, current_env, cwd, si)
      hProcess, hThread, dwProcessId, dwThreadId = info
      hThread.Close()

      try:
        WaitForSingleObject(hProcess, INFINITE)
      except KeyboardInterrupt:
        pass
      out, err = _get_files_output(out_file, err_file)
      code = GetExitCodeProcess(hProcess)
    finally:
      for priv in privileges:
        old_state = old_states.pop(0)
        AdjustPrivilege(proc_token, priv, old_state)
  else:
    # getting updated environment for current process and merge it with custom env
    cur_token = OpenProcessToken(GetCurrentProcess(), TOKEN_QUERY)
    current_env = CreateEnvironmentBlock(cur_token, False)
    current_env = _merge_env(current_env, env)
    proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                            cwd=cwd, env=current_env, shell=False)
    out, err = proc.communicate()
    code = proc.returncode

  if logoutput and out:
    Logger.info(out)
  if logoutput and err:
    Logger.info(err)
  return code, out, err
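A minimal usage sketch (the command is arbitrary):

code, out, err = _call_command("hostname", logoutput=True)
if code != 0:
  raise Exception("Command failed with code %s: %s" % (code, err))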
Example #38
import status_params

from ambari_commons.constants import AMBARI_SUDO_BINARY
from resource_management.libraries.functions.constants import Direction
from resource_management.libraries.functions import format
from resource_management.libraries.functions.version import format_hdp_stack_version
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.get_bare_principal import get_bare_principal
from resource_management.libraries.script import Script
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions import hdp_select
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import get_kinit_path

# server configurations
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
sudo = AMBARI_SUDO_BINARY

stack_name = default("/hostLevelParams/stack_name", None)
upgrade_direction = default("/commandParams/upgrade_direction", Direction.UPGRADE)
version = default("/commandParams/version", None)

storm_component_home_dir = status_params.storm_component_home_dir
conf_dir = status_params.conf_dir

stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
stack_is_hdp22_or_further = Script.is_hdp_stack_greater_or_equal("2.2")

# default hadoop params
Example #39
import re

import ambari_simplejson as json  # simplejson is much faster compared to the Python 2.6 json module and has the same set of functions.

from resource_management.libraries.script import Script
from resource_management.libraries.functions import default
from resource_management.libraries.functions import format
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import hdp_select
from resource_management.libraries.functions import format_jvm_option
from resource_management.libraries.functions.version import format_hdp_stack_version
from resource_management.libraries.functions.version import compare_versions
from ambari_commons.os_check import OSCheck


config = Script.get_config()
tmp_dir = Script.get_tmp_dir()

artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
jce_location = config['hostLevelParams']['jdk_location']
jdk_name = default("/hostLevelParams/jdk_name", None)
java_home = config['hostLevelParams']['java_home']
java_version = int(config['hostLevelParams']['java_version'])

ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]

stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)

security_enabled = config['configurations']['cluster-env']['security_enabled']
Example #40
from resource_management.libraries.script import Script
from resource_management.libraries.functions import default
from resource_management.libraries.functions import format
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import format_jvm_option
from resource_management.libraries.functions.is_empty import is_empty
from resource_management.libraries.functions.version import format_stack_version
from resource_management.libraries.functions.expect import expect
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.stack_features import get_stack_feature_version
from resource_management.libraries.functions.get_architecture import get_architecture
from ambari_commons.constants import AMBARI_SUDO_BINARY

config = Script.get_config()
tmp_dir = Script.get_tmp_dir()

stack_root = Script.get_stack_root()

architecture = get_architecture()

dfs_type = default("/commandParams/dfs_type", "")

artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
jdk_name = default("/ambariLevelParams/jdk_name", None)
java_home = config['ambariLevelParams']['java_home']
java_version = expect("/ambariLevelParams/java_version", int)
jdk_location = config['ambariLevelParams']['jdk_location']

hadoop_custom_extensions_enabled = default("/configurations/core-site/hadoop.custom-extensions.enabled", False)