def get_role_component_current_hdp_version():
  """
  Gets the current HDP version of the component that this role command is for.
  :return:  the current HDP version of the specified component or None
  """
  hdp_select_component = None
  role = default("/role", "")
  role_command = default("/roleCommand", "")

  if role in SERVER_ROLE_DIRECTORY_MAP:
    hdp_select_component = SERVER_ROLE_DIRECTORY_MAP[role]
  elif role_command == "SERVICE_CHECK" and role in SERVICE_CHECK_DIRECTORY_MAP:
    hdp_select_component = SERVICE_CHECK_DIRECTORY_MAP[role]

  if hdp_select_component is None:
    return None

  current_hdp_version = get_hdp_version(hdp_select_component)

  if current_hdp_version is None:
    Logger.warning("Unable to determine hdp-select version for {0}".format(
      hdp_select_component))
  else:
    Logger.info("{0} is currently at version {1}".format(
      hdp_select_component, current_hdp_version))

  return current_hdp_version
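# Hedged usage sketch (not from the original source): callers typically branch on
# the None return before doing version-specific work; Logger is assumed to be the
# same logger used above.
current_component_version = get_role_component_current_hdp_version()
if current_component_version is None:
  Logger.info("No hdp-select version detected; skipping version-specific logic")
else:
  Logger.info("Proceeding with component version {0}".format(current_component_version))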
def _get_tar_source_and_dest_folder(tarball_prefix):
  """
  :param tarball_prefix: Prefix of the tarball; must be one of tez, hive, mr, pig
  :return: A tuple of (tar_source_file, tar_destination_folder), or (None, None) if the properties are missing or invalid
  """
  component_tar_source_file = default("/configurations/cluster-env/%s%s" % (tarball_prefix.lower(), TAR_SOURCE_SUFFIX), None)
  # E.g., /usr/hdp/current/hadoop-client/tez-{{ hdp_stack_version }}.tar.gz

  component_tar_destination_folder = default("/configurations/cluster-env/%s%s" % (tarball_prefix.lower(), TAR_DESTINATION_FOLDER_SUFFIX), None)
  # E.g., hdfs:///hdp/apps/{{ hdp_stack_version }}/mapreduce/

  if not component_tar_source_file or not component_tar_destination_folder:
    Logger.warning("Did not find %s tar source file and destination folder properties in cluster-env.xml" %
                   tarball_prefix)
    return None, None

  if component_tar_source_file.find("/") == -1:
    Logger.warning("The tar file path %s is not valid" % str(component_tar_source_file))
    return None, None

  if not component_tar_destination_folder.endswith("/"):
    component_tar_destination_folder = component_tar_destination_folder + "/"

  if not component_tar_destination_folder.startswith("hdfs://"):
    return None, None

  return component_tar_source_file, component_tar_destination_folder
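# Hedged usage sketch (illustrative, not from the source): callers must handle the
# (None, None) sentinel before attempting a tarball copy; "tez" is one of the
# prefixes named in the docstring above.
tez_source_file, tez_destination_folder = _get_tar_source_and_dest_folder("tez")
if tez_source_file and tez_destination_folder:
  Logger.info("Would copy {0} to {1}".format(tez_source_file, tez_destination_folder))
else:
  Logger.warning("Skipping tez tarball copy; source/destination properties are missing or invalid")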
  def actionexecute(self, env):
    config = Script.get_config()

    version = default('/commandParams/version', None)
    stack_name = default('/hostLevelParams/stack_name', "")

    if not version:
      raise Fail("Value is required for '/commandParams/version'")
  
    # other os?
    if OSCheck.is_redhat_family():
      cmd = ('/usr/bin/yum', 'clean', 'all')
      code, out = shell.call(cmd, sudo=True)

    min_ver = format_hdp_stack_version("2.2")
    real_ver = format_hdp_stack_version(version)
    if stack_name == "HDP":
      if compare_versions(real_ver, min_ver) >= 0:
        cmd = ('hdp-select', 'set', 'all', version)
        code, out = shell.call(cmd, sudo=True)

      if compare_versions(real_ver, format_hdp_stack_version("2.3")) >= 0:
        # backup the old and symlink /etc/[component]/conf to /usr/hdp/current/[component]
        for k, v in conf_select.PACKAGE_DIRS.iteritems():
          for dir_def in v:
            link_config(dir_def['conf_dir'], dir_def['current_dir'])
Example #4
def get_current_version(use_upgrading_version_during_upgrade=True):
  """
  Get the effective version to use to copy the tarballs to.
  :param use_upgrading_version_during_upgrade: True, except when the RU/EU hasn't started yet.
  :return: Version, or False if an error occurred.
  """
  upgrade_direction = default("/commandParams/upgrade_direction", None)
  is_stack_upgrade = upgrade_direction is not None
  current_version = default("/hostLevelParams/current_version", None)
  Logger.info("Default version is {0}".format(current_version))
  if is_stack_upgrade:
    if use_upgrading_version_during_upgrade:
      # This is the version going to. In the case of a downgrade, it is the lower version.
      current_version = default("/commandParams/version", None)
      Logger.info("Because this is a Stack Upgrade, will use version {0}".format(current_version))
    else:
      Logger.info("This is a Stack Upgrade, but keep the version unchanged.")
  else:
    if current_version is None:
      # During normal operation, the first installation of services won't yet know about the version, so must rely
      # on <stack-selector> to get it.
      stack_version = _get_single_version_from_stack_select()
      if stack_version:
        Logger.info("Will use stack version {0}".format(stack_version))
        current_version = stack_version

  if current_version is None:
    message_suffix = "during stack %s" % str(upgrade_direction) if is_stack_upgrade else ""
    Logger.warning("Cannot copy tarball because unable to determine current version {0}.".format(message_suffix))
    return False

  return current_version
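# Hedged usage sketch (assumption, not from the source): tarball-copy callers treat
# a False return as "nothing to do" rather than an error to raise.
effective_version = get_current_version(use_upgrading_version_during_upgrade=True)
if effective_version is False:
  Logger.warning("Could not determine an effective version; tarball copy will be skipped")
else:
  Logger.info("Tarballs would be copied for version {0}".format(effective_version))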
Example #5
  def test_execution(self, cache_mock, get_config_mock, get_tmp_dir_mock, get_config_file_mock, os_path_exists_mock, call_mock):
    # Mock the config objects
    json_file_path = os.path.join(self.get_custom_actions_dir(), "ru_execute_tasks_namenode_prepare.json")
    self.assertTrue(os.path.isfile(json_file_path))
    with open(json_file_path, "r") as json_file:
      json_payload = json.load(json_file)

    config_dict = ConfigDictionary(json_payload)

    cache_mock.return_value = "/var/lib/ambari-agent/cache/common-services/HDFS/2.1.0.2.0/package"
    get_config_mock.return_value = config_dict
    get_tmp_dir_mock.return_value = "/tmp"
    ambari_agent_ini_file_path = os.path.join(self.get_src_folder(), "../../ambari-agent/conf/unix/ambari-agent.ini")
    self.assertTrue(os.path.isfile(ambari_agent_ini_file_path))
    get_config_file_mock.return_value = ambari_agent_ini_file_path

    # Mock os calls
    os_path_exists_mock.return_value = True
    call_mock.side_effect = fake_call   # echo the command

    # Ensure that the json file was actually read.
    stack_name = default("/hostLevelParams/stack_name", None)
    stack_version = default("/hostLevelParams/stack_version", None)
    service_package_folder = default('/roleParams/service_package_folder', None)

    self.assertEqual(stack_name, "HDP")
    self.assertEqual(stack_version, "2.2")
    self.assertEqual(service_package_folder, "common-services/HDFS/2.1.0.2.0/package")

    # Begin the test
    ru_execute = ExecuteUpgradeTasks()
    ru_execute.actionexecute(None)

    call_mock.assert_called_with("source /var/lib/ambari-agent/ambari-env.sh ; /usr/bin/ambari-python-wrap /var/lib/ambari-agent/cache/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py prepare_rolling_upgrade /tmp", logoutput=True, quiet=True)
  def delete_storm_local_data(self, env):
    """
    Deletes Storm data from local directories. This will create a marker file
    with JSON data representing the upgrade stack and request/stage ID. This
    will prevent multiple Storm components on the same host from removing
    the local directories more than once.
    :return:
    """
    import params

    Logger.info('Clearing Storm data from local directories...')

    storm_local_directory = params.local_dir
    if storm_local_directory is None:
      raise Fail("The storm local directory specified by storm-site/storm.local.dir must be specified")

    request_id = default("/requestId", None)
    stage_id = default("/stageId", None)
    stack_version = params.version
    stack_name = params.stack_name

    json_map = {}
    json_map["requestId"] = request_id
    json_map["stageId"] = stage_id
    json_map["stackVersion"] = stack_version
    json_map["stackName"] = stack_name

    temp_directory = params.tmp_dir
    upgrade_file = os.path.join(temp_directory, "storm-upgrade-{0}.json".format(stack_version))

    if os.path.exists(upgrade_file):
      try:
        with open(upgrade_file) as file_pointer:
          existing_json_map = json.load(file_pointer)

        if cmp(json_map, existing_json_map) == 0:
          Logger.info("The storm upgrade has already removed the local directories for {0}-{1} for request {2} and stage {3}".format(
            stack_name, stack_version, request_id, stage_id))

          # nothing else to do here for this as it appears to have already been
          # removed by another component being upgraded
          return
        else:
          # the marker contents do not match; delete the stale upgrade file
          File(upgrade_file, action="delete")

      except:
        Logger.error("The upgrade file {0} appears to be corrupt; removing...".format(upgrade_file))
        File(upgrade_file, action="delete")

    # delete from local directory
    Directory(storm_local_directory, action="delete", recursive=True)

    # recreate storm local directory
    Directory(storm_local_directory, mode=0755, owner = params.storm_user,
      group = params.user_group, recursive = True)

    # write the marker file with the current upgrade information
    with open(upgrade_file, 'w') as file_pointer:
      json.dump(json_map, file_pointer, indent=2)
Example #7
def get_role_component_current_stack_version():
  """
  Gets the current HDP version of the component that this role command is for.
  :return:  the current HDP version of the specified component or None
  """
  stack_select_component = None
  role = default("/role", "")
  role_command = default("/roleCommand", "")
  stack_selector_name = stack_tools.get_stack_tool_name(stack_tools.STACK_SELECTOR_NAME)

  if role in SERVER_ROLE_DIRECTORY_MAP:
    stack_select_component = SERVER_ROLE_DIRECTORY_MAP[role]
  elif role_command == "SERVICE_CHECK" and role in SERVICE_CHECK_DIRECTORY_MAP:
    stack_select_component = SERVICE_CHECK_DIRECTORY_MAP[role]

  if stack_select_component is None:
    return None

  current_stack_version = get_stack_version(stack_select_component)

  if current_stack_version is None:
    Logger.warning("Unable to determine {0} version for {1}".format(
      stack_selector_name, stack_select_component))
  else:
    Logger.info("{0} is currently at version {1}".format(
      stack_select_component, current_stack_version))

  return current_stack_version
  def actionexecute(self, env):
    resolve_ambari_config()

    # Parse parameters from command json file.
    config = Script.get_config()

    host_name = socket.gethostname()
    version = default('/roleParams/version', None)

    # These 2 variables are optional
    service_package_folder = default('/roleParams/service_package_folder', None)
    hooks_folder = default('/roleParams/hooks_folder', None)

    tasks = json.loads(config['roleParams']['tasks'])
    if tasks:
      for t in tasks:
        task = ExecuteTask(t)
        Logger.info(str(task))

        # If a (script, function) exists, it overwrites the command.
        if task.script and task.function and service_package_folder and hooks_folder:
          file_cache = FileCache(agent_config)
          command_paths = {"commandParams":
                                 {"service_package_folder": service_package_folder,
                                  "hooks_folder": hooks_folder
                                 }
                              }
          server_url_prefix = default('/hostLevelParams/jdk_location', "")
          base_dir = file_cache.get_service_base_dir(command_paths, server_url_prefix)
          script_path = os.path.join(base_dir, task.script)
          if not os.path.exists(script_path):
            message = "Script %s does not exist" % str(script_path)
            raise Fail(message)

          # Notice that the script_path is now the fully qualified path, and the
          # same command-#.json file is used.
          # Also, the python wrapper is used, since it sets up the correct environment variables
          command_params = ["/usr/bin/ambari-python-wrap",
                            script_path,
                            task.function,
                            self.command_data_file,
                            self.basedir,
                            self.stroutfile,
                            self.logging_level,
                            Script.get_tmp_dir()]

          task.command = " ".join(command_params)
          # Replace redundant whitespace to make the unit tests easier to validate
          task.command = re.sub(r"\s+", " ", task.command).strip()

        if task.command:
          task.command = replace_variables(task.command, host_name, version)
          code, out = shell.call(task.command)
          Logger.info("Command: %s\nCode: %s, Out: %s" % (task.command, str(code), str(out)))
          if code != 0:
            raise Fail(out)
Example #9
  def set_version(self):
    from resource_management.libraries.functions.default import default
    stack_name = default("/hostLevelParams/stack_name", None)
    version = default("/commandParams/version", None)
    stack_version_unformatted = str(default("/hostLevelParams/stack_version", ""))
    hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
    stack_to_component = self.get_stack_to_component()
    if stack_to_component:
      component_name = stack_to_component[stack_name] if stack_name in stack_to_component else None
      if component_name and stack_name and version and \
              compare_versions(format_hdp_stack_version(hdp_stack_version), '2.2.0.0') >= 0:
        Execute(('/usr/bin/hdp-select', 'set', component_name, version),
                sudo = True)
Example #10
def _get_upgrade_stack():
  """
  Gets the stack name and stack version if an upgrade is currently in progress.
  :return:  the stack name and stack version as a tuple, or None if an
  upgrade is not in progress.
  """
  from resource_management.libraries.functions.default import default
  direction = default("/commandParams/upgrade_direction", None)
  stack_name = default("/hostLevelParams/stack_name", None)
  stack_version = default("/commandParams/version", None)

  if direction and stack_name and stack_version:
    return (stack_name, stack_version)

  return None
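# Hedged usage sketch (illustrative only): the helper returns a (name, version)
# tuple during an upgrade and None otherwise.
upgrade_stack = _get_upgrade_stack()
if upgrade_stack is not None:
  upgrade_stack_name, upgrade_stack_version = upgrade_stack
  Logger.info("Upgrade in progress for {0}-{1}".format(upgrade_stack_name, upgrade_stack_version))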
def post_upgrade_check():
    """
  Ensure all journal nodes are up and quorum is established
  :return:
  """
    import params

    Logger.info("Ensuring Journalnode quorum is established")

    if params.security_enabled:
        Execute(params.jn_kinit_cmd, user=params.hdfs_user)

    time.sleep(5)
    hdfs_roll_edits()
    time.sleep(5)

    all_journal_node_hosts = default("/clusterHostInfo/journalnode_hosts", [])

    if len(all_journal_node_hosts) < 3:
        raise Fail("Need at least 3 Journalnodes to maintain a quorum")

    try:
        namenode_ha = namenode_ha_state.NamenodeHAState()
    except ValueError, err:
        raise Fail("Could not retrieve Namenode HA addresses. Error: " + str(err))
Example #12
def setup_atlas_hook(service_name, service_props, atlas_hook_filepath, owner, group):
  """
  Generate the atlas-application.properties.xml file by merging the service_props with the Atlas application-properties.
  :param service_name: Service Name to identify if it is a client-only service, which will generate slightly different configs.
  :param service_props: Atlas configs specific to this service that must be merged.
  :param atlas_hook_filepath: Config file to write, e.g., /etc/falcon/conf/atlas-application.properties.xml
  :param owner: File owner
  :param group: File group
  """
  import params
  atlas_props = default('/configurations/application-properties', {})

  if has_atlas_in_cluster():
    # Take the subset
    merged_props = {}
    shared_props = SHARED_ATLAS_HOOK_CONFIGS.copy()
    if service_name in NON_CLIENT_SERVICES:
      shared_props = shared_props.union(SHARED_ATLAS_HOOK_SECURITY_CONFIGS_FOR_NON_CLIENT_SERVICE)

    for prop in shared_props:
      if prop in atlas_props:
        merged_props[prop] = atlas_props[prop]

    merged_props.update(service_props)

    Logger.info(format("Generating Atlas Hook config file {atlas_hook_filepath}"))
    PropertiesFile(atlas_hook_filepath,
                   properties = merged_props,
                   owner = owner,
                   group = group,
                   mode = 0644)
Example #13
def has_atlas_in_cluster():
  """
  Determine if Atlas is installed on the cluster.
  :return: True if Atlas is installed, otherwise false.
  """
  atlas_hosts = default('/clusterHostInfo/atlas_server_hosts', [])
  return len(atlas_hosts) > 0
Example #14
  def get_stack_name():
    """
    Gets the name of the stack from hostLevelParams/stack_name.
    :return: a stack name or None
    """
    from resource_management.libraries.functions.default import default
    return default("/hostLevelParams/stack_name", None)
Example #15
def check_stack_feature(stack_feature, stack_version):
  """
  Given a stack_feature and a specific stack_version, it validates that the feature is supported by the stack_version.
  IMPORTANT, notice that the mapping of feature to version comes from cluster-env if it exists there.
  :param stack_feature: Feature name to check if it is supported by the stack. For example: "rolling_upgrade"
  :param stack_version: Version of the stack
  :return: Will return True if successful, otherwise, False. 
  """

  from resource_management.libraries.functions.default import default
  from resource_management.libraries.functions.version import compare_versions
  stack_features_config = default("/configurations/cluster-env/stack_features", None)

  if not stack_version:
    Logger.debug("Cannot determine if feature %s is supported since did not provide a stack version." % stack_feature)
    return False

  if stack_features_config:
    data = json.loads(stack_features_config)
    for feature in data["stack_features"]:
      if feature["name"] == stack_feature:
        if "min_version" in feature:
          min_version = feature["min_version"]
          if compare_versions(stack_version, min_version, format = True) < 0:
            return False
        if "max_version" in feature:
          max_version = feature["max_version"]
          if compare_versions(stack_version, max_version, format = True) >= 0:
            return False
        return True
  else:
    raise Fail("Stack features not defined by stack")
        
  return False
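# Hedged usage sketch (not from the source): params-style modules gate optional
# behavior on features like this; "rolling_upgrade" is the example feature named
# in the docstring and "2.3.0.0" is an illustrative stack version.
stack_supports_rolling_upgrade = check_stack_feature("rolling_upgrade", "2.3.0.0")
if stack_supports_rolling_upgrade:
  Logger.info("Rolling upgrade is supported by this stack version")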
Example #16
  def get_stack_root():
    """
    Get the stack-specific install root directory
    :return: stack_root
    """
    from resource_management.libraries.functions.default import default
    stack_name = Script.get_stack_name()
    return default("/configurations/cluster-env/stack_root", "/usr/{0}".format(stack_name.lower()))
  def unlink_all_configs(self, env):
    """
    Reverses the work performed in link_config. This should only be used when downgrading from
    HDP 2.3 to 2.2 in order to undo the symlink work required for 2.3.
    """
    stack_name = default('/hostLevelParams/stack_name', "").upper()
    downgrade_to_version = default('/commandParams/version', None)
    downgrade_from_version = default('/commandParams/downgrade_from_version', None)
    upgrade_direction = default("/commandParams/upgrade_direction", Direction.UPGRADE)

    # downgrade only
    if upgrade_direction != Direction.DOWNGRADE:
      Logger.warning("Unlinking configurations should only be performed on a downgrade.")
      return

    # HDP only
    if stack_name != "HDP":
      Logger.warning("Unlinking configurations should only be performed on the HDP stack.")
      return

    if downgrade_to_version is None or downgrade_from_version is None:
      Logger.warning("Both 'commandParams/version' and 'commandParams/downgrade_from_version' must be specified to unlink configs on downgrade.")
      return

    Logger.info("Unlinking all configs when downgrading from HDP 2.3 to 2.2")

    # normalize the versions
    stack_23 = format_hdp_stack_version("2.3")
    downgrade_to_version = format_hdp_stack_version(downgrade_to_version)
    downgrade_from_version = format_hdp_stack_version(downgrade_from_version)

    # downgrade-to-version must be 2.2 (less than 2.3)
    if compare_versions(downgrade_to_version, stack_23) >= 0:
      Logger.warning("Unlinking configurations should only be performed when downgrading to HDP 2.2")
      return

    # downgrade-from-version must be 2.3+
    if compare_versions(downgrade_from_version, stack_23) < 0:
      Logger.warning("Unlinking configurations should only be performed when downgrading from HDP 2.3 or later")
      return

    # iterate through all directory conf mappings and undo the symlinks
    for key, value in conf_select.PACKAGE_DIRS.iteritems():
      for directory_mapping in value:
        original_config_directory = directory_mapping['conf_dir']
        self._unlink_config(original_config_directory)
Example #18
  def should_expose_component_version(self, command_name):
    """
    Analyzes config and given command to determine if stack version should be written
    to structured out. Currently only HDP stack versions >= 2.2 are supported.
    :param command_name: command name
    :return: True or False
    """
    from resource_management.libraries.functions.default import default
    stack_version_unformatted = str(default("/hostLevelParams/stack_version", ""))
    hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
    if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
      if command_name.lower() == "status":
        request_version = default("/commandParams/request_version", None)
        if request_version is not None:
          return True
      else:
        # Populate version only on base commands
        return command_name.lower() == "start" or command_name.lower() == "install" or command_name.lower() == "restart"
    return False
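  def example_expose_version(self, command_name):
    # Hedged usage sketch (hypothetical helper, not in the source): expose the
    # component version only when the check above allows it for this command.
    if self.should_expose_component_version(command_name):
      self.save_component_version_to_structured_out()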
Example #19
  def get_component_from_role(role_directory_map, default_role):
    """
    Gets the /usr/hdp/current/<component> component given an Ambari role,
    such as DATANODE or HBASE_MASTER.
    :return:  the component name, such as hbase-master
    """
    from resource_management.libraries.functions.default import default

    command_role = default("/role", default_role)
    if command_role in role_directory_map:
      return role_directory_map[command_role]
    else:
      return role_directory_map[default_role]
  def delete_storm_local_data(self, env):
    """
    Deletes Storm data from local directories. This will create a marker file
    with JSON data representing the upgrade stack and request/stage ID. This
    will prevent multiple Storm components on the same host from removing
    the local directories more than once.
    :return:
    """
    import params

    Logger.info('Clearing Storm data from local directories...')

    storm_local_directory = params.local_dir
    if storm_local_directory is None:
      raise Fail("The storm local directory specified by storm-site/storm.local.dir must be specified")

    request_id = default("/requestId", None)

    stack_name = params.stack_name
    stack_version = params.version
    upgrade_direction = params.upgrade_direction

    json_map = {}
    json_map["requestId"] = request_id
    json_map["stackName"] = stack_name
    json_map["stackVersion"] = stack_version
    json_map["direction"] = upgrade_direction

    temp_directory = params.tmp_dir
    marker_file = os.path.join(temp_directory, "storm-upgrade-{0}.json".format(stack_version))
    Logger.info("Marker file for upgrade/downgrade of Storm, {0}".format(marker_file))

    if os.path.exists(marker_file):
      Logger.info("The marker file exists.")
      try:
        with open(marker_file) as file_pointer:
          existing_json_map = json.load(file_pointer)

        if cmp(json_map, existing_json_map) == 0:
          Logger.info("The storm upgrade has already removed the local directories for {0}-{1} for "
                      "request {2} and direction {3}. Nothing else to do.".format(stack_name, stack_version, request_id, upgrade_direction))

          # Nothing else to do here for this as it appears to have already been
          # removed by another component being upgraded
          return
        else:
          Logger.info("The marker file differs from the new value. Will proceed to delete Storm local dir, "
                      "and generate new file. Current marker file: {0}".format(str(existing_json_map)))
      except Exception, e:
        Logger.error("The marker file {0} appears to be corrupt; removing it. Error: {1}".format(marker_file, str(e)))
        File(marker_file, action="delete")
Example #21
  def format_package_name(self, name):
    """
    This function replaces the ${stack_version} placeholder with the actual version.  If the
    package version is passed from the server, use that as an absolute truth.
    """
    from resource_management.libraries.functions.default import default

    # two different command types put things in different objects.  WHY.
    # package_version is the form W_X_Y_Z_nnnn
    package_version = default("roleParams/package_version", None)
    if not package_version:
      package_version = default("hostLevelParams/package_version", None)

    package_delimiter = '-' if OSCheck.is_ubuntu_family() else '_'

    # The cluster effective version comes down when the version is known after the initial
    # install.  In that case we should not be guessing which version when invoking INSTALL, but
    # use the supplied version to build the package_version
    effective_version = default("commandParams/version", None)
    role_command = default("roleCommand", None)

    if (package_version is None or '*' in package_version) \
        and effective_version is not None and 'INSTALL' == role_command:
      package_version = effective_version.replace('.', package_delimiter).replace('-', package_delimiter)
      Logger.info("Version {0} was provided as effective cluster version.  Using package version {1}".format(effective_version, package_version))

    if package_version:
      stack_version_package_formatted = package_version
      if OSCheck.is_ubuntu_family():
        stack_version_package_formatted = package_version.replace('_', package_delimiter)

    # Wildcards cause a lot of trouble when installing packages; if the version contains wildcards we try to resolve it to a concrete version.
    if not package_version or '*' in package_version:
      stack_version_package_formatted = self.get_stack_version_before_packages_installed().replace('.', package_delimiter).replace('-', package_delimiter) if STACK_VERSION_PLACEHOLDER in name else name

    package_name = name.replace(STACK_VERSION_PLACEHOLDER, stack_version_package_formatted)
    
    return package_name
Example #22
  def save_component_version_to_structured_out(self):
    """
    :param stack_name: One of HDP, HDPWIN, PHD, BIGTOP.
    :return: Append the version number to the structured out.
    """
    from resource_management.libraries.functions.default import default
    stack_name = default("/hostLevelParams/stack_name", None)
    stack_to_component = self.get_stack_to_component()
    if stack_to_component and stack_name:
      component_name = stack_to_component[stack_name] if stack_name in stack_to_component else None
      component_version = get_component_version(stack_name, component_name)

      if component_version:
        self.put_structured_out({"version": component_version})
Example #23
  def save_component_version_to_structured_out(self):
    """
    :param stack_name: One of HDP, HDPWIN, PHD, BIGTOP.
    :return: Append the version number to the structured out.
    """
    stack_name = Script.get_stack_name()
    component_name = self.get_component_name()
    
    if component_name and stack_name:
      component_version = get_component_version(stack_name, component_name)

      if component_version:
        self.put_structured_out({"version": component_version})

        # if repository_version_id is passed, pass it back with the version
        from resource_management.libraries.functions.default import default
        repo_version_id = default("/hostLevelParams/repository_version_id", None)
        if repo_version_id:
          self.put_structured_out({"repository_version_id": repo_version_id})
Example #24
def _is_stop_command(config):
  """
  Gets whether this is a STOP command
  :param config:
  :return:
  """
  from resource_management.libraries.functions.default import default

  # STOP commands are the troublemakers since they are intended to stop a service that is not
  # on the version of the stack being upgraded/downgraded to
  role_command = config["roleCommand"]
  if role_command == _ROLE_COMMAND_STOP:
    return True

  custom_command = default("/hostLevelParams/custom_command", None)
  if role_command == _ROLE_COMMAND_CUSTOM and custom_command == _ROLE_COMMAND_STOP:
    return True

  return False
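# Hedged usage sketch (not from the source): the command config is passed in so
# STOP commands can keep the previously reported version instead of recomputing it.
command_config = Script.get_config()
if _is_stop_command(command_config):
  Logger.info("STOP command detected; keeping the previously reported component version")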
Example #25
def get_stack_tool(name):
  """
  Given a tool selector name, get the stack-specific tool name, tool path, and tool package.
  :param name: tool selector name
  :return: tool_name, tool_path, tool_package
  """
  from resource_management.libraries.functions.default import default
  stack_tools = None
  stack_tools_config = default("/configurations/cluster-env/stack_tools", None)
  if stack_tools_config:
    stack_tools = json.loads(stack_tools_config)

  if not stack_tools or not name or name.lower() not in stack_tools:
    Logger.warning("Cannot find config for {0} stack tool in {1}".format(str(name), str(stack_tools)))
    return (None, None, None)

  tool_config = stack_tools[name.lower()]

  # Return a fixed-length (tool_name, tool_path, tool_package) tuple
  return tuple(pad(tool_config[:3], 3))
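# Hedged usage sketch (illustrative): callers unpack the fixed-length tuple and must
# handle the (None, None, None) case; "stack_selector" is an assumed selector key.
(selector_name, selector_path, selector_package) = get_stack_tool("stack_selector")
if selector_path is not None:
  Logger.info("Stack selector tool resolved to {0}".format(selector_path))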
def get_not_managed_resources():
  """
  Returns a list of not managed hdfs paths.
  The result contains all paths from hostLevelParams/not_managed_hdfs_path_list
  except config values from cluster-env/managed_hdfs_resource_property_names
  """
  config = Script.get_config()
  not_managed_hdfs_path_list = json.loads(config['hostLevelParams']['not_managed_hdfs_path_list'])[:]
  managed_hdfs_resource_property_names = config['configurations']['cluster-env']['managed_hdfs_resource_property_names']
  managed_hdfs_resource_property_list = filter(None, [property.strip() for property in managed_hdfs_resource_property_names.split(',')])

  for property_name in managed_hdfs_resource_property_list:
    property_value = default('/configurations/' + property_name, None)

    if property_value is None:
      Logger.warning(("Property {0} from cluster-env/managed_hdfs_resource_property_names not found in configurations. "
                     "Management of this DFS resource will not be forced.").format(property_name))
    else:
      while property_value in not_managed_hdfs_path_list:
        not_managed_hdfs_path_list.remove(property_value)

  return not_managed_hdfs_path_list
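# Hedged usage sketch (assumption, not from the source): the filtered list is the
# set of HDFS paths Ambari should leave untouched, typically passed along to
# HdfsResource-style resources.
not_managed_paths = get_not_managed_resources()
Logger.info("HDFS paths not managed by Ambari: {0}".format(str(not_managed_paths)))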
Example #27
  def filter_package_list(self, package_list):
    """
    Here we filter packages that are managed with custom logic in package
    scripts. Usually these packages come from system repositories, and either
    are not available when we restrict the repository list, or should not be
    installed on the host at all.
    :param package_list: original list
    :return: filtered package_list
    """
    filtered_package_list = []

    # hadoop-lzo package is installed only if LZO compression is enabled
    lzo_packages = ['hadoop-lzo', 'lzo', 'hadoop-lzo-native', 'liblzo2-2', 'hadooplzo']
    has_lzo = False
    io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
    if io_compression_codecs:
      has_lzo = "com.hadoop.compression.lzo" in io_compression_codecs.lower()

    for package in package_list:
      skip_package = False
      # mysql* package logic is managed at HIVE scripts
      if package['name'].startswith('mysql'):
        skip_package = True
      # Ambari metrics packages should not be upgraded during RU
      if package['name'].startswith('ambari-metrics'):
        skip_package = True

      if not has_lzo:
        for lzo_package in lzo_packages:
          if package['name'].startswith(lzo_package):
            skip_package = True
            break

      if not skip_package:
        filtered_package_list.append(package)
    return filtered_package_list
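  def example_filtered_install(self):
    # Hedged usage sketch (hypothetical helper, not in the source): package entries
    # are dicts with a 'name' key, mirroring what filter_package_list expects.
    packages = [{'name': 'hadoop_2_6_0_0_*'}, {'name': 'mysql-connector-java'}, {'name': 'hadoop-lzo'}]
    for package in self.filter_package_list(packages):
      Logger.info("Would install package {0}".format(package['name']))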
Example #28
# a map of the Ambari role to the component name
# for use with <stack-root>/current/<component>
SERVER_ROLE_DIRECTORY_MAP = {
  'RANGER_ADMIN' : 'ranger-admin',
  'RANGER_USERSYNC' : 'ranger-usersync',
  'RANGER_TAGSYNC' : 'ranger-tagsync'
}

component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "RANGER_ADMIN")

config  = Script.get_config()
tmp_dir = Script.get_tmp_dir()
stack_root = Script.get_stack_root()

stack_name = default("/hostLevelParams/stack_name", None)
version = default("/commandParams/version", None)

stack_version_unformatted = config['hostLevelParams']['stack_version']
stack_version_formatted = format_stack_version(stack_version_unformatted)
major_stack_version = get_major_version(stack_version_formatted)

upgrade_marker_file = format("{tmp_dir}/rangeradmin_ru.inprogress")

xml_configurations_supported = config['configurations']['ranger-env']['xml_configurations_supported']

create_db_dbuser = config['configurations']['ranger-env']['create_db_dbuser']

# get the correct version to use for checking stack features
version_for_stack_feature_checks = get_stack_feature_version(config)
Example #29
from resource_management.libraries.script import Script
from resource_management.libraries.functions.version import format_stack_version
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.stack_features import get_stack_feature_version
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.get_bare_principal import get_bare_principal
from resource_management.libraries.functions.is_empty import is_empty
from resource_management.libraries.functions.setup_ranger_plugin_xml import generate_ranger_service_config

config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
stack_root = Script.get_stack_root()

stack_name = default("/hostLevelParams/stack_name", None)
version = default("/commandParams/version", None)
upgrade_direction = default("/commandParams/upgrade_direction", None)

stack_version_unformatted = config['hostLevelParams']['stack_version']
stack_version_formatted = format_stack_version(stack_version_unformatted)

# get the correct version to use for checking stack features
version_for_stack_feature_checks = get_stack_feature_version(config)

stack_supports_config_versioning = check_stack_feature(
    StackFeature.CONFIG_VERSIONING, version_for_stack_feature_checks)
stack_support_kms_hsm = check_stack_feature(
    StackFeature.RANGER_KMS_HSM_SUPPORT, version_for_stack_feature_checks)
stack_supports_ranger_kerberos = check_stack_feature(
    StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
Example #30
from resource_management.libraries.functions.format_jvm_option import format_jvm_option
from resource_management.libraries.functions.hdfs_utils import is_https_enabled_in_hdfs
from resource_management.libraries.functions import is_empty
from resource_management.libraries.functions.get_architecture import get_architecture
from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs, generate_ranger_service_config
from resource_management.libraries.functions import namenode_ha_utils
from resource_management.libraries.functions.namenode_ha_utils import get_properties_for_all_nameservices, namenode_federation_enabled

config = Script.get_config()
tmp_dir = Script.get_tmp_dir()

architecture = get_architecture()

stack_name = status_params.stack_name
stack_root = Script.get_stack_root()
upgrade_direction = default("/commandParams/upgrade_direction", None)
rolling_restart = default("/commandParams/rolling_restart", False)
rolling_restart_safemode_exit_timeout = default(
    "/configurations/cluster-env/namenode_rolling_restart_safemode_exit_timeout",
    None)
stack_version_unformatted = config['clusterLevelParams']['stack_version']
stack_version_formatted = format_stack_version(stack_version_unformatted)
agent_stack_retry_on_unavailability = config['ambariLevelParams'][
    'agent_stack_retry_on_unavailability']
agent_stack_retry_count = expect("/ambariLevelParams/agent_stack_retry_count",
                                 int)

# there is a stack upgrade which has not yet been finalized; it's currently suspended
upgrade_suspended = default("roleParams/upgrade_suspended", False)

# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade
Example #31
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions.expect import expect
from ambari_commons.ambari_metrics_helper import select_metric_collector_hosts_from_hostnames

# server configurations
config = Script.get_config()
exec_tmp_dir = Script.get_tmp_dir()
sudo = AMBARI_SUDO_BINARY

stack_name = status_params.stack_name
agent_stack_retry_on_unavailability = config['hostLevelParams'][
    'agent_stack_retry_on_unavailability']
agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count",
                                 int)
version = default("/commandParams/version", None)
component_directory = status_params.component_directory
etc_prefix_dir = "/etc/hbase"

stack_version_unformatted = status_params.stack_version_unformatted
stack_version_formatted = status_params.stack_version_formatted
stack_root = status_params.stack_root

# get the correct version to use for checking stack features
version_for_stack_feature_checks = get_stack_feature_version(config)

stack_supports_ranger_kerberos = check_stack_feature(
    StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
stack_supports_ranger_audit_db = check_stack_feature(
    StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
Example #32
from resource_management.libraries.script.script import Script

from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages

import status_params
import os

# server configurations
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
sudo = AMBARI_SUDO_BINARY

hostname = config["hostname"]

# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
version = default("/commandParams/version", None)
stack_name = default("/hostLevelParams/stack_name", None)
upgrade_direction = default("/commandParams/upgrade_direction", None)

stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)

hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hadoop_bin_dir = conf_select.get_hadoop_dir("bin")
hadoop_lib_home = conf_select.get_hadoop_dir("lib")

#hadoop params
if Script.is_hdp_stack_greater_or_equal("2.2"):
    # oozie-server or oozie-client, depending on role
    oozie_root = status_params.component_directory
Example #33
# a map of the Ambari role to the component name
# for use with <stack-root>/current/<component>
SERVER_ROLE_DIRECTORY_MAP = {
    'HUE_SERVER': 'hue-server',
}

component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP,
                                                     "HUE_SERVER")
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
stack_root = Script.get_stack_root()
# Hue download url
download_url = 'echo https://cdn.gethue.com/downloads/hue-3.11.0.tgz'
hue_version_dir = 'hue-3.11.0'
# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
version = default("/commandParams/version", None)
stack_name = default("/hostLevelParams/stack_name", None)
#e.g. /var/lib/ambari-agent/cache/stacks/HDP/$VERSION/services/HUE/package
service_packagedir = os.path.realpath(__file__).split('/scripts')[0]
cluster_name = str(config['clusterName'])
ambari_server_hostname = config['ambariLevelParams']['ambari_server_host']

#hue_apps = ['security','pig','filebrowser','jobbrowser','zookeeper','search','rdbms','metastore','spark','beeswax','jobsub','hbase','oozie','indexer']
hue_hdfs_module_enabled = config['configurations']['hue-env'][
    'hue-hdfs-module-enabled']
hue_yarn_module_enabled = config['configurations']['hue-env'][
    'hue-yarn-module-enabled']
hue_hive_module_enabled = config['configurations']['hue-env'][
    'hue-hive-module-enabled']
hue_hbase_module_enabled = config['configurations']['hue-env'][
    'hue-hbase-module-enabled']
Example #34
cluster_name = config['clusterName']

# node hostname
hostname = config["hostname"]

# This is expected to be of the form #.#.#.#
stack_version_unformatted = status_params.stack_version_unformatted
stack_version_formatted_major = status_params.stack_version_formatted_major

# this is not available on INSTALL action because <stack-selector-tool> is not available
stack_version_formatted = functions.get_stack_version('hive-server2')
major_stack_version = get_major_version(stack_version_formatted_major)

# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade.
# It cannot be used during the initial Cluster Install because the version is not yet known.
version = default("/commandParams/version", None)

# When downgrading the 'version' is pointing to the downgrade-target version
# downgrade_from_version provides the source-version the downgrade is happening from
downgrade_from_version = upgrade_summary.get_downgrade_from_version("HIVE")

# get the correct version to use for checking stack features
version_for_stack_feature_checks = get_stack_feature_version(config)

# Upgrade direction
upgrade_direction = default("/commandParams/upgrade_direction", None)
stack_supports_ranger_kerberos = check_stack_feature(
    StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
stack_supports_ranger_audit_db = check_stack_feature(
    StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
stack_supports_ranger_hive_jdbc_url_change = check_stack_feature(
Example #35
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements.  See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.  The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License.  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

"""
from ambari_commons import OSCheck
from status_params import *
from resource_management.libraries.functions.default import default

if OSCheck.is_windows_family():
    from params_windows import *
else:
    from params_linux import *

host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
Example #36
    'HISTORYSERVER': 'hadoop-mapreduce-historyserver',
    'MAPREDUCE2_CLIENT': 'hadoop-mapreduce-client',
}

YARN_SERVER_ROLE_DIRECTORY_MAP = {
    'APP_TIMELINE_SERVER': 'hadoop-yarn-timelineserver',
    'NODEMANAGER': 'hadoop-yarn-nodemanager',
    'RESOURCEMANAGER': 'hadoop-yarn-resourcemanager',
    'YARN_CLIENT': 'hadoop-yarn-client'
}

# server configurations
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()

stack_name = default("/hostLevelParams/stack_name", None)

# This is expected to be of the form #.#.#.#
stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version_major = format_hdp_stack_version(stack_version_unformatted)
hdp_stack_version = version.get_hdp_build_version(hdp_stack_version_major)

# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
version = default("/commandParams/version", None)

hostname = config['hostname']

# hadoop default parameters
hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
hadoop_bin = "/usr/lib/hadoop/sbin"
hadoop_bin_dir = "/usr/bin"
Example #37
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements.  See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.  The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License.  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Ambari Agent

"""
from ambari_commons import OSCheck
from resource_management.libraries.functions.default import default

if OSCheck.is_windows_family():
  from params_windows import *
else:
  from params_linux import *

host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)

Example #38
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions.version import format_stack_version

config = Script.get_config()
stack_root = '/opt'
stack_version_unformatted = config['hostLevelParams']['stack_version']
stack_version_formatted = format_stack_version(stack_version_unformatted)

knox_conf_dir = '/etc/knox'
knox_pid_dir = config['configurations']['knox-env']['knox_pid_dir']
knox_pid_file = format("{knox_pid_dir}/gateway.pid")
ldap_pid_file = format("{knox_pid_dir}/ldap.pid")

security_enabled = config['configurations']['cluster-env']['security_enabled']
if security_enabled:
    knox_keytab_path = config['configurations']['knox-env']['knox_keytab_path']
    knox_principal_name = config['configurations']['knox-env'][
        'knox_principal_name']
else:
    knox_keytab_path = None
    knox_principal_name = None

hostname = config['hostname'].lower()
knox_user = default("/configurations/knox-env/knox_user", "knox")
kinit_path_local = get_kinit_path(
    default('/configurations/kerberos-env/executable_search_paths', None))
temp_dir = Script.get_tmp_dir()

stack_name = default("/hostLevelParams/stack_name", None)
Example #39
import os
import status_params


def get_port_from_url(address):
    if not is_empty(address):
        return address.split(':')[-1]
    else:
        return address


# config object that holds the configurations declared in the -site.xml file
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()

stack_version = default("/commandParams/version", None)
sudo = AMBARI_SUDO_BINARY

logsearch_solr_conf = "/etc/ambari-logsearch-solr/conf"
logsearch_server_conf = "/etc/ambari-logsearch-portal/conf"
logsearch_logfeeder_conf = "/etc/ambari-logsearch-logfeeder/conf"

logsearch_config_set_dir = format("{logsearch_server_conf}/solr_configsets")

logsearch_solr_port = status_params.logsearch_solr_port
logsearch_solr_piddir = status_params.logsearch_solr_piddir
logsearch_solr_pidfile = status_params.logsearch_solr_pidfile

# logsearch pid file
logsearch_pid_dir = status_params.logsearch_pid_dir
logsearch_pid_file = status_params.logsearch_pid_file
Example #40
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.stack_features import get_stack_feature_version
from resource_management.libraries.functions.constants import StackFeature
from resource_management.libraries.functions import is_empty
from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs, generate_ranger_service_config

# server configurations
config = Script.get_config()
stack_root = Script.get_stack_root()

tmp_dir = Script.get_tmp_dir()
stack_name = status_params.stack_name
upgrade_direction = default("/commandParams/upgrade_direction", None)
version = default("/commandParams/version", None)
# E.g., 2.3.2.0
version_formatted = format_stack_version(version)

# E.g., 2.3
stack_version_unformatted = config['hostLevelParams']['stack_version']
stack_version_formatted = format_stack_version(stack_version_unformatted)

# get the correct version to use for checking stack features
version_for_stack_feature_checks = get_stack_feature_version(config)

stack_supports_ranger_kerberos = check_stack_feature(
    StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
stack_supports_ranger_audit_db = check_stack_feature(
    StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
Example #41
"""
from resource_management.libraries.functions import format
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
from resource_management.libraries.functions.default import default
from utils import get_bare_principal
from resource_management.libraries.functions.get_hdp_version import get_hdp_version
from resource_management.libraries.functions.is_empty import is_empty

import status_params

# server configurations
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
stack_name = default("/hostLevelParams/stack_name", None)
retryAble = default("/commandParams/command_retry_enabled", False)

version = default("/commandParams/version", None)
host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)

stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)

# default kafka parameters
kafka_home = '/usr/lib/kafka/'
kafka_bin = kafka_home + '/bin/kafka'
conf_dir = "/etc/kafka/conf"
limits_conf_dir = "/etc/security/limits.d"

kafka_user_nofile_limit = config['configurations']['kafka-env'][
Example #42

from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
from resource_management.libraries.functions.default import default
from resource_management import *
from status_params import *

config = Script.get_config()

stack_name = default("/hostLevelParams/stack_name", None)

# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
version = default("/commandParams/version", None)

stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)

# hadoop params
if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
    hadoop_bin_dir = "/usr/hdp/current/hadoop-client/bin"

    # if this is a server action, then use the server binaries; smoke tests
    # use the client binaries
    server_role_dir_mapping = {
        'FALCON_SERVER': 'falcon-server',
Example #43
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.script.script import Script
from status_params import *
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions import StackFeature

# server configurations
config = Script.get_config()
stack_root = Script.get_stack_root()

tmp_dir = Script.get_tmp_dir()
stack_name = default("/hostLevelParams/stack_name", None)
upgrade_direction = default("/commandParams/upgrade_direction", None)
version = default("/commandParams/version", None)
# E.g., 2.3.2.0
version_formatted = format_stack_version(version)

# E.g., 2.3
stack_version_unformatted = config['hostLevelParams']['stack_version']
stack_version_formatted = format_stack_version(stack_version_unformatted)

# This is the version whose state is CURRENT. During an RU, this is the source version.
# DO NOT format it since we need the build number too.
upgrade_from_version = default("/hostLevelParams/current_version", None)

# server configurations
# Default value used in HDP 2.3.0.0 and earlier.
Example #44
YARN_SERVER_ROLE_DIRECTORY_MAP = {
  'APP_TIMELINE_SERVER' : 'hadoop-yarn-timelineserver',
  'NODEMANAGER' : 'hadoop-yarn-nodemanager',
  'RESOURCEMANAGER' : 'hadoop-yarn-resourcemanager',
  'YARN_CLIENT' : 'hadoop-yarn-client'
}

# server configurations
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()

architecture = get_architecture()

stack_name = status_params.stack_name
stack_root = Script.get_stack_root()
tarball_map = default("/configurations/cluster-env/tarball_map", None)

config_path = os.path.join(stack_root, "current/hadoop-client/conf")
config_dir = os.path.realpath(config_path)

# This is expected to be of the form #.#.#.#
stack_version_unformatted = config['hostLevelParams']['stack_version']
stack_version_formatted_major = format_stack_version(stack_version_unformatted)
stack_version_formatted = functions.get_stack_version('hadoop-yarn-resourcemanager')

stack_supports_ru = stack_version_formatted_major and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted_major)
stack_supports_timeline_state_store = stack_version_formatted_major and check_stack_feature(StackFeature.TIMELINE_STATE_STORE, stack_version_formatted_major)

# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade.
# It cannot be used during the initial Cluster Install because the version is not yet known.
version = default("/commandParams/version", None)
Example #45
# for use with <stack-root>/current/<component>
SERVER_ROLE_DIRECTORY_MAP = {
    'DRUID_BROKER': 'druid-broker',
    'DRUID_COORDINATOR': 'druid-coordinator',
    'DRUID_HISTORICAL': 'druid-historical',
    'DRUID_MIDDLEMANAGER': 'druid-middlemanager',
    'DRUID_OVERLORD': 'druid-overlord',
    'DRUID_ROUTER': 'druid-router'
}

# server configurations
config = Script.get_config()
stack_root = Script.get_stack_root()
tmp_dir = Script.get_tmp_dir()

stack_name = default("/hostLevelParams/stack_name", None)

# stack version
stack_version = default("/commandParams/version", None)

# un-formatted stack version
stack_version_unformatted = str(config['hostLevelParams']['stack_version'])

# default role to coordinator needed for service checks
component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP,
                                                     "DRUID_COORDINATOR")

hostname = config['hostname']
sudo = AMBARI_SUDO_BINARY

# default druid parameters
Example #46
from resource_management.libraries.functions import format
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.expect import expect
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.script import Script
from resource_management.libraries.functions import get_user_call_output

from status_params import *

# server configurations
java_home = config['hostLevelParams']['java_home']
ambari_cluster_name = config['clusterName']
java_version = expect("/hostLevelParams/java_version", int)
host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)

dpprofiler_hosts = default("/clusterHostInfo/dpprofiler_hosts", None)
if type(dpprofiler_hosts) is list:
    dpprofiler_host_name = dpprofiler_hosts[0]
else:
    dpprofiler_host_name = dpprofiler_hosts

config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
sudo = AMBARI_SUDO_BINARY

credential_store_enabled = False
if 'credentialStoreEnabled' in config:
    credential_store_enabled = config['credentialStoreEnabled']
jdk_location = config['hostLevelParams']['jdk_location']
Example #47
# for use with <stack-root>/current/<component>
SERVER_ROLE_DIRECTORY_MAP = {
    'SPARK_JOBHISTORYSERVER': 'spark-historyserver',
    'SPARK_CLIENT': 'spark-client',
    'SPARK_THRIFTSERVER': 'spark-thriftserver',
    'LIVY_SERVER': 'livy-server',
    'LIVY_CLIENT': 'livy-client'
}

component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP,
                                                     "SPARK_CLIENT")

config = Script.get_config()
tmp_dir = Script.get_tmp_dir()

upgrade_direction = default("/commandParams/upgrade_direction", None)

stack_name = status_params.stack_name
stack_root = Script.get_stack_root()
stack_version_unformatted = config['hostLevelParams']['stack_version']
if upgrade_direction == Direction.DOWNGRADE:
    stack_version_unformatted = config['commandParams']['original_stack']
stack_version_formatted = format_stack_version(stack_version_unformatted)
host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)

# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade
version = default("/commandParams/version", None)

spark_conf = '/etc/spark/conf'
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
Example #48
    def __init__(self):
        """
    Initializes all fields by querying the Namenode state.
    Raises a ValueError if unable to construct the object.
    """
        import params

        self.name_service = default(
            "/configurations/hdfs-site/dfs.nameservices", None)
        if not self.name_service:
            raise ValueError("Could not retrieve property dfs.nameservices")

        nn_unique_ids_key = "dfs.ha.namenodes." + str(self.name_service)
        # List of the nn unique ids
        self.nn_unique_ids = default(
            "/configurations/hdfs-site/" + nn_unique_ids_key, None)
        if not self.nn_unique_ids:
            raise ValueError("Could not retrieve property " +
                             nn_unique_ids_key)

        self.nn_unique_ids = self.nn_unique_ids.split(",")
        self.nn_unique_ids = [x.strip() for x in self.nn_unique_ids]
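        # Illustration (hypothetical value, not from the original script): a
        # dfs.ha.namenodes.<nameservice> value of "nn1, nn2" becomes ['nn1', 'nn2'].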

        policy = default("/configurations/hdfs-site/dfs.http.policy",
                         "HTTP_ONLY")
        self.encrypted = policy.upper() == "HTTPS_ONLY"

        jmx_uri_fragment = (
            "https" if self.encrypted else "http"
        ) + "://{0}/jmx?qry=Hadoop:service=NameNode,name=NameNodeStatus"
        namenode_http_fragment = "dfs.namenode.http-address.{0}.{1}"
        namenode_https_fragment = "dfs.namenode.https-address.{0}.{1}"
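        # Illustration (hypothetical host/port, not from the original script):
        # jmx_uri_fragment.format("nn1.example.com:50070") produces
        # "http://nn1.example.com:50070/jmx?qry=Hadoop:service=NameNode,name=NameNodeStatus",
        # and the address keys become, e.g., "dfs.namenode.http-address.mycluster.nn1".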

        # Dictionary where the key is the Namenode State (e.g., ACTIVE), and the value is a set of hostnames
        self.namenode_state_to_hostnames = {}

        # Dictionary from nn unique id name to a tuple of (http address, https address)
        self.nn_unique_id_to_addresses = {}
        for nn_unique_id in self.nn_unique_ids:
            http_key = namenode_http_fragment.format(self.name_service,
                                                     nn_unique_id)
            https_key = namenode_https_fragment.format(self.name_service,
                                                       nn_unique_id)

            http_value = default("/configurations/hdfs-site/" + http_key, None)
            https_value = default("/configurations/hdfs-site/" + https_key,
                                  None)
            actual_value = https_value if self.encrypted else http_value
            hostname = (actual_value.split(":")[0].strip()
                        if actual_value and ":" in actual_value else None)

            self.nn_unique_id_to_addresses[nn_unique_id] = (http_value,
                                                            https_value)
            try:
                if not hostname:
                    raise Exception(
                        "Could not retrieve hostname from address " +
                        actual_value)

                jmx_uri = jmx_uri_fragment.format(actual_value)
                state = get_value_from_jmx(jmx_uri, "State")

                if not state:
                    raise Exception(
                        "Could not retrieve Namenode state from URL " +
                        jmx_uri)

                state = state.lower()

                if state not in [
                        NAMENODE_STATE.ACTIVE, NAMENODE_STATE.STANDBY
                ]:
                    state = NAMENODE_STATE.UNKNOWN

                if state in self.namenode_state_to_hostnames:
                    self.namenode_state_to_hostnames[state].add(hostname)
                else:
                    hostnames = set([
                        hostname,
                    ])
                    self.namenode_state_to_hostnames[state] = hostnames
            except:
                Logger.error("Could not get namenode state for " +
                             nn_unique_id)
Example #49
    def test_kinit(identity, user=None):
        principal = get_property_value(identity, 'principal')
        kinit_path_local = functions.get_kinit_path(
            default('/configurations/kerberos-env/executable_search_paths',
                    None))
        kdestroy_path_local = functions.get_kdestroy_path(
            default('/configurations/kerberos-env/executable_search_paths',
                    None))

        if principal is not None:
            keytab_file = get_property_value(identity, 'keytab_file')
            keytab = get_property_value(identity, 'keytab')
            password = get_property_value(identity, 'password')

            # If a test keytab file is available, simply use it
            if (keytab_file is not None) and (os.path.isfile(keytab_file)):
                command = '%s -k -t %s %s' % (kinit_path_local, keytab_file,
                                              principal)
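                # Illustration (hypothetical values, not from the original script):
                # this expands to something like
                # "/usr/bin/kinit -k -t /etc/security/keytabs/smokeuser.headless.keytab ambari-qa@EXAMPLE.COM".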
                Execute(
                    command,
                    user=user,
                )
                return shell.checked_call(kdestroy_path_local)

            # If base64-encoded test keytab data is available; then decode it, write it to a temporary file
            # use it, and then remove the temporary file
            elif keytab is not None:
                (fd, test_keytab_file) = tempfile.mkstemp()
                os.write(fd, base64.b64decode(keytab))
                os.close(fd)

                try:
                    command = '%s -k -t %s %s' % (kinit_path_local,
                                                  test_keytab_file, principal)
                    Execute(
                        command,
                        user=user,
                    )
                    return shell.checked_call(kdestroy_path_local)
                except:
                    raise
                finally:
                    if test_keytab_file is not None:
                        os.remove(test_keytab_file)

            # If no keytab data is available and a password was supplied, simply use it.
            elif password is not None:
                process = subprocess.Popen([kinit_path_local, principal],
                                           stdin=subprocess.PIPE)
                stdout, stderr = process.communicate(password)
                if process.returncode:
                    err_msg = Logger.filter_text(
                        "Execution of kinit returned %d. %s" %
                        (process.returncode, stderr))
                    raise Fail(err_msg)
                else:
                    return shell.checked_call(kdestroy_path_local)
            else:
                return 0, ''
        else:
            return 0, ''
Example #50
yum_repo_type = config['configurations']['metron-env']['repo_type']
if yum_repo_type == 'local':
    repo_url = 'file:///localrepo'
else:
    repo_url = config['configurations']['metron-env']['repo_url']

# hadoop params
stack_root = Script.get_stack_root()
hadoop_home_dir = stack_select.get_hadoop_dir("home")
hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
kafka_home = os.path.join(stack_root, "current", "kafka-broker")
kafka_bin_dir = os.path.join(kafka_home, "bin")

# zookeeper
zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
has_zk_host = not len(zk_hosts) == 0
zookeeper_quorum = None
if has_zk_host:
    if 'zoo.cfg' in config['configurations'] and 'clientPort' in config[
            'configurations']['zoo.cfg']:
        zookeeper_clientPort = config['configurations']['zoo.cfg'][
            'clientPort']
    else:
        zookeeper_clientPort = '2181'
    zookeeper_quorum = (':' + zookeeper_clientPort + ',').join(
        config['clusterHostInfo']['zookeeper_hosts'])
    # last port config
    zookeeper_quorum += ':' + zookeeper_clientPort
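    # Illustration (hypothetical hosts, not from the original script): with
    # zookeeper_hosts ['zk1.example.com', 'zk2.example.com'] and clientPort '2181',
    # zookeeper_quorum becomes 'zk1.example.com:2181,zk2.example.com:2181'.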

# Storm
Example #51
component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "SPARK2_CLIENT")

config = Script.get_config()
tmp_dir = Script.get_tmp_dir()

stack_name = status_params.stack_name
stack_root = Script.get_stack_root()
stack_version_unformatted = config['hostLevelParams']['stack_version']
stack_version_formatted = format_stack_version(stack_version_unformatted)
major_stack_version = get_major_version(stack_version_formatted)

sysprep_skip_copy_tarballs_hdfs = get_sysprep_skip_copy_tarballs_hdfs()

# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade
version = default("/commandParams/version", None)

spark_conf = '/etc/spark2/conf'
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hadoop_bin_dir = stack_select.get_hadoop_dir("bin")

if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
  hadoop_home = stack_select.get_hadoop_dir("home")
  spark_conf = format("{stack_root}/current/{component_directory}/conf")
  spark_log_dir = config['configurations']['spark2-env']['spark_log_dir']
  spark_pid_dir = status_params.spark_pid_dir
  spark_home = format("{stack_root}/current/{component_directory}")
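  # Illustration (hypothetical resolution, not from the original script): with
  # stack_root '/usr/hdp' and component_directory 'spark2-client', spark_conf
  # resolves to '/usr/hdp/current/spark2-client/conf' and spark_home to
  # '/usr/hdp/current/spark2-client'.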

spark_daemon_memory = config['configurations']['spark2-env']['spark_daemon_memory']
spark_thrift_server_conf_file = spark_conf + "/spark-thrift-sparkconf.conf"
java_home = config['hostLevelParams']['java_home']
Example #52
from resource_management.libraries.functions import StackFeature

config = Script.get_config()
stack_root = status_params.stack_root
stack_name = status_params.stack_name

agent_stack_retry_on_unavailability = config['hostLevelParams'][
    'agent_stack_retry_on_unavailability']
agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count",
                                 int)

version = stack_features.get_stack_feature_version(config)

stack_version_unformatted = status_params.stack_version_unformatted
stack_version_formatted = status_params.stack_version_formatted
upgrade_direction = default("/commandParams/upgrade_direction", None)
jdk_location = config['hostLevelParams']['jdk_location']

etc_prefix_dir = "/etc/falcon"

# hadoop params
hadoop_home_dir = stack_select.get_hadoop_dir("home")
hadoop_bin_dir = stack_select.get_hadoop_dir("bin")

if check_stack_feature(StackFeature.ROLLING_UPGRADE, version):
    # if this is a server action, then use the server binaries; smoke tests
    # use the client binaries
    server_role_dir_mapping = {
        'FALCON_SERVER': 'falcon-server',
        'FALCON_SERVICE_CHECK': 'falcon-client'
    }
Example #53
    def delete_storm_local_data(self, env):
        """
    Deletes Storm data from local directories. This will create a marker file
    with JSON data representing the upgrade stack and request/stage ID. This
    will prevent multiple Storm components on the same host from removing
    the local directories more than once.
    :return:
    """
        import params

        Logger.info('Clearing Storm data from local directories...')

        storm_local_directory = params.local_dir
        if storm_local_directory is None:
            raise Fail(
                "The storm local directory specified by storm-site/storm.local.dir must be specified"
            )

        request_id = default("/requestId", None)
        stage_id = default("/stageId", None)
        stack_version = params.version
        stack_name = params.stack_name

        json_map = {}
        json_map["requestId"] = request_id
        json_map["stageId"] = stage_id
        json_map["stackVersion"] = stack_version
        json_map["stackName"] = stack_name

        temp_directory = params.tmp_dir
        upgrade_file = os.path.join(
            temp_directory, "storm-upgrade-{0}.json".format(stack_version))

        if os.path.exists(upgrade_file):
            try:
                with open(upgrade_file) as file_pointer:
                    existing_json_map = json.load(file_pointer)

                if cmp(json_map, existing_json_map) == 0:
                    Logger.info(
                        "The storm upgrade has already removed the local directories for {0}-{1} for request {2} and stage {3}"
                        .format(stack_name, stack_version, request_id,
                                stage_id))

                    # nothing else to do here for this as it appears to have already been
                    # removed by another component being upgraded
                    return
                else:
                    # delete the upgrade file since it does not match
                    File(upgrade_file, action="delete")
            except:
                Logger.error(
                    "The upgrade file {0} appears to be corrupt; removing...".
                    format(upgrade_file))
                File(upgrade_file, action="delete")

        # delete from local directory
        Directory(storm_local_directory, action="delete", recursive=True)

        # recreate storm local directory
        Directory(storm_local_directory,
                  mode=0755,
                  owner=params.storm_user,
                  group=params.user_group,
                  recursive=True)

        # write out the marker file; it either did not exist or was just deleted
        with open(upgrade_file, 'w') as file_pointer:
            json.dump(json_map, file_pointer, indent=2)
Example #54
# a map of the Ambari role to the component name
# for use with /usr/hdp/current/<component>
SERVER_ROLE_DIRECTORY_MAP = {
  'SQOOP' : 'sqoop-client'
}

component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "SQOOP")

config = Script.get_config()

cluster_name = config['clusterName']

ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]

stack_name = default("/hostLevelParams/stack_name", None)

stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)

# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
version = default("/commandParams/version", None)

# default hadoop params
sqoop_conf_dir = "/usr/lib/sqoop/conf"
sqoop_lib = "/usr/lib/sqoop/lib"
hadoop_home = '/usr/lib/hadoop'
hbase_home = "/usr/lib/hbase"
hive_home = "/usr/lib/hive"
sqoop_bin_dir = "/usr/bin"
zoo_conf_dir = "/etc/zookeeper"
Example #55
    return component_host


hostname = config['hostname']

# Users and Groups
hdfs_superuser = config['configurations']['hadoop-env']['hdfs_user']
user_group = config['configurations']['cluster-env']['user_group']

# Convert hawq_password to unicode for crypt() function in case user enters a numeric password
hawq_password = unicode(config['configurations']['hawq-env']['hawq_password'])

# HAWQ Hostnames
hawqmaster_host = __get_component_host('hawqmaster_hosts')
hawqstandby_host = __get_component_host('hawqstandby_hosts')
hawqsegment_hosts = sorted(default('/clusterHostInfo/hawqsegment_hosts', []))
hawq_master_hosts = [
    host for host in (hawqmaster_host, hawqstandby_host) if host
]
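# Illustration (hypothetical hosts, not from the original script): with
# hawqmaster_host 'hawq-m.example.com' and no standby configured (None), the
# comprehension yields ['hawq-m.example.com'].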
hawq_all_hosts = sorted(set(hawq_master_hosts + hawqsegment_hosts))

# HDFS
hdfs_site = config['configurations']['hdfs-site']
default_fs = config['configurations']['core-site']['fs.defaultFS']

security_enabled = config['configurations']['cluster-env']['security_enabled']
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
kinit_path_local = get_kinit_path(
    default('/configurations/kerberos-env/executable_search_paths', None))
hdfs_principal_name = config['configurations']['hadoop-env'][
    'hdfs_principal_name']
Example #56
# server configurations
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
sudo = AMBARI_SUDO_BINARY

architecture = get_architecture()

# Needed since this writes out the Atlas Hive Hook config file.
cluster_name = config['clusterName']
serviceName = config['serviceName']
role = config['role']

hostname = config["hostname"]

# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
version = default("/commandParams/version", None)
stack_name = status_params.stack_name
stack_name_uppercase = stack_name.upper()
upgrade_direction = default("/commandParams/upgrade_direction", None)
agent_stack_retry_on_unavailability = config['hostLevelParams'][
    'agent_stack_retry_on_unavailability']
agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count",
                                 int)

stack_root = status_params.stack_root

# The source stack will be present during a cross-stack upgrade.
# E.g., BigInsights-4.2.5 or HDP-2.6
source_stack = default("/commandParams/source_stack", None)
if source_stack is None:
    source_stack = upgrade_summary.get_source_stack("OOZIE")
Example #57
import status_params

from resource_management.libraries.functions import format
from resource_management.libraries.functions.version import format_hdp_stack_version
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.script.script import Script

# server configurations
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()

stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)

stack_name = default("/hostLevelParams/stack_name", None)
current_version = default("/hostLevelParams/current_version", None)
component_directory = status_params.component_directory

# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
version = default("/commandParams/version", None)

# default parameters
zk_home = "/usr/hdp/2.3.2.0-2950/zookeeper"
zk_bin = "/usr/hdp/2.3.2.0-2950/zookeeper/bin"
zk_cli_shell = "/usr/hdp/2.3.2.0-2950/zookeeper/bin/zkCli.sh"
config_dir = "/etc/zookeeper/conf"

# hadoop parameters for 2.2+
if Script.is_hdp_stack_greater_or_equal("2.2"):
  zk_home = format("/usr/hdp/current/{component_directory}")
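  # Illustration (hypothetical resolution, not from the original script): with
  # component_directory 'zookeeper-client', format() would yield
  # '/usr/hdp/current/zookeeper-client'.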
Example #58
from resource_management import Script
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions import stack_select

config = Script.get_config()

hostname = config["agentLevelParams"]["hostname"]
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', None)
user_group = config['configurations']['cluster-env']['user_group']
hdfs_tmp_dir = config['configurations']['hadoop-env']['hdfs_tmp_dir']
security_enabled = config['configurations']['cluster-env']['security_enabled']

dfs_type = default("/clusterLevelParams/dfs_type", "")
kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hdfs_site = config['configurations']['hdfs-site']
default_fs = config['configurations']['core-site']['fs.defaultFS']

ambari_libs_dir = "/var/lib/ambari-agent/lib"

import functools
#create partial functions with common arguments for every HdfsResource call
Example #59
from resource_management.libraries.functions import format
from resource_management.libraries.functions.version import format_hdp_stack_version
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.get_bare_principal import get_bare_principal
from resource_management.libraries.script import Script
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions import hdp_select
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import get_kinit_path

# server configurations
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
sudo = AMBARI_SUDO_BINARY

stack_name = default("/hostLevelParams/stack_name", None)
upgrade_direction = default("/commandParams/upgrade_direction", Direction.UPGRADE)
version = default("/commandParams/version", None)

storm_component_home_dir = status_params.storm_component_home_dir
conf_dir = status_params.conf_dir

stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
stack_is_hdp22_or_further = Script.is_hdp_stack_greater_or_equal("2.2")

# default hadoop params
rest_lib_dir = "/usr/lib/storm/contrib/storm-rest"
storm_bin_dir = "/usr/bin"
storm_lib_dir = "/usr/lib/storm/lib/"
Example #60
from resource_management.libraries.functions.version import format_hdp_stack_version
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.get_bare_principal import get_bare_principal
from resource_management.libraries.script.script import Script

import status_params

# server configurations
config = Script.get_config()
exec_tmp_dir = status_params.tmp_dir

# security enabled
security_enabled = status_params.security_enabled

# hdp version
stack_name = default("/hostLevelParams/stack_name", None)
version = default("/commandParams/version", None)
stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)

has_secure_user_auth = False
if Script.is_hdp_stack_greater_or_equal("2.3"):
  has_secure_user_auth = True

# configuration directories
conf_dir = status_params.conf_dir
server_conf_dir = status_params.server_conf_dir

# service locations
hadoop_prefix = hdp_select.get_hadoop_dir("home")
hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")