Example #1
def get_role_component_current_stack_version():
  """
  Gets the current HDP version of the component that this role command is for.
  :return:  the current HDP version of the specified component or None
  """
  role = default("/role", "")
  role_command = default("/roleCommand", "")

  stack_selector_name = stack_tools.get_stack_tool_name(stack_tools.STACK_SELECTOR_NAME)
  Logger.info("Checking version for {0} via {1}".format(role, stack_selector_name))
  if role_command == "SERVICE_CHECK" and role in SERVICE_CHECK_DIRECTORY_MAP:
    stack_select_component = SERVICE_CHECK_DIRECTORY_MAP[role]
  else:
    stack_select_component = get_package_name()

  if stack_select_component is None:
    if not role:
      Logger.error("No role information available.")
    elif not role.lower().endswith("client"):
      Logger.error("Mapping unavailable for role {0}. Skip checking its version.".format(role))
    return None

  current_stack_version = get_stack_version(stack_select_component)

  if current_stack_version is None:
    Logger.warning("Unable to determine {0} version for {1}".format(
      stack_selector_name, stack_select_component))
  else:
    Logger.info("{0} is currently at version {1}".format(
      stack_select_component, current_stack_version))

  return current_stack_version
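
The default() helper used throughout these examples reads a value out of the command's JSON structure by path, falling back when the path is absent. A minimal standalone sketch of that behavior (the real helper lives in resource_management and handles more cases):

def default(key_path, fallback, config=None):
  # Walk an "/a/b/c"-style path through nested dicts, returning the fallback
  # if any segment is missing. Simplified stand-in, not the Ambari helper.
  node = config or {}
  for segment in key_path.strip("/").split("/"):
    if not isinstance(node, dict) or segment not in node:
      return fallback
    node = node[segment]
  return node

print(default("/roleCommand", "", {"roleCommand": "SERVICE_CHECK"}))  # SERVICE_CHECK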
Example #2
def get_role_component_current_stack_version():
  """
  Gets the current HDP version of the component that this role command is for.
  :return:  the current HDP version of the specified component or None
  """
  stack_select_component = None
  role = default("/role", "")
  role_command = default("/roleCommand", "")
  stack_selector_name = stack_tools.get_stack_tool_name(stack_tools.STACK_SELECTOR_NAME)

  if role in SERVER_ROLE_DIRECTORY_MAP:
    stack_select_component = SERVER_ROLE_DIRECTORY_MAP[role]
  elif role_command == "SERVICE_CHECK" and role in SERVICE_CHECK_DIRECTORY_MAP:
    stack_select_component = SERVICE_CHECK_DIRECTORY_MAP[role]

  if stack_select_component is None:
    return None

  current_stack_version = get_stack_version(stack_select_component)

  if current_stack_version is None:
    Logger.warning("Unable to determine {0} version for {1}".format(
      stack_selector_name, stack_select_component))
  else:
    Logger.info("{0} is currently at version {1}".format(
      stack_select_component, current_stack_version))

  return current_stack_version
Example #3
def get_role_component_current_stack_version():
    """
  Gets the current HDP version of the component that this role command is for.
  :return:  the current HDP version of the specified component or None
  """
    stack_select_component = None
    role = default("/role", "")
    role_command = default("/roleCommand", "")
    stack_selector_name = stack_tools.get_stack_tool_name(
        stack_tools.STACK_SELECTOR_NAME)

    if role in SERVER_ROLE_DIRECTORY_MAP:
        stack_select_component = SERVER_ROLE_DIRECTORY_MAP[role]
    elif role_command == "SERVICE_CHECK" and role in SERVICE_CHECK_DIRECTORY_MAP:
        stack_select_component = SERVICE_CHECK_DIRECTORY_MAP[role]

    if stack_select_component is None:
        return None

    current_stack_version = get_stack_version(stack_select_component)

    if current_stack_version is None:
        Logger.warning("Unable to determine {0} version for {1}".format(
            stack_selector_name, stack_select_component))
    else:
        Logger.info("{0} is currently at version {1}".format(
            stack_select_component, current_stack_version))

    return current_stack_version
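
For context, the role-to-component maps consulted above are plain dictionaries defined alongside these functions. The entries below are illustrative assumptions, not the full maps from Ambari's stack_select module:

# Hypothetical excerpts; the real maps are much larger and stack-specific.
SERVER_ROLE_DIRECTORY_MAP = {
  "NAMENODE": "hadoop-hdfs-namenode",
  "DATANODE": "hadoop-hdfs-datanode",
}
SERVICE_CHECK_DIRECTORY_MAP = {
  "HDFS_SERVICE_CHECK": "hadoop-client",
}

component = SERVER_ROLE_DIRECTORY_MAP.get("NAMENODE")  # "hadoop-hdfs-namenode"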
Example #4
  def _create_config_links_if_necessary(self, stack_id, stack_version):
    """
    Sets up the required structure for /etc/<component>/conf symlinks and <stack-root>/current
    configuration symlinks IFF the current stack is less than HDP 2.3 and the new stack is HDP 2.3 or higher

    stack_id:  stack id, e.g. HDP-2.3
    stack_version:  version to set, e.g. 2.3.0.0-1234
    """
    if stack_id is None:
      Logger.info("Cannot create config links when stack_id is not defined")
      return

    args = stack_id.upper().split('-')
    if len(args) != 2:
      Logger.info("Unrecognized stack id {0}, cannot create config links".format(stack_id))
      return

    target_stack_version = args[1]
    if not (target_stack_version and check_stack_feature(StackFeature.CONFIG_VERSIONING, target_stack_version)):
      Logger.info("Configuration symlinks are not needed for {0}".format(stack_version))
      return

    for package_name, directories in conf_select.get_package_dirs().iteritems():
      # if already on HDP 2.3, then we should skip making conf.backup folders
      if self.current_stack_version_formatted and check_stack_feature(StackFeature.CONFIG_VERSIONING, self.current_stack_version_formatted):
        conf_selector_name = stack_tools.get_stack_tool_name(stack_tools.CONF_SELECTOR_NAME)
        Logger.info("The current cluster stack of {0} does not require backing up configurations; "
                    "only {1} versioned config directories will be created.".format(stack_version, conf_selector_name))
        # only link configs for all known packages
        conf_select.select(self.stack_name, package_name, stack_version, ignore_errors = True)
      else:
        # link configs and create conf.backup folders for all known packages
        # this will also call conf-select select
        conf_select.convert_conf_directories_to_symlinks(package_name, stack_version, directories,
          skip_existing_links = False, link_to = "backup")
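
check_stack_feature gates behavior on whether a stack version supports a named capability. A minimal illustrative stand-in (the real implementation reads feature metadata from the stack definition; the version table here is an assumption):

# Hypothetical minimal stand-in: map each feature to the version introducing it.
FEATURE_MIN_VERSIONS = {
  "rolling_upgrade": "2.2",
  "config_versioning": "2.3",
}

def check_stack_feature(feature, stack_version):
  # Compare dotted versions numerically, segment by segment; ignore build suffixes.
  def as_tuple(version):
    return tuple(int(part) for part in version.split("-")[0].split(".") if part.isdigit())
  minimum = FEATURE_MIN_VERSIONS.get(feature)
  return minimum is not None and as_tuple(stack_version) >= as_tuple(minimum)

print(check_stack_feature("config_versioning", "2.3.0.0-1234"))  # True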
Example #5
  def _create_config_links_if_necessary(self, stack_id, stack_version):
    """
    Sets up the required structure for /etc/<component>/conf symlinks and <stack-root>/current
    configuration symlinks IFF the current stack is less than HDP 2.3 and the new stack is HDP 2.3 or higher

    stack_id:  stack id, e.g. HDP-2.3
    stack_version:  version to set, e.g. 2.3.0.0-1234
    """
    if stack_id is None:
      Logger.info("Cannot create config links when stack_id is not defined")
      return

    args = stack_id.upper().split('-')
    if len(args) != 2:
      Logger.info("Unrecognized stack id {0}, cannot create config links".format(stack_id))
      return

    target_stack_version = args[1]
    if not (target_stack_version and check_stack_feature(StackFeature.CONFIG_VERSIONING, target_stack_version)):
      Logger.info("Configuration symlinks are not needed for {0}".format(stack_version))
      return

    # After upgrading hdf-select package from HDF-2.X to HDF-3.Y, we need to create this symlink
    if self.stack_name.upper() == "HDF" \
            and not os.path.exists("/usr/bin/conf-select") and os.path.exists("/usr/bin/hdfconf-select"):
      Link("/usr/bin/conf-select", to = "/usr/bin/hdfconf-select")

    conf_selector_name = stack_tools.get_stack_tool_name(stack_tools.CONF_SELECTOR_NAME)
    Logger.info("The current cluster stack of {0} does not require backing up configurations; "
                "only {1} versioned config directories will be created.".format(stack_version, conf_selector_name))

    # only link configs for all known packages
    for package_name in conf_select.get_package_dirs():
      conf_select.select(self.stack_name, package_name, stack_version, ignore_errors = True)
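
The Link resource above is part of Ambari's resource_management DSL. Outside that framework, the same guard-and-symlink step could be written directly against the standard library; a sketch, not the framework call:

import os

def ensure_conf_select_symlink():
  # Mirror of the HDF guard above: only create the generic selector name if
  # the HDF-specific tool exists and the generic one does not.
  if not os.path.exists("/usr/bin/conf-select") and os.path.exists("/usr/bin/hdfconf-select"):
    os.symlink("/usr/bin/hdfconf-select", "/usr/bin/conf-select")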
Example #6
def get_stack_version_before_install(component_name):
  """
  Works in a similar way to '<stack-selector-tool> status <component>',
  but also works for packages that are not yet installed.
  
  Note: won't work if doing initial install.
  """
  stack_root = Script.get_stack_root()
  component_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_root, "current", component_name)
  stack_selector_name = stack_tools.get_stack_tool_name(stack_tools.STACK_SELECTOR_NAME)
  if os.path.islink(component_dir):
    stack_version = os.path.basename(os.path.dirname(os.readlink(component_dir)))
    match = re.match(r'[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+-[0-9]+', stack_version)
    if match is None:
      Logger.info('Failed to get extracted version with {0} in method get_stack_version_before_install'.format(stack_selector_name))
      return None # lazy fail
    return stack_version
  else:
    return None
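
The version parse above relies on the <stack-root>/<version>/<component> symlink layout: the link target's parent directory is named after the version. A self-contained sketch of the same extraction (the path below is made up for illustration):

import os
import re

def extract_stack_version(link_target):
  # For a target such as /usr/hdp/2.6.5.0-292/hadoop, the version is the
  # basename of the parent directory: 2.6.5.0-292.
  candidate = os.path.basename(os.path.dirname(link_target))
  if re.match(r'[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+-[0-9]+', candidate):
    return candidate
  return None

print(extract_stack_version("/usr/hdp/2.6.5.0-292/hadoop"))  # 2.6.5.0-292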
Example #7
def get_stack_version(package_name):
    """
    @param package_name: name of the package from which to determine the stack version
    """

    stack_selector_path = stack_tools.get_stack_tool_path(
        stack_tools.STACK_SELECTOR_NAME)

    if not os.path.exists(stack_selector_path):
        Logger.info(
            'Skipping get_stack_version since {0} is not yet available'.format(
                stack_selector_path))
        return None  # lazy fail

    try:
        command = 'ambari-python-wrap {stack_selector_path} status {package_name}'.format(
            stack_selector_path=stack_selector_path, package_name=package_name)
        return_code, stack_output = shell.call(command, timeout=20)
    except Exception as e:
        Logger.error(str(e))
        raise Fail(
            'Unable to execute {0} command to retrieve the version.'.format(
                stack_selector_path))

    if return_code != 0:
        raise Fail(
            'Unable to determine the current version: {0} returned code {1}'.format(
                stack_selector_path, return_code))

    # The selector prints a line such as "<package> - <version>" (output shape
    # assumed here); keep only the token that looks like a stack version.
    match = re.search(r'([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+-[0-9]+)', str(stack_output))
    if match is None:
        Logger.info('There is no existing version installed yet.')
        return None  # lazy fail

    return match.group(1)
Example #8
def get_hadoop_conf_dir(force_latest_on_upgrade=False):
    """
  Gets the shared hadoop conf directory using:
  1.  Start with /etc/hadoop/conf
  2.  When the stack is greater than HDP-2.2, use <stack-root>/current/hadoop-client/conf
  3.  Only when doing a RU and HDP-2.3 or higher, use the value as computed
      by <conf-selector-tool>.  This is in the form <stack-root>/VERSION/hadoop/conf to make sure
      the configs are written in the correct place. However, if the component itself has
      not yet been upgraded, it should use the hadoop configs from the prior version.
      This will perform an <stack-selector-tool> status to determine which version to use.
  :param force_latest_on_upgrade:  if True, then force the returned path to always
  be that of the upgrade target version, even if <stack-selector-tool> has not been called. This
  is primarily used by hooks like before-ANY to ensure that hadoop environment
  configurations are written to the correct location since they are written out
  before the <stack-selector-tool>/<conf-selector-tool> would have been called.
  """
    hadoop_conf_dir = "/etc/hadoop/conf"
    stack_name = None
    stack_root = Script.get_stack_root()
    version = None
    allow_setting_conf_select_symlink = False

    if not Script.in_stack_upgrade():
        # During normal operation, the HDP stack must be 2.3 or higher
        if Script.is_stack_greater_or_equal("2.2"):
            hadoop_conf_dir = os.path.join(stack_root, "current",
                                           "hadoop-client", "conf")

        if Script.is_stack_greater_or_equal("2.3"):
            hadoop_conf_dir = os.path.join(stack_root, "current",
                                           "hadoop-client", "conf")
            stack_name = default("/hostLevelParams/stack_name", None)
            version = default("/commandParams/version", None)

            if stack_name and version:
                version = str(version)
                allow_setting_conf_select_symlink = True
    else:
        # During an upgrade/downgrade, which can be a Rolling or Express Upgrade, need to calculate it based on the version
        '''
    Whenever upgrading to HDP 2.2, or downgrading back to 2.2, need to use /etc/hadoop/conf
    Whenever upgrading to HDP 2.3, or downgrading back to 2.3, need to use a versioned hadoop conf dir

    Type__|_Source_|_Target_|_Direction_____________|_Comment_____________________________________________________________
    Normal|        | 2.2    |                       | Use /etc/hadoop/conf
    Normal|        | 2.3    |                       | Use /etc/hadoop/conf, which should be a symlink to <stack-root>/current/hadoop-client/conf
    EU    | 2.1    | 2.3    | Upgrade               | Use versioned <stack-root>/current/hadoop-client/conf
          |        |        | No Downgrade Allowed  | Invalid
    EU/RU | 2.2    | 2.2.*  | Any                   | Use <stack-root>/current/hadoop-client/conf
    EU/RU | 2.2    | 2.3    | Upgrade               | Use <stack-root>/$version/hadoop/conf, which should be a symlink destination
          |        |        | Downgrade             | Use <stack-root>/current/hadoop-client/conf
    EU/RU | 2.3    | 2.3.*  | Any                   | Use <stack-root>/$version/hadoop/conf, which should be a symlink destination
    '''

        # The method "is_stack_greater_or_equal" uses "stack_version" which is the desired stack, e.g., 2.2 or 2.3
        # In an RU, it is always the desired stack, and doesn't change even during the Downgrade!
        # In an RU Downgrade from HDP 2.3 to 2.2, the first thing we do is
        # rm /etc/[component]/conf and then mv /etc/[component]/conf.backup /etc/[component]/conf
        if Script.is_stack_greater_or_equal("2.2"):
            hadoop_conf_dir = os.path.join(stack_root, "current",
                                           "hadoop-client", "conf")

            # This contains the "version", including the build number, that is actually used during a stack upgrade and
            # is the version upgrading/downgrading to.
            stack_info = stack_select._get_upgrade_stack()

            if stack_info is not None:
                stack_name = stack_info[0]
                version = stack_info[1]
            else:
                raise Fail("Unable to get parameter 'version'")

            Logger.info(
                "In the middle of a stack upgrade/downgrade for Stack {0} and destination version {1}, determining which hadoop conf dir to use."
                .format(stack_name, version))
            # This is the version either upgrading or downgrading to.
            if compare_versions(format_stack_version(version), "2.3.0.0") >= 0:
                # Determine if <stack-selector-tool> has been run and if not, then use the current
                # hdp version until this component is upgraded.
                if not force_latest_on_upgrade:
                    current_stack_version = stack_select.get_role_component_current_stack_version()
                    if current_stack_version is not None and version != current_stack_version:
                        version = current_stack_version
                        stack_selector_name = stack_tools.get_stack_tool_name(
                            stack_tools.STACK_SELECTOR_NAME)
                        Logger.info(
                            "{0} has not yet been called to update the symlink for this component, "
                            "keep using version {1}".format(
                                stack_selector_name, current_stack_version))

                # Only change the hadoop_conf_dir path, don't <conf-selector-tool> this older version
                hadoop_conf_dir = os.path.join(stack_root, version, "hadoop",
                                               "conf")
                Logger.info("Hadoop conf dir: {0}".format(hadoop_conf_dir))

                allow_setting_conf_select_symlink = True

    if allow_setting_conf_select_symlink:
        # If not in the middle of an upgrade and on HDP 2.3 or higher, or if
        # upgrading stack to version 2.3.0.0 or higher (which may be upgrade or downgrade), then consider setting the
        # symlink for /etc/hadoop/conf.
        # If a host does not have any HDFS or YARN components (e.g., only ZK), then it will not contain /etc/hadoop/conf
        # Therefore, any calls to <conf-selector-tool> will fail.
        # For that reason, if the hadoop conf directory exists, then make sure it is set.
        if os.path.exists(hadoop_conf_dir):
            conf_selector_name = stack_tools.get_stack_tool_name(
                stack_tools.CONF_SELECTOR_NAME)
            Logger.info(
                "The hadoop conf dir {0} exists, will call {1} on it for version {2}"
                .format(hadoop_conf_dir, conf_selector_name, version))
            select(stack_name, "hadoop", version)

    Logger.info("Using hadoop conf dir: {0}".format(hadoop_conf_dir))
    return hadoop_conf_dir
Example #9
def get_hadoop_conf_dir(force_latest_on_upgrade=False):
  """
  Gets the shared hadoop conf directory using:
  1.  Start with /etc/hadoop/conf
  2.  When the stack is greater than HDP-2.2, use <stack-root>/current/hadoop-client/conf
  3.  Only when doing a RU and HDP-2.3 or higher, use the value as computed
      by <conf-selector-tool>.  This is in the form <stack-root>/VERSION/hadoop/conf to make sure
      the configs are written in the correct place. However, if the component itself has
      not yet been upgraded, it should use the hadoop configs from the prior version.
      This will perform a <stack-selector-tool> status to determine which version to use.
  :param force_latest_on_upgrade:  if True, then force the returned path to always
  be that of the upgrade target version, even if <stack-selector-tool> has not been called. This
  is primarily used by hooks like before-ANY to ensure that hadoop environment
  configurations are written to the correct location since they are written out
  before the <stack-selector-tool>/<conf-selector-tool> would have been called.
  """
  hadoop_conf_dir = "/etc/hadoop/conf"
  stack_name = None
  stack_root = Script.get_stack_root()
  stack_version = Script.get_stack_version()
  version = None
  allow_setting_conf_select_symlink = False

  if not Script.in_stack_upgrade():
    # During normal operation, the HDP stack must be 2.3 or higher
    if stack_version and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version):
      hadoop_conf_dir = os.path.join(stack_root, "current", "hadoop-client", "conf")

    if stack_version and check_stack_feature(StackFeature.CONFIG_VERSIONING, stack_version):
      hadoop_conf_dir = os.path.join(stack_root, "current", "hadoop-client", "conf")
      stack_name = default("/hostLevelParams/stack_name", None)
      version = default("/commandParams/version", None)

      if stack_name and version:
        version = str(version)
        allow_setting_conf_select_symlink = True
  else:
    # During an upgrade/downgrade, which can be a Rolling or Express Upgrade, need to calculate it based on the version
    '''
    Whenever upgrading to HDP 2.2, or downgrading back to 2.2, need to use /etc/hadoop/conf
    Whenever upgrading to HDP 2.3, or downgrading back to 2.3, need to use a versioned hadoop conf dir

    Type__|_Source_|_Target_|_Direction_____________|_Comment_____________________________________________________________
    Normal|        | 2.2    |                       | Use /etc/hadoop/conf
    Normal|        | 2.3    |                       | Use /etc/hadoop/conf, which should be a symlink to <stack-root>/current/hadoop-client/conf
    EU    | 2.1    | 2.3    | Upgrade               | Use versioned <stack-root>/current/hadoop-client/conf
          |        |        | No Downgrade Allowed  | Invalid
    EU/RU | 2.2    | 2.2.*  | Any                   | Use <stack-root>/current/hadoop-client/conf
    EU/RU | 2.2    | 2.3    | Upgrade               | Use <stack-root>/$version/hadoop/conf, which should be a symlink destination
          |        |        | Downgrade             | Use <stack-root>/current/hadoop-client/conf
    EU/RU | 2.3    | 2.3.*  | Any                   | Use <stack-root>/$version/hadoop/conf, which should be a symlink destination
    '''

    # The "stack_version" is the desired stack, e.g., 2.2 or 2.3
    # In an RU, it is always the desired stack, and doesn't change even during the Downgrade!
    # In an RU Downgrade from HDP 2.3 to 2.2, the first thing we do is 
    # rm /etc/[component]/conf and then mv /etc/[component]/conf.backup /etc/[component]/conf
    if stack_version and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version):
      hadoop_conf_dir = os.path.join(stack_root, "current", "hadoop-client", "conf")

      # This contains the "version", including the build number, that is actually used during a stack upgrade and
      # is the version upgrading/downgrading to.
      stack_info = stack_select._get_upgrade_stack()

      if stack_info is not None:
        stack_name = stack_info[0]
        version = stack_info[1]
      else:
        raise Fail("Unable to get parameter 'version'")
      
      Logger.info("In the middle of a stack upgrade/downgrade for Stack {0} and destination version {1}, determining which hadoop conf dir to use.".format(stack_name, version))
      # This is the version either upgrading or downgrading to.
      if version and check_stack_feature(StackFeature.CONFIG_VERSIONING, version):
        # Determine if <stack-selector-tool> has been run and if not, then use the current
        # hdp version until this component is upgraded.
        if not force_latest_on_upgrade:
          current_stack_version = stack_select.get_role_component_current_stack_version()
          if current_stack_version is not None and version != current_stack_version:
            version = current_stack_version
            stack_selector_name = stack_tools.get_stack_tool_name(stack_tools.STACK_SELECTOR_NAME)
            Logger.info("{0} has not yet been called to update the symlink for this component, "
                        "keep using version {1}".format(stack_selector_name, current_stack_version))

        # Only change the hadoop_conf_dir path, don't <conf-selector-tool> this older version
        hadoop_conf_dir = os.path.join(stack_root, version, "hadoop", "conf")
        Logger.info("Hadoop conf dir: {0}".format(hadoop_conf_dir))

        allow_setting_conf_select_symlink = True

  if allow_setting_conf_select_symlink:
    # If not in the middle of an upgrade and on HDP 2.3 or higher, or if
    # upgrading stack to version 2.3.0.0 or higher (which may be upgrade or downgrade), then consider setting the
    # symlink for /etc/hadoop/conf.
    # If a host does not have any HDFS or YARN components (e.g., only ZK), then it will not contain /etc/hadoop/conf
    # Therefore, any calls to <conf-selector-tool> will fail.
    # For that reason, if the hadoop conf directory exists, then make sure it is set.
    if os.path.exists(hadoop_conf_dir):
      conf_selector_name = stack_tools.get_stack_tool_name(stack_tools.CONF_SELECTOR_NAME)
      Logger.info("The hadoop conf dir {0} exists, will call {1} on it for version {2}".format(
              hadoop_conf_dir, conf_selector_name, version))
      select(stack_name, "hadoop", version)

  Logger.info("Using hadoop conf dir: {0}".format(hadoop_conf_dir))
  return hadoop_conf_dir
Example #10
def get_hadoop_conf_dir(force_latest_on_upgrade=False):
    """
  Gets the shared hadoop conf directory using:
  1.  Start with /etc/hadoop/conf
  2.  When the stack is greater than HDP-2.2, use <stack-root>/current/hadoop-client/conf
  3.  Only when doing a RU and HDP-2.3 or higher, use the value as computed
      by <conf-selector-tool>.  This is in the form <stack-root>/VERSION/hadoop/conf to make sure
      the configs are written in the correct place. However, if the component itself has
      not yet been upgraded, it should use the hadoop configs from the prior version.
      This will perform an <stack-selector-tool> status to determine which version to use.
  :param force_latest_on_upgrade:  if True, then force the returned path to always
  be that of the upgrade target version, even if <stack-selector-tool> has not been called. This
  is primarily used by hooks like before-ANY to ensure that hadoop environment
  configurations are written to the correct location since they are written out
  before the <stack-selector-tool>/<conf-selector-tool> would have been called.
  """
    hadoop_conf_dir = "/etc/hadoop/conf"
    stack_name = None
    stack_root = Script.get_stack_root()
    stack_version = Script.get_stack_version()
    version = None

    if not Script.in_stack_upgrade():
        # During normal operation, the HDP stack must be 2.3 or higher
        if stack_version and check_stack_feature(StackFeature.ROLLING_UPGRADE,
                                                 stack_version):
            hadoop_conf_dir = os.path.join(stack_root, "current",
                                           "hadoop-client", "conf")

        if stack_version and check_stack_feature(
                StackFeature.CONFIG_VERSIONING, stack_version):
            hadoop_conf_dir = os.path.join(stack_root, "current",
                                           "hadoop-client", "conf")
            stack_name = default("/hostLevelParams/stack_name", None)
            version = default("/commandParams/version", None)

            if not os.path.islink(hadoop_conf_dir) and stack_name and version:
                version = str(version)
    else:
        # The "stack_version" is the desired stack, e.g., 2.2 or 2.3
        # In an RU, it is always the desired stack, and doesn't change even during the Downgrade!
        # In an RU Downgrade from HDP 2.3 to 2.2, the first thing we do is
        # rm /etc/[component]/conf and then mv /etc/[component]/conf.backup /etc/[component]/conf
        if stack_version and check_stack_feature(StackFeature.ROLLING_UPGRADE,
                                                 stack_version):
            hadoop_conf_dir = os.path.join(stack_root, "current",
                                           "hadoop-client", "conf")

            # This contains the "version", including the build number, that is actually used during a stack upgrade and
            # is the version upgrading/downgrading to.
            stack_info = stack_select._get_upgrade_stack()

            if stack_info is None:
                raise Fail(
                    "Unable to retrieve the upgrade/downgrade stack information from the request"
                )

            stack_name = stack_info[0]
            version = stack_info[1]

            Logger.info(
                "An upgrade/downgrade for {0}-{1} is in progress, determining which hadoop conf dir to use."
                .format(stack_name, version))

            # This is the version either upgrading or downgrading to.
            if version and check_stack_feature(StackFeature.CONFIG_VERSIONING,
                                               version):
                # Determine if <stack-selector-tool> has been run and if not, then use the current
                # hdp version until this component is upgraded.
                if not force_latest_on_upgrade:
                    current_stack_version = stack_select.get_role_component_current_stack_version()
                    if current_stack_version is not None and version != current_stack_version:
                        version = current_stack_version
                        stack_selector_name = stack_tools.get_stack_tool_name(
                            stack_tools.STACK_SELECTOR_NAME)
                        Logger.info(
                            "{0} has not yet been called to update the symlink for this component, "
                            "keep using version {1}".format(
                                stack_selector_name, current_stack_version))

                # Only change the hadoop_conf_dir path, don't <conf-selector-tool> this older version
                hadoop_conf_dir = os.path.join(stack_root, version, "hadoop",
                                               "conf")
                Logger.info("Hadoop conf dir: {0}".format(hadoop_conf_dir))

    Logger.info("Using hadoop conf dir: {0}".format(hadoop_conf_dir))
    return hadoop_conf_dir
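
The upgrade table in the docstrings above reduces to a small decision function. A simplified distillation under the same feature names (this is a sketch of the selection logic, not an Ambari API):

import os

def resolve_hadoop_conf_dir(stack_root, version, supports_rolling_upgrade,
                            supports_config_versioning, in_upgrade):
  # 1. Default for stacks without rolling-upgrade support.
  conf_dir = "/etc/hadoop/conf"
  # 2. Rolling-upgrade-capable stacks use the "current" symlink.
  if supports_rolling_upgrade:
    conf_dir = os.path.join(stack_root, "current", "hadoop-client", "conf")
  # 3. During an upgrade on a config-versioning stack, pin the versioned dir.
  if in_upgrade and supports_config_versioning and version:
    conf_dir = os.path.join(stack_root, version, "hadoop", "conf")
  return conf_dir

print(resolve_hadoop_conf_dir("/usr/hdp", "2.3.0.0-1234", True, True, True))
# /usr/hdp/2.3.0.0-1234/hadoop/conf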