def actionexecute(self, env):
    config = Script.get_config()

    version = default('/commandParams/version', None)
    stack_name = default('/hostLevelParams/stack_name', "")

    if not version:
      raise Fail("Value is required for '/commandParams/version'")
  
    # other os?
    if OSCheck.is_redhat_family():
      cmd = ('/usr/bin/yum', 'clean', 'all')
      code, out = shell.call(cmd, sudo=True)

    min_ver = format_hdp_stack_version("2.2")
    real_ver = format_hdp_stack_version(version)
    if stack_name == "HDP":
      if compare_versions(real_ver, min_ver) >= 0:
        cmd = ('hdp-select', 'set', 'all', version)
        code, out = shell.call(cmd, sudo=True)

      if compare_versions(real_ver, format_hdp_stack_version("2.3")) >= 0:
        # backup the old and symlink /etc/[component]/conf to /usr/hdp/current/[component]
        for k, v in conf_select.PACKAGE_DIRS.iteritems():
          for dir_def in v:
            link_config(dir_def['conf_dir'], dir_def['current_dir'])
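A note on the pattern: nearly every example on this page guards its upgrade steps with format_hdp_stack_version plus compare_versions before calling hdp-select or conf-select. The snippet below is a standalone sketch of roughly what that normalize-and-compare guard amounts to; the helper names and the padding behavior are illustrative assumptions, not the real resource_management functions.

# Illustrative stand-ins only -- not the Ambari resource_management helpers.
def normalize_stack_version(version):
    """Pad a dotted version to four numeric fields, e.g. '2.2' -> '2.2.0.0' (assumed behavior)."""
    numeric = version.split('-')[0]                        # drop a build suffix such as '-2557'
    parts = (numeric.split('.') + ['0', '0', '0'])[:4]
    return '.'.join(parts)

def is_at_least(version, minimum):
    """True if version >= minimum after normalization."""
    as_tuple = lambda v: tuple(int(p) for p in normalize_stack_version(v).split('.'))
    return as_tuple(version) >= as_tuple(minimum)

print(is_at_least('2.3.0.0-2557', '2.2.0.0'))   # True  -> run the hdp-select / conf-select steps
print(is_at_least('2.1.7.0', '2.2.0.0'))        # False -> skip them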
Example #2
    def actionexecute(self, env):
        config = Script.get_config()

        version = default('/commandParams/version', None)
        stack_name = default('/hostLevelParams/stack_name', "")

        if not version:
            raise Fail("Value is required for '/commandParams/version'")

        # other os?
        if OSCheck.is_redhat_family():
            cmd = ('/usr/bin/yum', 'clean', 'all')
            code, out = shell.call(cmd, sudo=True)

        min_ver = format_hdp_stack_version("2.2")
        real_ver = format_hdp_stack_version(version)
        if stack_name == "HDP":
            if compare_versions(real_ver, min_ver) >= 0:
                cmd = ('ambari-python-wrap', HDP_SELECT, 'set', 'all', version)
                code, out = shell.call(cmd, sudo=True)

            if compare_versions(real_ver,
                                format_hdp_stack_version("2.3")) >= 0:
                # backup the old and symlink /etc/[component]/conf to /usr/hdp/current/[component]
                for k, v in conf_select.PACKAGE_DIRS.iteritems():
                    for dir_def in v:
                        link_config(dir_def['conf_dir'],
                                    dir_def['current_dir'])
Example #3
    def unlink_all_configs(self, env):
        """
    Reverses the work performed in link_config. This should only be used when downgrading from
    HDP 2.3 to 2.2 in order to undo the symlink work required for 2.3.
    """
        stack_name = default('/hostLevelParams/stack_name', "").upper()
        downgrade_to_version = default('/commandParams/version', None)
        downgrade_from_version = default(
            '/commandParams/downgrade_from_version', None)
        upgrade_direction = default("/commandParams/upgrade_direction",
                                    Direction.UPGRADE)

        # downgrade only
        if upgrade_direction != Direction.DOWNGRADE:
            Logger.warning(
                "Unlinking configurations should only be performed on a downgrade."
            )
            return

        # HDP only
        if stack_name != "HDP":
            Logger.warning(
                "Unlinking configurations should only be performed on the HDP stack."
            )
            return

        if downgrade_to_version is None or downgrade_from_version is None:
            Logger.warning(
                "Both 'commandParams/version' and 'commandParams/downgrade_from_version' must be specified to unlink configs on downgrade."
            )
            return

        # normalize the versions
        stack_23 = format_hdp_stack_version("2.3")
        downgrade_to_version = format_hdp_stack_version(downgrade_to_version)
        downgrade_from_version = format_hdp_stack_version(
            downgrade_from_version)

        # downgrade-to-version must be 2.2 (less than 2.3)
        if compare_versions(downgrade_to_version, stack_23) >= 0:
            Logger.warning(
                "Unlinking configurations should only be performed when downgrading to HDP 2.2"
            )
            return

        # downgrade-from-version must be 2.3+
        if compare_versions(downgrade_from_version, stack_23) < 0:
            Logger.warning(
                "Unlinking configurations should only be performed when downgrading from HDP 2.3 or later"
            )
            return

        # iterate through all directory conf mappings and undo the symlinks
        for key, value in conf_select.PACKAGE_DIRS.iteritems():
            for directory_mapping in value:
                original_config_directory = directory_mapping['conf_dir']
                self._unlink_config(original_config_directory)
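The link_config and _unlink_config helpers used above are not shown on this page. As a purely hypothetical sketch of the behavior the surrounding comments describe (copy the original /etc/<component>/conf aside, point the old path at the versioned directory, and reverse that on downgrade), under those assumptions:

# Hypothetical sketch only; the real Ambari helpers are not reproduced here and may differ.
import os
import shutil

def link_config_sketch(old_conf_dir, versioned_conf_dir):
    """Back up old_conf_dir to '<old_conf_dir>.backup', then replace it with a symlink."""
    if os.path.islink(old_conf_dir):
        return                                             # already converted to a symlink
    backup_dir = old_conf_dir + ".backup"
    if os.path.isdir(old_conf_dir):
        if not os.path.exists(backup_dir):
            shutil.copytree(old_conf_dir, backup_dir)      # keep a copy of the original configs
        shutil.rmtree(old_conf_dir)
    os.symlink(versioned_conf_dir, old_conf_dir)

def unlink_config_sketch(old_conf_dir):
    """Reverse link_config_sketch: drop the symlink and restore the backed-up configs."""
    backup_dir = old_conf_dir + ".backup"
    if os.path.islink(old_conf_dir):
        os.unlink(old_conf_dir)
    if os.path.isdir(backup_dir) and not os.path.exists(old_conf_dir):
        shutil.move(backup_dir, old_conf_dir)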
Example #4
 def set_version(self):
   from resource_management.libraries.functions.default import default
   stack_name = default("/hostLevelParams/stack_name", None)
   version = default("/commandParams/version", None)
   stack_version_unformatted = str(default("/hostLevelParams/stack_version", ""))
   hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
   stack_to_component = self.get_stack_to_component()
   if stack_to_component:
     component_name = stack_to_component[stack_name] if stack_name in stack_to_component else None
     if component_name and stack_name and version and \
             compare_versions(format_hdp_stack_version(hdp_stack_version), '2.2.0.0') >= 0:
       Execute(('/usr/bin/hdp-select', 'set', component_name, version),
               sudo = True)
Example #5
 def set_version(self):
   from resource_management.libraries.functions.default import default
   stack_name = default("/hostLevelParams/stack_name", None)
   version = default("/commandParams/version", None)
   stack_version_unformatted = str(default("/hostLevelParams/stack_version", ""))
   hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
   stack_to_component = self.get_stack_to_component()
   if stack_to_component:
     component_name = stack_to_component[stack_name] if stack_name in stack_to_component else None
     if component_name and stack_name and version and \
             compare_versions(format_hdp_stack_version(hdp_stack_version), '2.2.0.0') >= 0:
       Execute(('/usr/bin/hdp-select', 'set', component_name, version),
               sudo = True)
Example #6
  def pre_rolling_restart(self, env):
    import params

    env.set_params(params)
    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.3.2.0') >= 0:
      conf_select.select(params.stack_name, "spark", params.version)
      hdp_select.select("spark-thriftserver", params.version)
Example #7
  def pre_rolling_restart(self, env):
    import params

    env.set_params(params)
    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      Execute(format("hdp-select set spark-historyserver {version}"))
      copy_tarballs_to_hdfs('tez', 'spark-historyserver', params.spark_user, params.hdfs_user, params.user_group)
Example #8
 def pre_upgrade_restart(self, env, upgrade_type=None):
   import params
   env.set_params(params)
   if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
     conf_select.select(params.stack_name, "storm", params.version)
     hdp_select.select("storm-client", params.version)
     hdp_select.select("storm-nimbus", params.version)
Example #9
  def pre_upgrade_restart(self, env, upgrade_type=None):
    import params
    env.set_params(params)

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.3.0.0') >= 0:
      # conf_select.select(params.stack_name, "atlas", params.version)
      hdp_select.select("atlas-server", params.version)
Example #10
def prestart(env, hdp_component):
    import params

    if params.version and compare_versions(
            format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
        conf_select.select(params.stack_name, "kafka", params.version)
        Execute("hdp-select set {0} {1}".format(hdp_component, params.version))
Example #11
  def pre_rolling_restart(self, env):
    import params
    env.set_params(params)
    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:

      absolute_backup_dir = None
      if params.upgrade_direction and params.upgrade_direction == Direction.UPGRADE:
        Logger.info("Backing up directories. Initial conf folder: %s" % os.path.realpath(params.knox_conf_dir))

        # This will backup the contents of the conf directory into /tmp/knox-upgrade-backup/knox-conf-backup.tar
        absolute_backup_dir = upgrade.backup_data()

      # conf-select will change the symlink to the conf folder.
      conf_select.select(params.stack_name, "knox", params.version)
      hdp_select.select("knox-server", params.version)

      # Extract the tar of the old conf folder into the new conf directory
      if absolute_backup_dir is not None and params.upgrade_direction and params.upgrade_direction == Direction.UPGRADE:
        conf_tar_source_path = os.path.join(absolute_backup_dir, upgrade.BACKUP_CONF_ARCHIVE)
        if os.path.exists(conf_tar_source_path):
          extract_dir = os.path.realpath(params.knox_conf_dir)
          conf_tar_dest_path = os.path.join(extract_dir, upgrade.BACKUP_CONF_ARCHIVE)
          Logger.info("Copying %s into %s file." % (upgrade.BACKUP_CONF_ARCHIVE, conf_tar_dest_path))
          Execute(('cp', conf_tar_source_path, conf_tar_dest_path),
                  sudo = True,
          )

          tar_archive.untar_archive(conf_tar_source_path, extract_dir)
          
          File(conf_tar_dest_path,
               action = "delete",
          )
Example #12
  def pre_upgrade_restart(self, env, upgrade_type=None):
    import params
    env.set_params(params)

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      conf_select.select(params.stack_name, "storm", params.version)
      hdp_select.select("storm-client", params.version)
Example #13
  def pre_rolling_restart(self, env):
    import params
    env.set_params(params)

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      conf_select.select(params.stack_name, "sqoop", params.version)
      hdp_select.select("sqoop-client", params.version)
Example #14
  def _create_config_links_if_necessary(self, stack_id, stack_version):
    """
    Sets up the required structure for /etc/<component>/conf symlinks and /usr/hdp/current
    configuration symlinks IFF the current stack is below HDP 2.3 and the new stack is HDP 2.3 or later

    stack_id:  stack id, ie HDP-2.3
    stack_version:  version to set, ie 2.3.0.0-1234
    """
    if stack_id is None:
      Logger.info("Cannot create config links when stack_id is not defined")
      return

    args = stack_id.upper().split('-')
    if len(args) != 2:
      Logger.info("Unrecognized stack id {0}, cannot create config links".format(stack_id))
      return

    if args[0] != "HDP":
      Logger.info("Unrecognized stack name {0}, cannot create config links".format(args[0]))
      return

    if compare_versions(format_hdp_stack_version(args[1]), "2.3.0.0") < 0:
      Logger.info("Configuration symlinks are not needed for {0}, only HDP-2.3+".format(stack_version))
      return

    for package_name, directories in conf_select.PACKAGE_DIRS.iteritems():
      # if already on HDP 2.3, then we should skip making conf.backup folders
      if self.current_hdp_stack_version and compare_versions(self.current_hdp_stack_version, '2.3') >= 0:
        Logger.info("The current cluster stack of {0} does not require backing up configurations; "
                    "only conf-select versioned config directories will be created.".format(stack_version))
        # only link configs for all known packages
        conf_select.link_component_conf_to_versioned_config(package_name, stack_version)
      else:
        # link configs and create conf.backup folders for all known packages
        conf_select.convert_conf_directories_to_symlinks(package_name, stack_version, directories,
          skip_existing_links = False, link_to = "backup")
Example #15
  def pre_rolling_restart(self, env):
    Logger.info("Executing DataNode Rolling Upgrade pre-restart")
    import params
    env.set_params(params)

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      Execute(format("hdp-select set hadoop-hdfs-datanode {version}"))
Example #16
def prestart(env, hdp_component):
    import params

    if params.version and compare_versions(
            format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
        conf_select.select(params.stack_name, "hbase", params.version)
        hdp_select.select(hdp_component, params.version)
Example #17
def _get_current_hiveserver_version():
    """
  Runs "hive --version" and parses the result in order
  to obtain the current version of hive.

  :return:  the hiveserver2 version, returned by "hive --version"
  """
    import params

    try:
        # When downgrading, the source version should be the version we are downgrading from
        if "downgrade" == params.upgrade_direction:
            if not params.downgrade_from_version:
                raise Fail(
                    'The version from which we are downgrading from should be provided in \'downgrade_from_version\''
                )
            source_version = params.downgrade_from_version
        else:
            source_version = params.current_version
        hive_execute_path = _get_hive_execute_path(source_version)
        version_hive_bin = params.hive_bin
        formatted_source_version = format_hdp_stack_version(source_version)
        if formatted_source_version and compare_versions(
                formatted_source_version, "2.2") >= 0:
            version_hive_bin = format('/usr/hdp/{source_version}/hive/bin')
        command = format('{version_hive_bin}/hive --version')
        return_code, hdp_output = shell.call(command,
                                             user=params.hive_user,
                                             path=hive_execute_path)
    except Exception, e:
        Logger.error(str(e))
        raise Fail(
            'Unable to execute hive --version command to retrieve the hiveserver2 version.'
        )
Example #18
def zookeeper_service(action='start', upgrade_type=None):
  import params

  # This path may be missing after Ambari upgrade. We need to create it.
  if upgrade_type is None and not os.path.exists("/usr/hdp/current/zookeeper-server") and params.current_version \
    and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
    conf_select.select(params.stack_name, "zookeeper", params.current_version)
    hdp_select.select("zookeeper-server", params.version)

  cmd = format("env ZOOCFGDIR={config_dir} ZOOCFG=zoo.cfg {zk_bin}/zkServer.sh")

  if action == 'start':
    daemon_cmd = format("source {config_dir}/zookeeper-env.sh ; {cmd} start")
    no_op_test = format("ls {zk_pid_file} >/dev/null 2>&1 && ps -p `cat {zk_pid_file}` >/dev/null 2>&1")
    Execute(daemon_cmd,
            not_if=no_op_test,
            user=params.zk_user
    )

    if params.security_enabled:
      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")

      Execute(kinit_cmd,
              user=params.smokeuser
      )

  elif action == 'stop':
    daemon_cmd = format("source {config_dir}/zookeeper-env.sh ; {cmd} stop")
    rm_pid = format("rm -f {zk_pid_file}")
    Execute(daemon_cmd,
            user=params.zk_user
    )
    Execute(rm_pid)
Example #19
def _get_directory_mappings_during_upgrade():
  """
  Gets a dictionary of directory to archive name that represents the
  directories that need to be backed up and their output tarball archive targets
  :return:  the dictionary of directory to tarball mappings
  """
  import params

  # Must be performing an Upgrade
  if params.upgrade_direction is None or params.upgrade_direction != Direction.UPGRADE or \
          params.upgrade_from_version is None or params.upgrade_from_version == "":
    Logger.error("Function _get_directory_mappings_during_upgrade() can only be called during a Stack Upgrade in direction UPGRADE.")
    return {}

  # By default, use this for all stacks.
  knox_data_dir = '/var/lib/knox/data'

  if params.stack_name and params.stack_name.upper() == "HDP" and \
          compare_versions(format_hdp_stack_version(params.upgrade_from_version), "2.3.0.0") > 0:
    # Use the version that is being upgraded from.
    knox_data_dir = format('/usr/hdp/{upgrade_from_version}/knox/data')

  # the trailing "/" is important here so as to not include the "conf" folder itself
  directories = {knox_data_dir: BACKUP_DATA_ARCHIVE, params.knox_conf_dir + "/": BACKUP_CONF_ARCHIVE}

  Logger.info(format("Knox directories to backup:\n{directories}"))
  return directories
Example #20
def _get_hive_execute_path(hdp_stack_version):
    """
  Returns the exact execute path to use for the given stack-version.
  This method does not return the "current" path
  :param hdp_stack_version: Exact stack-version to use in the new path
  :return: Hive execute path for the exact hdp stack-version
  """
    import params

    hive_execute_path = params.execute_path
    formatted_stack_version = format_hdp_stack_version(hdp_stack_version)
    if formatted_stack_version and compare_versions(formatted_stack_version,
                                                    "2.2") >= 0:
        # hive_bin
        new_hive_bin = format('/usr/hdp/{hdp_stack_version}/hive/bin')
        if (os.pathsep + params.hive_bin) in hive_execute_path:
            hive_execute_path = hive_execute_path.replace(
                os.pathsep + params.hive_bin, os.pathsep + new_hive_bin)
        # hadoop_bin_dir
        new_hadoop_bin = hdp_select.get_hadoop_dir_for_stack_version(
            "bin", hdp_stack_version)
        old_hadoop_bin = params.hadoop_bin_dir
        if new_hadoop_bin and len(new_hadoop_bin) > 0 and (
                os.pathsep + old_hadoop_bin) in hive_execute_path:
            hive_execute_path = hive_execute_path.replace(
                os.pathsep + old_hadoop_bin, os.pathsep + new_hadoop_bin)
    return hive_execute_path
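The path rewriting above swaps the "current" hive and hadoop bin entries inside a PATH-style string for their exact-version equivalents. A small self-contained illustration of that substitution, with made-up paths and version numbers:

import os

# Made-up values for illustration; not taken from a real cluster.
execute_path = os.pathsep.join([
    "/usr/sbin",
    "/usr/hdp/current/hive-client/bin",
    "/usr/hdp/current/hadoop-client/bin",
])
old_hive_bin = "/usr/hdp/current/hive-client/bin"
new_hive_bin = "/usr/hdp/2.3.0.0-2557/hive/bin"

# Same pathsep-prefixed replace as in _get_hive_execute_path above, so only a
# full PATH entry (not a substring of another entry) gets swapped.
if (os.pathsep + old_hive_bin) in execute_path:
    execute_path = execute_path.replace(os.pathsep + old_hive_bin,
                                        os.pathsep + new_hive_bin)

print(execute_path)
# On a system where os.pathsep is ':':
# /usr/sbin:/usr/hdp/2.3.0.0-2557/hive/bin:/usr/hdp/current/hadoop-client/bin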
Example #21
 def pre_rolling_restart(self, env):
   Logger.info("Executing DataNode Rolling Upgrade pre-restart")
   import params
   env.set_params(params)
   if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
     conf_select.select(params.stack_name, "hadoop", params.version)
     hdp_select.select("hadoop-hdfs-datanode", params.version)
Example #22
    def pre_upgrade_restart(self, env, upgrade_type=None):
        Logger.info("Executing Hive Server Stack Upgrade pre-restart")
        import params
        env.set_params(params)

        if params.version and compare_versions(
                format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
            conf_select.select(params.stack_name, "hive", params.version)
            hdp_select.select("hive-server2", params.version)

            # Copy mapreduce.tar.gz and tez.tar.gz to HDFS
            resource_created = copy_to_hdfs(
                "mapreduce",
                params.user_group,
                params.hdfs_user,
                host_sys_prepped=params.host_sys_prepped)

            resource_created = copy_to_hdfs(
                "tez",
                params.user_group,
                params.hdfs_user,
                host_sys_prepped=params.host_sys_prepped) or resource_created

            if resource_created:
                params.HdfsResource(None, action="execute")
Example #23
    def pre_rolling_restart(self, env):
        import params
        env.set_params(params)

        if params.version and compare_versions(
                format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
            Execute(format("hdp-select set storm-nimbus {version}"))
Example #24
 def pre_upgrade_restart(self, env, upgrade_type=None):
   Logger.info("Executing DataNode Stack Upgrade pre-restart")
   import params
   env.set_params(params)
   if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
     conf_select.select(params.stack_name, "hadoop", params.version)
     hdp_select.select("hadoop-hdfs-datanode", params.version)
Example #25
File: upgrade.py  Project: yesugei/ambari
def _get_directory_mappings_during_upgrade():
    """
  Gets a dictionary of directory to archive name that represents the
  directories that need to be backed up and their output tarball archive targets
  :return:  the dictionary of directory to tarball mappings
  """
    import params

    # Must be performing an Upgrade
    if params.upgrade_direction is None or params.upgrade_direction != Direction.UPGRADE or \
            params.upgrade_from_version is None or params.upgrade_from_version == "":
        Logger.error(
            "Function _get_directory_mappings_during_upgrade() can only be called during a Stack Upgrade in direction UPGRADE."
        )
        return {}

    # By default, use this for all stacks.
    knox_data_dir = '/var/lib/knox/data'

    if params.stack_name and params.stack_name.upper() == "HDP" and \
            compare_versions(format_hdp_stack_version(params.upgrade_from_version), "2.3.0.0") > 0:
        # Use the version that is being upgraded from.
        knox_data_dir = format('/usr/hdp/{upgrade_from_version}/knox/data')

    directories = {
        knox_data_dir: BACKUP_DATA_ARCHIVE,
        params.knox_conf_dir + "/": BACKUP_CONF_ARCHIVE
    }  # the trailing "/" is important here so as to not include the "conf" folder itself

    Logger.info(format("Knox directories to backup:\n{directories}"))
    return directories
Example #26
def _get_current_hiveserver_version():
  """
  Runs "hive --version" and parses the result in order
  to obtain the current version of hive.

  :return:  the hiveserver2 version, returned by "hive --version"
  """
  import params

  try:
    # When downgrading, the source version should be the version we are downgrading from
    if "downgrade" == params.upgrade_direction:
      if not params.downgrade_from_version:
        raise Fail('The version from which we are downgrading from should be provided in \'downgrade_from_version\'')
      source_version = params.downgrade_from_version
    else:
      source_version = params.current_version
    hive_execute_path = _get_hive_execute_path(source_version)
    version_hive_bin = params.hive_bin
    formatted_source_version = format_hdp_stack_version(source_version)
    if formatted_source_version and compare_versions(formatted_source_version, "2.2") >= 0:
      version_hive_bin = format('/usr/hdp/{source_version}/hive/bin')
    command = format('{version_hive_bin}/hive --version')
    return_code, hdp_output = shell.call(command, user=params.hive_user, path=hive_execute_path)
  except Exception, e:
    Logger.error(str(e))
    raise Fail('Unable to execute hive --version command to retrieve the hiveserver2 version.')
Example #27
  def pre_rolling_restart(self, env):
    Logger.info("Executing Rolling Upgrade post-restart")
    import params
    env.set_params(params)

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      Execute(format("hdp-select set hadoop-yarn-resourcemanager {version}"))
Example #28
  def pre_upgrade_restart(self, env, upgrade_type=None):
    import params
    env.set_params(params)
    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:

      absolute_backup_dir = None
      if params.upgrade_direction and params.upgrade_direction == Direction.UPGRADE:
        Logger.info("Backing up directories. Initial conf folder: %s" % os.path.realpath(params.knox_conf_dir))

        # This will backup the contents of the conf directory into /tmp/knox-upgrade-backup/knox-conf-backup.tar
        absolute_backup_dir = upgrade.backup_data()

      # conf-select will change the symlink to the conf folder.
      conf_select.select(params.stack_name, "knox", params.version)
      hdp_select.select("knox-server", params.version)

      # Extract the tar of the old conf folder into the new conf directory
      if absolute_backup_dir is not None and params.upgrade_direction and params.upgrade_direction == Direction.UPGRADE:
        conf_tar_source_path = os.path.join(absolute_backup_dir, upgrade.BACKUP_CONF_ARCHIVE)
        if os.path.exists(conf_tar_source_path):
          extract_dir = os.path.realpath(params.knox_conf_dir)
          conf_tar_dest_path = os.path.join(extract_dir, upgrade.BACKUP_CONF_ARCHIVE)
          Logger.info("Copying %s into %s file." % (upgrade.BACKUP_CONF_ARCHIVE, conf_tar_dest_path))
          Execute(('cp', conf_tar_source_path, conf_tar_dest_path),
                  sudo = True,
          )

          tar_archive.untar_archive(conf_tar_source_path, extract_dir)
          
          File(conf_tar_dest_path,
               action = "delete",
          )
Example #29
  def pre_rolling_restart(self, env):
    import params

    env.set_params(params)
    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      conf_select.select(params.stack_name, "spark", params.version)
      Execute(format("hdp-select set spark-client {version}"))
Example #30
  def pre_rolling_restart(self, env):
    Logger.info("Executing Rolling Upgrade pre-restart")
    import params
    env.set_params(params)

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      Execute(format("hdp-select set zookeeper-server {version}"))
Example #31
  def pre_rolling_restart(self, env):
    import params
    env.set_params(params)

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      conf_select.select(params.stack_name, "storm", params.version)
      hdp_select.select("storm-client", params.version)
Example #32
def zookeeper_service(action='start', upgrade_type=None):
    import params

    # This path may be missing after Ambari upgrade. We need to create it.
    if upgrade_type is None and not os.path.exists("/usr/hdp/current/zookeeper-server") and params.current_version \
      and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
        conf_select.select(params.stack_name, "zookeeper",
                           params.current_version)
        hdp_select.select("zookeeper-server", params.version)

    cmd = format(
        "env ZOOCFGDIR={config_dir} ZOOCFG=zoo.cfg {zk_bin}/zkServer.sh")

    if action == 'start':
        daemon_cmd = format(
            "source {config_dir}/zookeeper-env.sh ; {cmd} start")
        no_op_test = format(
            "ls {zk_pid_file} >/dev/null 2>&1 && ps -p `cat {zk_pid_file}` >/dev/null 2>&1"
        )
        Execute(daemon_cmd, not_if=no_op_test, user=params.zk_user)

        if params.security_enabled:
            kinit_cmd = format(
                "{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};"
            )

            Execute(kinit_cmd, user=params.smokeuser)

    elif action == 'stop':
        daemon_cmd = format(
            "source {config_dir}/zookeeper-env.sh ; {cmd} stop")
        rm_pid = format("rm -f {zk_pid_file}")
        Execute(daemon_cmd, user=params.zk_user)
        Execute(rm_pid)
Example #33
  def pre_rolling_restart(self, env):
    Logger.info("Executing Rolling Upgrade pre-restart")
    import params
    env.set_params(params)

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      Execute(format("hdp-select set hadoop-yarn-timelineserver {version}"))
Example #34
def install_windows_msi(msi_url, save_dir, save_file, hadoop_user,
                        hadoop_password, stack_version):
    global _working_dir
    _working_dir = save_dir
    save_dir = os.path.abspath(save_dir)
    msi_save_dir = save_dir
    # system-wide lock to prevent simultaneous installations (when the first task failed on timeout)
    install_lock = SystemWideLock("Global\\hdp_msi_lock")
    try:
        # try to acquire lock
        if not install_lock.lock():
            Logger.info(
                "Some other task currently installing hdp.msi, waiting for 10 min for finish"
            )
            if not install_lock.lock(600000):
                raise Fail("Timeout on acquiring lock")
        if _validate_msi_install():
            Logger.info("hdp.msi already installed")
            return

        hdp_stack_version = format_hdp_stack_version(stack_version)
        hdp_22_specific_props = ''
        if hdp_stack_version != "" and compare_versions(
                hdp_stack_version, '2.2') >= 0:
            hdp_22_specific_props = hdp_22.format(hdp_data_dir=hdp_data_dir)

        # install msi
        try:
            download_file(msi_url, os.path.join(msi_save_dir, save_file))
        except:
            raise Fail("Failed to download {url}".format(url=msi_url))
        File(os.path.join(msi_save_dir, "properties.txt"),
             content=cluster_properties.format(
                 hdp_log_dir=hdp_log_dir,
                 hdp_data_dir=hdp_data_dir,
                 local_host=local_host,
                 db_flavor=db_flavor,
                 hdp_22_specific_props=hdp_22_specific_props))
        hdp_msi_path = os_utils.quote_path(os.path.join(save_dir, "hdp.msi"))
        hdp_log_path = os_utils.quote_path(os.path.join(save_dir, "hdp.log"))
        hdp_layout_path = os_utils.quote_path(
            os.path.join(save_dir, "properties.txt"))
        hadoop_password_arg = os_utils.quote_path(hadoop_password)

        Execute(
            INSTALL_MSI_CMD.format(hdp_msi_path=hdp_msi_path,
                                   hdp_log_path=hdp_log_path,
                                   hdp_layout_path=hdp_layout_path,
                                   hadoop_user=hadoop_user,
                                   hadoop_password_arg=hadoop_password_arg))
        reload_windows_env()
        # create additional services manually due to hdp.msi limitations
        _ensure_services_created(hadoop_user, hadoop_password)
        _create_symlinks(stack_version)
        # finalizing install
        _write_marker()
        _validate_msi_install()
    finally:
        install_lock.unlock()
Example #35
    def pre_rolling_restart(self, env):
        Logger.info("Executing DataNode Rolling Upgrade pre-restart")
        import params

        env.set_params(params)
        if params.version and compare_versions(format_hdp_stack_version(params.version), "2.2.0.0") >= 0:
            conf_select.select(params.stack_name, "hadoop", params.version)
            hdp_select.select("hadoop-hdfs-datanode", params.version)
Example #36
  def unlink_all_configs(self, env):
    """
    Reverses the work performed in link_config. This should only be used when downgrading from
    HDP 2.3 to 2.2 in order to undo the symlink work required for 2.3.
    """
    stack_name = default('/hostLevelParams/stack_name', "").upper()
    downgrade_to_version = default('/commandParams/version', None)
    downgrade_from_version = default('/commandParams/downgrade_from_version', None)
    upgrade_direction = default("/commandParams/upgrade_direction", Direction.UPGRADE)

    # downgrade only
    if upgrade_direction != Direction.DOWNGRADE:
      Logger.warning("Unlinking configurations should only be performed on a downgrade.")
      return

    # HDP only
    if stack_name != "HDP":
      Logger.warning("Unlinking configurations should only be performed on the HDP stack.")
      return

    if downgrade_to_version is None or downgrade_from_version is None:
      Logger.warning("Both 'commandParams/version' and 'commandParams/downgrade_from_version' must be specified to unlink configs on downgrade.")
      return

    Logger.info("Unlinking all configs when downgrading from HDP 2.3 to 2.2")

    # normalize the versions
    stack_23 = format_hdp_stack_version("2.3")
    downgrade_to_version = format_hdp_stack_version(downgrade_to_version)
    downgrade_from_version = format_hdp_stack_version(downgrade_from_version)

    # downgrade-to-version must be 2.2 (less than 2.3)
    if compare_versions(downgrade_to_version, stack_23) >= 0:
      Logger.warning("Unlinking configurations should only be performed when downgrading to HDP 2.2")
      return

    # downgrade-from-version must be 2.3+
    if compare_versions(downgrade_from_version, stack_23) < 0:
      Logger.warning("Unlinking configurations should only be performed when downgrading from HDP 2.3 or later")
      return

    # iterate through all directory conf mappings and undo the symlinks
    for key, value in conf_select.PACKAGE_DIRS.iteritems():
      for directory_mapping in value:
        original_config_directory = directory_mapping['conf_dir']
        self._unlink_config(original_config_directory)
Example #37
  def pre_rolling_restart(self, env):
    Logger.info("Executing Rolling Upgrade pre-restart")
    import params
    env.set_params(params)

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      Execute(format("hdp-select set hadoop-mapreduce-historyserver {version}"))
      copy_tarballs_to_hdfs('mapreduce', 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
Example #38
  def pre_upgrade_restart(self, env, upgrade_type=None):
    import params

    env.set_params(params)
    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      Logger.info("Executing Spark Client Stack Upgrade pre-restart")
      conf_select.select(params.stack_name, "spark", params.version)
      hdp_select.select("spark-client", params.version)
Example #39
  def pre_upgrade_restart(self, env, upgrade_type=None):
    Logger.info("Executing Stack Upgrade pre-restart")
    import params
    env.set_params(params)

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      conf_select.select(params.stack_name, "zookeeper", params.version)
      hdp_select.select("zookeeper-server", params.version)
Example #40
    def pre_rolling_restart(self, env):
        import params

        env.set_params(params)
        if params.version and compare_versions(
                format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
            conf_select.select(params.stack_name, "spark", params.version)
            hdp_select.select("spark-historyserver", params.version)

            # Spark 1.3.1.2.3 and higher (included in HDP 2.3) does not depend on Tez, so the Tez
            # tarball only needs to be copied when the target version is below HDP 2.3.
            if compare_versions(format_hdp_stack_version(params.version),
                                '2.3.0.0') < 0:
                resource_created = copy_to_hdfs("tez", params.user_group,
                                                params.hdfs_user)
                if resource_created:
                    params.HdfsResource(None, action="execute")
Example #41
  def pre_rolling_restart(self, env):
    Logger.info("Executing Rolling Upgrade pre-restart")
    import params
    env.set_params(params)

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      Execute(format("hdp-select set hadoop-mapreduce-historyserver {version}"))
      copy_tarballs_to_hdfs('mapreduce', 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
Example #42
  def pre_rolling_restart(self, env):
    Logger.info("Executing Rolling Upgrade post-restart")
    import params
    env.set_params(params)

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      conf_select.select(params.stack_name, "hadoop", params.version)
      hdp_select.select("hadoop-yarn-resourcemanager", params.version)
Example #43
    def pre_rolling_restart(self, env):
        import params

        env.set_params(params)
        if params.version and compare_versions(
                format_hdp_stack_version(params.version), '2.3.2.0') >= 0:
            conf_select.select(params.stack_name, "spark", params.version)
            hdp_select.select("spark-thriftserver", params.version)
Example #44
  def pre_upgrade_restart(self, env, upgrade_type=None):
    Logger.info("Executing Stack Upgrade pre-restart")
    import params
    env.set_params(params)

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      conf_select.select(params.stack_name, "hadoop", params.version)
      hdp_select.select("hadoop-hdfs-secondarynamenode", params.version)
Example #45
  def pre_rolling_restart(self, env):
    Logger.info("Executing Rolling Upgrade pre-restart")
    import params
    env.set_params(params)

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      conf_select.select(params.stack_name, "zookeeper", params.version)
      hdp_select.select("zookeeper-client", params.version)
Example #46
  def pre_rolling_restart(self, env):
    Logger.info("Executing Rolling Upgrade pre-restart")
    import params
    env.set_params(params)

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      conf_select.select(params.stack_name, "hadoop", params.version)
      Execute(format("hdp-select set hadoop-hdfs-journalnode {version}"))
Example #47
    def pre_rolling_restart(self, env):
        import params

        env.set_params(params)
        if params.version and compare_versions(format_hdp_stack_version(params.version), "2.2.0.0") >= 0:
            conf_select.select(params.stack_name, "spark", params.version)
            hdp_select.select("spark-historyserver", params.version)

            # Spark 1.3.1.2.3 and higher (included in HDP 2.3) does not depend on Tez, so the Tez
            # tarball only needs to be copied when the target version is below HDP 2.3.

            if params.version and compare_versions(format_hdp_stack_version(params.version), "2.3.0.0") < 0:
                resource_created = copy_to_hdfs(
                    "tez", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped
                )
                if resource_created:
                    params.HdfsResource(None, action="execute")
Example #48
    def upgrade_schema(self, env):
        """
    Executes the schema upgrade binary.  This is its own function because it could
    be called as a standalone task from the upgrade pack, but it is safe to run for each
    metastore instance.

    The metastore schema upgrade requires a database driver library for most
    databases. During an upgrade, it's possible that the library is not present,
    so this will also attempt to copy/download the appropriate driver.
    """
        Logger.info("Upgrading Hive Metastore")
        import params

        env.set_params(params)

        if params.security_enabled:
            kinit_command = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal}; ")
            Execute(kinit_command, user=params.smokeuser)

        # ensure that the JDBC driver is present for the schema tool; if it's not
        # present, then download it first
        if params.hive_jdbc_driver in params.hive_jdbc_drivers_list and params.hive_use_existing_db:
            target_directory = format("/usr/hdp/{version}/hive/lib")
            if not os.path.exists(params.target):
                # download it
                jdbc_connector()

            if params.sqla_db_used:
                target_native_libs_directory = format("{target_directory}/native/lib64")

                Execute(format("yes | {sudo} cp {jars_in_hive_lib} {target_directory}"))

                Directory(target_native_libs_directory, recursive=True)

                Execute(format("yes | {sudo} cp {libs_in_hive_lib} {target_native_libs_directory}"))

                Execute(format("{sudo} chown -R {hive_user}:{user_group} {hive_lib}/*"))
            else:
                Execute(("cp", params.target, target_directory), path=["/bin", "/usr/bin/"], sudo=True)

            File(os.path.join(target_directory, os.path.basename(params.target)), mode=0644)

        # build the schema tool command
        binary = format("/usr/hdp/{version}/hive/bin/schematool")

        # the conf.server directory changed locations between HDP 2.2 and 2.3
        # since the configurations have not been written out yet during an upgrade
        # we need to choose the original legacy location
        schematool_hive_server_conf_dir = params.hive_server_conf_dir
        if params.current_version is not None:
            current_version = format_hdp_stack_version(params.current_version)
            if compare_versions(current_version, "2.3") < 0:
                schematool_hive_server_conf_dir = LEGACY_HIVE_SERVER_CONF

        env_dict = {"HIVE_CONF_DIR": schematool_hive_server_conf_dir}

        command = format("{binary} -dbType {hive_metastore_db_type} -upgradeSchema")
        Execute(command, user=params.hive_user, tries=1, environment=env_dict, logoutput=True)
Example #49
    def pre_upgrade_restart(self, env, upgrade_type=None):
        import params

        env.set_params(params)
        if params.version and compare_versions(
                format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
            Logger.info("Executing Spark Client Stack Upgrade pre-restart")
            conf_select.select(params.stack_name, "spark", params.version)
            hdp_select.select("spark-client", params.version)
Example #50
    def pre_rolling_restart(self, env):
        Logger.info("Executing Rolling Upgrade post-restart")
        import params
        env.set_params(params)

        if params.version and compare_versions(
                format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
            conf_select.select(params.stack_name, "hadoop", params.version)
            hdp_select.select("hadoop-yarn-resourcemanager", params.version)
Example #51
def install_windows_msi(url_base, save_dir, save_files, hadoop_user, hadoop_password, stack_version):
  global _working_dir
  _working_dir = save_dir
  save_dir = os.path.abspath(save_dir)
  msi_save_dir = save_dir
  # system-wide lock to prevent simultaneous installations (when the first task failed on timeout)
  install_lock = SystemWideLock("Global\\hdp_msi_lock")
  try:
    # try to acquire lock
    if not install_lock.lock():
      Logger.info("Some other task currently installing hdp.msi, waiting for 10 min for finish")
      if not install_lock.lock(600000):
        raise Fail("Timeout on acquiring lock")
    if _validate_msi_install():
      Logger.info("hdp.msi already installed")
      return

    hdp_stack_version = format_hdp_stack_version(stack_version)
    hdp_22_specific_props = ''
    if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
      hdp_22_specific_props = hdp_22.format(hdp_data_dir=hdp_data_dir)

    # MSIs cannot be larger than 2GB. HDPWIN 2.3 needed to be split in order to accommodate this limitation
    hdp_msi_file = ''
    for save_file in save_files:
      if save_file.lower().endswith(".msi"):
        hdp_msi_file = save_file
      file_url = urlparse.urljoin(url_base, save_file)
      try:
        download_file(file_url, os.path.join(msi_save_dir, save_file))
      except:
        raise Fail("Failed to download {url}".format(url=file_url))

    File(os.path.join(msi_save_dir, "properties.txt"), content=cluster_properties.format(hdp_log_dir=hdp_log_dir,
                                                                                         hdp_data_dir=hdp_data_dir,
                                                                                         local_host=local_host,
                                                                                         db_flavor=db_flavor,
                                                                                         hdp_22_specific_props=hdp_22_specific_props))

    # install msi
    hdp_msi_path = os_utils.quote_path(os.path.join(save_dir, hdp_msi_file))
    hdp_log_path = os_utils.quote_path(os.path.join(save_dir, hdp_msi_file[:-3] + "log"))
    hdp_layout_path = os_utils.quote_path(os.path.join(save_dir, "properties.txt"))
    hadoop_password_arg = os_utils.quote_path(hadoop_password)

    Execute(
      INSTALL_MSI_CMD.format(hdp_msi_path=hdp_msi_path, hdp_log_path=hdp_log_path, hdp_layout_path=hdp_layout_path,
                             hadoop_user=hadoop_user, hadoop_password_arg=hadoop_password_arg))
    reload_windows_env()
    # create additional services manually due to hdp.msi limitations
    _ensure_services_created(hadoop_user, hadoop_password)
    _create_symlinks(stack_version)
    # finalizing install
    _write_marker()
    _validate_msi_install()
  finally:
    install_lock.unlock()
Example #52
    def pre_rolling_restart(self, env):
        import params

        env.set_params(params)
        if params.version and compare_versions(
                format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
            Execute(format("hdp-select set spark-historyserver {version}"))
            copy_tarballs_to_hdfs('tez', 'spark-historyserver',
                                  params.spark_user, params.hdfs_user,
                                  params.user_group)
Example #53
    def pre_rolling_restart(self, env):
        Logger.info("Executing Rolling Upgrade pre-restart")
        import params
        env.set_params(params)

        if params.version and compare_versions(
                format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
            conf_select.select(params.stack_name, "hadoop", params.version)
            hdp_select.select("hadoop-mapreduce-historyserver", params.version)
            copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user)
            params.HdfsResource(None, action="execute")
Example #54
    def start(self, env, rolling_restart=False):
        import params
        env.set_params(params)
        self.configure(env)  # FOR SECURITY

        if params.version and compare_versions(
                format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
            copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user)
            params.HdfsResource(None, action="execute")

        service('historyserver', action='start', serviceName='mapreduce')
Example #55
  def pre_rolling_restart(self, env):
    import params
    env.set_params(params)

    # this function should not execute if the version can't be determined or
    # is not at least HDP 2.2.0.0
    if not params.version or compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') < 0:
      return

    Logger.info("Executing Flume Rolling Upgrade pre-restart")
    Execute(format("hdp-select set flume-server {version}"))
    flume_upgrade.pre_start_restore()
Example #56
  def pre_rolling_restart(self, env):
    Logger.info("Executing Rolling Upgrade pre-restart")
    import params
    env.set_params(params)

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      conf_select.select(params.stack_name, "hadoop", params.version)
      hdp_select.select("hadoop-mapreduce-historyserver", params.version)
      # MC Hammer said, "Can't touch this"
      copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
      copy_to_hdfs("tez", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
      params.HdfsResource(None, action="execute")
Example #57
    def pre_rolling_restart(self, env):
        Logger.info("Executing Metastore Rolling Upgrade pre-restart")
        import params

        env.set_params(params)

        if Script.is_hdp_stack_greater_or_equal("2.3"):
            self.upgrade_schema(env)

        if params.version and compare_versions(format_hdp_stack_version(params.version), "2.2.0.0") >= 0:
            conf_select.select(params.stack_name, "hive", params.version)
            hdp_select.select("hive-metastore", params.version)
Example #58
  def pre_upgrade_restart(self, env, upgrade_type=None):
    import params
    env.set_params(params)

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      hdp_select.select("kafka-broker", params.version)

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.3.0.0') >= 0:
      conf_select.select(params.stack_name, "kafka", params.version)

    # This is extremely important since it should only be called if crossing the HDP 2.3.4.0 boundary. 
    if params.current_version and params.version and params.upgrade_direction:
      src_version = dst_version = None
      if params.upgrade_direction == Direction.UPGRADE:
        src_version = format_hdp_stack_version(params.current_version)
        dst_version = format_hdp_stack_version(params.version)
      else:
        # These represent the original values during the UPGRADE direction
        src_version = format_hdp_stack_version(params.version)
        dst_version = format_hdp_stack_version(params.downgrade_from_version)

      if compare_versions(src_version, '2.3.4.0') < 0 and compare_versions(dst_version, '2.3.4.0') >= 0:
        # Calling the acl migration script requires the configs to be present.
        self.configure(env, upgrade_type=upgrade_type)
        upgrade.run_migration(env, upgrade_type)
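The Kafka example above runs the ACL migration only when the move crosses the HDP 2.3.4.0 line (from below it to at-or-above it). Read as a standalone predicate, under the same illustrative version-normalization assumption as the earlier sketch:

# Self-contained sketch of the 2.3.4.0 boundary test; version strings here are made up.
def version_tuple(version):
    numeric = version.split('-')[0]                        # drop any build suffix
    return tuple(int(p) for p in (numeric.split('.') + ['0'] * 4)[:4])

def crosses_boundary(src_version, dst_version, boundary='2.3.4.0'):
    """True only when src is below the boundary and dst is at or above it."""
    return version_tuple(src_version) < version_tuple(boundary) <= version_tuple(dst_version)

print(crosses_boundary('2.3.2.0-2950', '2.3.4.0-3485'))   # True  -> run the ACL migration
print(crosses_boundary('2.3.4.0-3485', '2.3.6.0-3796'))   # False -> boundary already crossed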
Example #59
  def pre_upgrade_restart(self, env, upgrade_type=None):
    Logger.info("Executing Stack Upgrade pre-restart")
    import params
    env.set_params(params)

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      # When downgrading an Express Upgrade, the first thing we do is to revert the symlinks.
      # Therefore, we cannot call this code in that scenario.
      call_if = [("rolling", "upgrade"), ("rolling", "downgrade"), ("nonrolling", "upgrade")]
      for e in call_if:
        if (upgrade_type, params.upgrade_direction) == e:
          conf_select.select(params.stack_name, "hadoop", params.version)
      hdp_select.select("hadoop-hdfs-namenode", params.version)