Example #1
def configure(self, env, upgrade_type=None):
    import params

    # The configure command doesn't actually receive the upgrade_type from Script.py, so get it from the config dictionary
    if upgrade_type is None:
      restart_type = default("/commandParams/restart_type", "")
      if restart_type.lower() == "rolling_upgrade":
        upgrade_type = UPGRADE_TYPE_ROLLING
      elif restart_type.lower() == "nonrolling_upgrade":
        upgrade_type = UPGRADE_TYPE_NON_ROLLING

    if upgrade_type is not None and params.upgrade_direction == Direction.UPGRADE and params.version is not None:
      Logger.info(format("Configuring Oozie during upgrade type: {upgrade_type}, direction: {params.upgrade_direction}, and version {params.version}"))
      if compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
        # In order for "/usr/hdp/current/oozie-<client|server>" to point to the new version of
        # oozie, we need to create the symlinks for both the server and the client, since both
        # must point to the newly installed oozie version.

        # Sets the symlink : eg: /usr/hdp/current/oozie-client -> /usr/hdp/2.3.x.y-<version>/oozie
        hdp_select.select("oozie-client", params.version)
        # Sets the symlink : eg: /usr/hdp/current/oozie-server -> /usr/hdp/2.3.x.y-<version>/oozie
        hdp_select.select("oozie-server", params.version)

      if compare_versions(format_hdp_stack_version(params.version), '2.3.0.0') >= 0:
        conf_select.select(params.stack_name, "oozie", params.version)

    env.set_params(params)
    oozie(is_server=True)
Example #2
  def pre_upgrade_restart(self, env, upgrade_type=None):
    import params
    env.set_params(params)

    if Script.is_hdp_stack_greater_or_equal('2.3.0.0'):
      conf_select.select(params.stack_name, "hadoop", params.version)
      hdp_select.select("hadoop-hdfs-nfs3", params.version)
  def pre_rolling_restart(self, env):
    import params

    env.set_params(params)
    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.3.2.0') >= 0:
      conf_select.select(params.stack_name, "spark", params.version)
      hdp_select.select("spark-thriftserver", params.version)
  def pre_upgrade_restart(self, env, upgrade_type=None):
    import params
    env.set_params(params)

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      conf_select.select(params.stack_name, "storm", params.version)
      hdp_select.select("storm-client", params.version)
  def pre_upgrade_restart(self, env, upgrade_type=None):
    """
    Performs the tasks that should be done before an upgrade of oozie. This includes:
      - backing up configurations
      - running hdp-select and conf-select
      - restoring configurations
      - preparing the libext directory
    :param env:
    :return:
    """
    import params
    env.set_params(params)

    # this function should not execute if the version can't be determined or
    # is not at least HDP 2.2.0.0
    if not params.version or compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') < 0:
      return

    Logger.info("Executing Oozie Server Stack Upgrade pre-restart")

    OozieUpgrade.backup_configuration()

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      conf_select.select(params.stack_name, "oozie", params.version)
      hdp_select.select("oozie-server", params.version)

    OozieUpgrade.restore_configuration()
    OozieUpgrade.prepare_libext_directory()
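Most of the snippets in this listing share the same stack-version guard. As a standalone illustration (the build string below is hypothetical, not taken from any example):

# Illustrative only: the guard is True for HDP 2.2.0.0 and newer builds.
version = "2.2.4.2-2"  # hypothetical HDP build string
if version and compare_versions(format_hdp_stack_version(version), '2.2.0.0') >= 0:
  pass  # safe to run conf_select/hdp_select against this version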
Example #6
  def pre_upgrade_restart(self, env, upgrade_type=None):
    Logger.info("Executing DataNode Stack Upgrade pre-restart")
    import params
    env.set_params(params)
    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      conf_select.select(params.stack_name, "hadoop", params.version)
      hdp_select.select("hadoop-hdfs-datanode", params.version)
  def pre_upgrade_restart(self, env, upgrade_type=None):
    import params
    env.set_params(params)

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      hdp_select.select("kafka-broker", params.version)

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.3.0.0') >= 0:
      conf_select.select(params.stack_name, "kafka", params.version)

    # This is extremely important: the ACL migration must only run when the upgrade crosses the HDP 2.3.4.0 boundary.
    if params.current_version and params.version and params.upgrade_direction:
      src_version = dst_version = None
      if params.upgrade_direction == Direction.UPGRADE:
        src_version = format_hdp_stack_version(params.current_version)
        dst_version = format_hdp_stack_version(params.version)
      else:
        # These represent the original values during the UPGRADE direction
        src_version = format_hdp_stack_version(params.version)
        dst_version = format_hdp_stack_version(params.downgrade_from_version)

      if compare_versions(src_version, '2.3.4.0') < 0 and compare_versions(dst_version, '2.3.4.0') >= 0:
        # Calling the acl migration script requires the configs to be present.
        self.configure(env, upgrade_type=upgrade_type)
        upgrade.run_migration(env, upgrade_type)
Example #8
  def pre_rolling_restart(self, env):
    import params
    env.set_params(params)
    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:

      absolute_backup_dir = None
      if params.upgrade_direction and params.upgrade_direction == Direction.UPGRADE:
        Logger.info("Backing up directories. Initial conf folder: %s" % os.path.realpath(params.knox_conf_dir))

        # This will backup the contents of the conf directory into /tmp/knox-upgrade-backup/knox-conf-backup.tar
        absolute_backup_dir = upgrade.backup_data()

      # conf-select will change the symlink to the conf folder.
      conf_select.select(params.stack_name, "knox", params.version)
      hdp_select.select("knox-server", params.version)

      # Extract the tar of the old conf folder into the new conf directory
      if absolute_backup_dir is not None and params.upgrade_direction and params.upgrade_direction == Direction.UPGRADE:
        conf_tar_source_path = os.path.join(absolute_backup_dir, upgrade.BACKUP_CONF_ARCHIVE)
        if os.path.exists(conf_tar_source_path):
          extract_dir = os.path.realpath(params.knox_conf_dir)
          conf_tar_dest_path = os.path.join(extract_dir, upgrade.BACKUP_CONF_ARCHIVE)
          Logger.info("Copying %s into %s file." % (upgrade.BACKUP_CONF_ARCHIVE, conf_tar_dest_path))
          Execute(('cp', conf_tar_source_path, conf_tar_dest_path),
                  sudo = True,
          )

          tar_archive.untar_archive(conf_tar_source_path, extract_dir)
          
          File(conf_tar_dest_path,
               action = "delete",
          )
Example #9
def zookeeper_service(action='start', rolling_restart=False):
  import params

  # This path may be missing after Ambari upgrade. We need to create it.
  if not rolling_restart and not os.path.exists("/usr/hdp/current/zookeeper-server") and params.current_version:
    conf_select.select(params.stack_name, "zookeeper", params.current_version)
    hdp_select.select("zookeeper-server", params.version)

  cmd = format("env ZOOCFGDIR={config_dir} ZOOCFG=zoo.cfg {zk_bin}/zkServer.sh")

  if action == 'start':
    daemon_cmd = format("source {config_dir}/zookeeper-env.sh ; {cmd} start")
    no_op_test = format("ls {zk_pid_file} >/dev/null 2>&1 && ps -p `cat {zk_pid_file}` >/dev/null 2>&1")
    Execute(daemon_cmd,
            not_if=no_op_test,
            user=params.zk_user
    )

    if params.security_enabled:
      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")

      Execute(kinit_cmd,
              user=params.smokeuser
      )

  elif action == 'stop':
    daemon_cmd = format("source {config_dir}/zookeeper-env.sh ; {cmd} stop")
    rm_pid = format("rm -f {zk_pid_file}")
    Execute(daemon_cmd,
            user=params.zk_user
    )
    Execute(rm_pid)
Example #10
  def pre_upgrade_restart(self, env, upgrade_type=None):
    Logger.info("Executing Stack Upgrade pre-restart")
    import params
    env.set_params(params)

    conf_select.select(params.stack_name, "mahout", params.version)
    hdp_select.select("mahout-client", params.version )
  def pre_rolling_restart(self, env):
    import params
    env.set_params(params)

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      conf_select.select(params.stack_name, "hadoop", params.version)
      hdp_select.select("hadoop-client", params.version)
  def pre_rolling_restart(self, env):
    Logger.info("Executing Rolling Upgrade pre-restart")
    import params
    env.set_params(params)

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      conf_select.select(params.stack_name, "zookeeper", params.version)
      hdp_select.select("zookeeper-client", params.version)
  def pre_upgrade_restart(self, env, upgrade_type=None):
    import params

    env.set_params(params)
    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      Logger.info("Executing Spark Client Stack Upgrade pre-restart")
      conf_select.select(params.stack_name, "spark", params.version)
      hdp_select.select("spark-client", params.version)
  def pre_upgrade_restart(self, env, upgrade_type=None):
    Logger.info("Executing Stack Upgrade pre-restart")
    import params
    env.set_params(params)

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      conf_select.select(params.stack_name, "zookeeper", params.version)
      hdp_select.select("zookeeper-server", params.version)
Example #15
    def pre_rolling_restart(self, env):
        Logger.info("Executing DataNode Rolling Upgrade pre-restart")
        import params

        env.set_params(params)
        if params.version and compare_versions(format_hdp_stack_version(params.version), "2.2.0.0") >= 0:
            conf_select.select(params.stack_name, "hadoop", params.version)
            hdp_select.select("hadoop-hdfs-datanode", params.version)
Example #16
  def pre_rolling_restart(self, env):
    import params
    env.set_params(params)

    if Script.is_hdp_stack_greater_or_equal("2.3"):
      # phoenix uses hbase configs
      conf_select.select(params.stack_name, "hbase", params.version)
      hdp_select.select("phoenix-server", params.version)
  def pre_upgrade_restart(self, env, upgrade_type=None):
    import params

    env.set_params(params)
    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
      Logger.info("Executing Spark2 Client Stack Upgrade pre-restart")
      conf_select.select(params.stack_name, "spark", params.version)
      stack_select.select("spark2-client", params.version)
  def pre_rolling_restart(self, env):
    Logger.info("Executing Rolling Upgrade post-restart")
    import params
    env.set_params(params)

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      conf_select.select(params.stack_name, "hadoop", params.version)
      hdp_select.select("hadoop-yarn-resourcemanager", params.version)
  def pre_upgrade_restart(self, env, upgrade_type=None):
    Logger.info("Executing WebHCat Stack Upgrade pre-restart")
    import params
    env.set_params(params)

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      # webhcat has no conf, but uses hadoop home, so verify that regular hadoop conf is set
      conf_select.select(params.stack_name, "hive-hcatalog", params.version)
      conf_select.select(params.stack_name, "hadoop", params.version)
      hdp_select.select("hive-webhcat", params.version)
Example #20
  def pre_rolling_restart(self, env):
    import params
    env.set_params(params)

    # this function should not execute if the version can't be determined or
    # is not at least HDP 2.2.0.0
    if not params.version or compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') < 0:
      return

    Logger.info("Executing Oozie Client Rolling Upgrade pre-restart")
    conf_select.select(params.stack_name, "oozie", params.version)
    hdp_select.select("oozie-client", params.version)
Example #21
  def pre_rolling_restart(self, env):
    Logger.info("Executing Rolling Upgrade pre-restart")
    import params
    env.set_params(params)

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      conf_select.select(params.stack_name, "hadoop", params.version)
      hdp_select.select("hadoop-mapreduce-historyserver", params.version)
      # MC Hammer said, "Can't touch this"
      copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
      copy_to_hdfs("tez", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
      params.HdfsResource(None, action="execute")
Example #22
  def pre_rolling_restart(self, env):
    import params
    env.set_params(params)

    # this function should not execute if the version can't be determined or
    # is not at least HDP 2.2.0.0
    if Script.is_hdp_stack_less_than("2.2"):
      return

    Logger.info("Executing Accumulo Client Rolling Upgrade pre-restart")
    conf_select.select(params.stack_name, "accumulo", params.version)
    hdp_select.select("accumulo-client", params.version)
Example #23
    def pre_rolling_restart(self, env):
        Logger.info("Executing Metastore Rolling Upgrade pre-restart")
        import params

        env.set_params(params)

        if Script.is_hdp_stack_greater_or_equal("2.3"):
            self.upgrade_schema(env)

        if params.version and compare_versions(format_hdp_stack_version(params.version), "2.2.0.0") >= 0:
            conf_select.select(params.stack_name, "hive", params.version)
            hdp_select.select("hive-metastore", params.version)
Example #24
  def pre_upgrade_restart(self, env, upgrade_type=None):
    Logger.info("Executing Stack Upgrade pre-restart")
    import params
    env.set_params(params)

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      # When downgrading an Express Upgrade, the first thing we do is to revert the symlinks.
      # Therefore, we cannot call this code in that scenario.
      call_if = [("rolling", "upgrade"), ("rolling", "downgrade"), ("nonrolling", "upgrade")]
      for e in call_if:
        if (upgrade_type, params.upgrade_direction) == e:
          conf_select.select(params.stack_name, "hadoop", params.version)
      hdp_select.select("hadoop-hdfs-namenode", params.version)
Example #25
  def pre_rolling_restart(self, env):
    import params
    env.set_params(params)

    # this function should not execute if the version can't be determined or
    # is not at least HDP 2.2.0.0
    if Script.is_hdp_stack_less_than("2.2"):
      return

    Logger.info("Executing Falcon Server Rolling Upgrade pre-restart")
    conf_select.select(params.stack_name, "falcon", params.version)
    hdp_select.select("falcon-server", params.version)
    falcon_server_upgrade.pre_start_restore()
Example #26
  def pre_rolling_restart(self, env):
    import params
    env.set_params(params)

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      conf_select.select(params.stack_name, "slider", params.version)
      hdp_select.select("slider-client", params.version)

      # also set all of the hadoop clients since slider client is upgraded as
      # part of the final "CLIENTS" group and we need to ensure that
      # hadoop-client is also set
      conf_select.select(params.stack_name, "hadoop", params.version)
      hdp_select.select("hadoop-client", params.version)
  def pre_upgrade_restart(self, env, upgrade_type=None):
    Logger.info("Executing Stack Upgrade pre-restart")
    import params
    env.set_params(params)

    # this function should not execute if the version can't be determined or
    # is not at least HDP 2.2.0.0
    if not params.version or compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') < 0:
      return

    Logger.info("Executing Falcon Client Stack Upgrade pre-restart")
    conf_select.select(params.stack_name, "falcon", params.version)
    hdp_select.select("falcon-client", params.version)
Example #28
    def pre_upgrade_restart(self, env, upgrade_type=None):
        Logger.info("Executing Stack Upgrade pre-restart")
        import params
        env.set_params(params)

        # this function should not execute if the version can't be determined or
        # the stack does not support rolling upgrade
        if not (params.version and check_stack_feature(
                StackFeature.ROLLING_UPGRADE, params.version)):
            return

        Logger.info("Executing Falcon Client Stack Upgrade pre-restart")
        conf_select.select(params.stack_name, "falcon", params.version)
        stack_select.select("falcon-client", params.version)
Example #29
    def pre_upgrade_restart(self, env, upgrade_type=None):
        Logger.info("Executing Stack Upgrade pre-restart")
        import params
        env.set_params(params)

        # this function should not execute if the version can't be determined or
        # is not at least HDP 2.2.0.0
        if Script.is_hdp_stack_less_than("2.2"):
            return

        Logger.info("Executing Falcon Server Rolling Upgrade pre-restart")
        conf_select.select(params.stack_name, "falcon", params.version)
        hdp_select.select("falcon-server", params.version)
        falcon_server_upgrade.pre_start_restore()
Example #30
  def pre_rolling_restart(self, env):
    Logger.info("Executing HiveServer2 Rolling Upgrade pre-restart")
    import params
    env.set_params(params)

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      conf_select.select(params.stack_name, "hive", params.version)
      hdp_select.select("hive-server2", params.version)

      # Copy mapreduce.tar.gz and tez.tar.gz to HDFS
      resource_created = copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user)
      resource_created = copy_to_hdfs("tez", params.user_group, params.hdfs_user) or resource_created
      if resource_created:
        params.HdfsResource(None, action="execute")
Example #31
    def pre_upgrade_restart(self, env, upgrade_type=None):
        Logger.info("Executing druid-superset Upgrade pre-restart")
        import params

        env.set_params(params)

        if params.stack_version and check_stack_feature(
                StackFeature.ROLLING_UPGRADE, params.stack_version):
            stack_select.select(self.get_component_name(),
                                params.stack_version)
        if params.stack_version and check_stack_feature(
                StackFeature.CONFIG_VERSIONING, params.stack_version):
            conf_select.select(params.stack_name, "superset",
                               params.stack_version)
Example #32
    def pre_rolling_restart(self, env):
        import params
        env.set_params(params)

        if params.version and compare_versions(
                format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
            conf_select.select(params.stack_name, "slider", params.version)
            Execute(format("hdp-select set slider-client {version}"))

            # also set all of the hadoop clients since slider client is upgraded as
            # part of the final "CLIENTS" group and we need to ensure that
            # hadoop-client is also set
            conf_select.select(params.stack_name, "hadoop", params.version)
            Execute(format("hdp-select set hadoop-client {version}"))
Example #33
  def pre_upgrade_restart(self, env, upgrade_type=None):
    Logger.info("Executing Metastore Stack Upgrade pre-restart")
    import params

    env.set_params(params)

    is_upgrade = params.upgrade_direction == Direction.UPGRADE

    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
      conf_select.select(params.stack_name, "hive", params.version)
      stack_select.select("hive-metastore", params.version)

    if is_upgrade and params.stack_version_formatted_major and \
            check_stack_feature(StackFeature.HIVE_METASTORE_UPGRADE_SCHEMA, params.stack_version_formatted_major):
      self.upgrade_schema(env)
Example #34
  def pre_upgrade_restart(self, env, upgrade_type=None):
    Logger.info("Executing Metastore Stack Upgrade pre-restart")
    import params

    env.set_params(params)

    is_stack_hdp_23 = Script.is_hdp_stack_greater_or_equal("2.3")
    is_upgrade = params.upgrade_direction == Direction.UPGRADE

    if is_stack_hdp_23 and is_upgrade:
      self.upgrade_schema(env)

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      conf_select.select(params.stack_name, "hive", params.version)
      hdp_select.select("hive-metastore", params.version)
Example #35
  def pre_upgrade_restart(self, env, upgrade_type=None):
    Logger.info("Executing Metastore Stack Upgrade pre-restart")
    import params

    env.set_params(params)

    is_stack_hdp_23 = Script.is_hdp_stack_greater_or_equal("2.3")
    is_upgrade = params.upgrade_direction == Direction.UPGRADE

    if is_stack_hdp_23 and is_upgrade:
      self.upgrade_schema(env)

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      conf_select.select(params.stack_name, "hive", params.version)
      hdp_select.select("hive-metastore", params.version)
Example #36
    def pre_upgrade_restart(self, env, upgrade_type=None):
        Logger.info("Executing Metastore Stack Upgrade pre-restart")
        import params
        env.set_params(params)

        if Script.is_hdp_stack_greater_or_equal("2.3"):
            # ensure that configurations are written out before trying to upgrade the schema
            # since the schematool needs configs and doesn't know how to use the hive conf override
            self.configure(env)
            self.upgrade_schema(env)

        if params.version and compare_versions(
                format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
            conf_select.select(params.stack_name, "hive", params.version)
            hdp_select.select("hive-metastore", params.version)
Example #37
    def pre_upgrade_restart(self, env, upgrade_type=None):
        import params
        env.set_params(params)

        # this function should not execute if the version can't be determined or
        # is not at least IOP 4.0.0.0
        if not params.version or compare_versions(
                format_stack_version(params.version), '4.0.0.0') < 0:
            return

        Logger.info("Executing Flume Stack Upgrade pre-restart")
        conf_select.select(params.stack_name, "flume", params.version)
        stack_select.select("flume-server", params.version)
        if params.upgrade_direction == Direction.UPGRADE:
            flume_upgrade.pre_start_restore()
Example #38
    def pre_upgrade_restart(self, env, upgrade_type=None):
        node_type_lower = self.nodeType.lower()
        Logger.info(
            format("Executing druid-{node_type_lower} Upgrade pre-restart"))
        import params

        env.set_params(params)

        if params.stack_version and check_stack_feature(
                StackFeature.ROLLING_UPGRADE, params.stack_version):
            stack_select.select(self.get_component_name(),
                                params.stack_version)
        if params.stack_version and check_stack_feature(
                StackFeature.CONFIG_VERSIONING, params.stack_version):
            conf_select.select(params.stack_name, "druid",
                               params.stack_version)
Example #39
  def pre_rolling_restart(self, env):
    import params
    env.set_params(params)

    # this function should not execute if the version can't be determined or
    # is not at least HDP 2.2.0.0
    if not params.version or Script.is_hdp_stack_less_than("2.2"):
      return

    Logger.info("Executing Flume Rolling Upgrade pre-restart")
    conf_select.select(params.stack_name, "flume", params.version)
    hdp_select.select("flume-server", params.version)

    # only restore on upgrade, not downgrade
    if params.upgrade_direction == Direction.UPGRADE:
      flume_upgrade.pre_start_restore()
Example #40
    def pre_rolling_restart(self, env):
        import params

        env.set_params(params)
        if params.version and compare_versions(
                format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
            conf_select.select(params.stack_name, "spark", params.version)
            Execute(format("hdp-select set spark-historyserver {version}"))
            params.HdfsResource(InlineTemplate(
                params.tez_tar_destination).get_content(),
                                type="file",
                                action="create_on_execute",
                                source=params.tez_tar_source,
                                group=params.user_group,
                                owner=params.hdfs_user)
            params.HdfsResource(None, action="execute")
Example #41
  def pre_rolling_restart(self, env):
    Logger.info("Executing HiveServer2 Rolling Upgrade pre-restart")
    import params
    env.set_params(params)

    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      conf_select.select(params.stack_name, "hive", params.version)
      Execute(format("hdp-select set hive-server2 {version}"))
      params.HdfsResource(InlineTemplate(params.mapreduce_tar_destination).get_content(),
                          type="file",
                          action="create_on_execute",
                          source=params.mapreduce_tar_source,
                          group=params.user_group,
                          mode=params.tarballs_mode
      )
      params.HdfsResource(None, action="execute")
Example #42
    def pre_rolling_restart(self, env):
        import params
        env.set_params(params)

        # this function should not execute if the version can't be determined or
        # is not at least HDP 2.2.0.0
        if not params.version or Script.is_hdp_stack_less_than("2.2"):
            return

        Logger.info("Executing Flume Rolling Upgrade pre-restart")
        conf_select.select(params.stack_name, "flume", params.version)
        hdp_select.select("flume-server", params.version)

        # only restore on upgrade, not downgrade
        if params.upgrade_direction == Direction.UPGRADE:
            flume_upgrade.pre_start_restore()
Example #43
  def stack_upgrade_save_new_config(self, env):
    import params
    env.set_params(params)

    conf_select_name = "r4ml"
    base_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    config_dir = self.get_config_dir_during_stack_upgrade(env, base_dir, conf_select_name)

    if config_dir:
      Logger.info("stack_upgrade_save_new_config(): Calling conf-select on %s using version %s" % (conf_select_name, str(params.version)))

      # Because this script was called from ru_execute_tasks.py which already enters an Environment with its own basedir,
      # must change it now so this function can find the Jinja Templates for the service.
      env.config.basedir = base_dir
      conf_select.select(params.stack_name, conf_select_name, params.version)
      self.configure(env, config_dir=config_dir)
Example #44
    def pre_upgrade_restart(self, env, upgrade_type=None):
        Logger.info("Executing Stack Upgrade pre-restart")
        import params
        env.set_params(params)

        if params.version and compare_versions(
                format_stack_version(params.version), '2.2.0.0') >= 0:
            # When downgrading an Express Upgrade, the first thing we do is to revert the symlinks.
            # Therefore, we cannot call this code in that scenario.
            call_if = [("rolling", "upgrade"), ("rolling", "downgrade"),
                       ("nonrolling", "upgrade")]
            for e in call_if:
                if (upgrade_type, params.upgrade_direction) == e:
                    conf_select.select(params.stack_name, "hadoop",
                                       params.version)
            stack_select.select("hadoop-hdfs-namenode", params.version)
Example #45
    def configure(self, env, upgrade_type=None):
        import params

        if upgrade_type == "nonrolling" and params.upgrade_direction == Direction.UPGRADE and \
                params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
            conf_select.select(params.stack_name, "oozie", params.version)
            # In order for "/usr/hdp/current/oozie-<client|server>" to point to the new version of
            # oozie, we need to create the symlinks for both the server and the client, since both
            # must point to the newly installed oozie version.

            # Sets the symlink : eg: /usr/hdp/current/oozie-client -> /usr/hdp/2.3.x.y-<version>/oozie
            hdp_select.select("oozie-client", params.version)
            # Sets the symlink : eg: /usr/hdp/current/oozie-server -> /usr/hdp/2.3.x.y-<version>/oozie
            hdp_select.select("oozie-server", params.version)
        env.set_params(params)

        oozie(is_server=True)
Example #46
def _link_configs(package, version, old_conf, link_conf):
  """
  Link a specific package's configuration directory
  """

  if not os.path.exists(old_conf):
    Logger.debug("Skipping {0} as it does not exist.".format(old_conf))
    return

  # check if conf is a link to the target already
  if os.path.islink(old_conf):
    Logger.debug("{0} is already a link to {1}".format(old_conf, os.path.realpath(old_conf)))
    return

  # make backup dir and copy everything in case configure() was called after install()
  old_parent = os.path.abspath(os.path.join(old_conf, os.pardir))
  old_conf_copy = os.path.join(old_parent, "conf.install")
  Execute(("cp", "-R", "-p", old_conf, old_conf_copy),
          not_if = format("test -e {old_conf_copy}"),
          sudo = True,
  )

  versioned_conf = conf_select.create("HDP", package, version, dry_run = True)

  Logger.info("New conf directory is {0}".format(versioned_conf))

  # make new conf dir and copy everything in case configure() was called after install()
  if not os.path.exists(versioned_conf):
    conf_select.create("HDP", package, version)
    Execute(as_sudo(["cp", "-R", "-p", os.path.join(old_conf, "*"), versioned_conf], auto_escape=False),
            only_if = format("ls {old_conf}/*")
    )
    
  # make /usr/hdp/<version>/hadoop/conf point to the versioned config.
  # /usr/hdp/current is already set
  conf_select.select("HDP", package, version)

  # no more references to /etc/[component]/conf
  Directory(old_conf,
    action="delete",
  )

  # link /etc/[component]/conf -> /usr/hdp/current/[component]-client/conf
  Link(old_conf,
    to = link_conf
  )
Example #47
    def pre_rolling_restart(self, env):
        Logger.info("Executing HiveServer2 Rolling Upgrade pre-restart")
        import params
        env.set_params(params)

        if params.version and compare_versions(
                format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
            conf_select.select(params.stack_name, "hive", params.version)
            hdp_select.select("hive-server2", params.version)

            # Copy mapreduce.tar.gz and tez.tar.gz to HDFS
            resource_created = copy_to_hdfs("mapreduce", params.user_group,
                                            params.hdfs_user)
            resource_created = copy_to_hdfs(
                "tez", params.user_group, params.hdfs_user) or resource_created
            if resource_created:
                params.HdfsResource(None, action="execute")
Example #48
    def pre_rolling_restart(self, env):
        import params

        env.set_params(params)
        if params.version and compare_versions(
                format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
            conf_select.select(params.stack_name, "spark", params.version)
            hdp_select.select("spark-historyserver", params.version)

            # Spark 1.3.1.2.3 and higher, included in HDP 2.3, does not depend on Tez,
            # so the Tez tarball only needs to be copied for versions below HDP 2.3.
            if compare_versions(format_hdp_stack_version(params.version),
                                '2.3.0.0') < 0:
                resource_created = copy_to_hdfs("tez", params.user_group,
                                                params.hdfs_user)
                if resource_created:
                    params.HdfsResource(None, action="execute")
Example #49
    def pre_upgrade_restart(self, env, upgrade_type=None):
        import params
        env.set_params(params)

        # this function should not execute if the version can't be determined or
        # the stack does not support rolling upgrade
        if not (params.version and check_stack_feature(
                StackFeature.ROLLING_UPGRADE, params.version)):
            return

        Logger.info("Executing Flume Stack Upgrade pre-restart")
        conf_select.select(params.stack_name, "flume", params.version)
        stack_select.select("flume-server", params.version)

        # only restore on upgrade, not downgrade
        if params.upgrade_direction == Direction.UPGRADE:
            flume_upgrade.pre_start_restore()
Example #50
    def pre_rolling_restart(self, env):
        import params
        env.set_params(params)
        if params.version and compare_versions(
                format_hdp_stack_version(params.version), '2.2.0.0') >= 0:

            absolute_backup_dir = None
            if params.upgrade_direction and params.upgrade_direction == Direction.UPGRADE:
                Logger.info("Backing up directories. Initial conf folder: %s" %
                            os.path.realpath(params.knox_conf_dir))

                # This will backup the contents of the conf directory into /tmp/knox-upgrade-backup/knox-conf-backup.tar
                absolute_backup_dir = upgrade.backup_data()

            # conf-select will change the symlink to the conf folder.
            conf_select.select(params.stack_name, "knox", params.version)
            hdp_select.select("knox-server", params.version)

            # Extract the tar of the old conf folder into the new conf directory
            if absolute_backup_dir is not None and params.upgrade_direction and params.upgrade_direction == Direction.UPGRADE:
                conf_tar_source_path = os.path.join(
                    absolute_backup_dir, upgrade.BACKUP_CONF_ARCHIVE)
                if os.path.exists(conf_tar_source_path):
                    extract_dir = os.path.realpath(params.knox_conf_dir)
                    conf_tar_dest_path = os.path.join(
                        extract_dir, upgrade.BACKUP_CONF_ARCHIVE)
                    Logger.info("Copying %s to %s." %
                                (upgrade.BACKUP_CONF_ARCHIVE, conf_tar_dest_path))
                    Execute("cp %s %s" %
                            (conf_tar_source_path, conf_tar_dest_path))

                    tarball = None
                    try:
                        tarball = tarfile.open(conf_tar_source_path, "r")
                        Logger.info("Extracting %s into %s directory." %
                                    (upgrade.BACKUP_CONF_ARCHIVE, extract_dir))
                        tarball.extractall(extract_dir)

                        Logger.info("Deleting temporary tar at %s" %
                                    conf_tar_dest_path)
                        Execute("rm %s" % (conf_tar_dest_path))
                    finally:
                        if tarball:
                            tarball.close()
Example #51
    def pre_upgrade_restart(self, env, upgrade_type=None):
        Logger.info("Executing Hive Server Stack Upgrade pre-restart")
        import params
        env.set_params(params)

        if params.version and compare_versions(
                format_stack_version(params.version), '4.0.0.0') >= 0:
            conf_select.select(params.stack_name, "hive", params.version)
            stack_select.select("hive-server2", params.version)

            # Copy mapreduce.tar.gz and tez.tar.gz to HDFS
            resource_created = copy_to_hdfs("mapreduce",
                                            params.user_group,
                                            params.hdfs_user,
                                            skip=params.host_sys_prepped)

            if resource_created:
                params.HdfsResource(None, action="execute")
Example #52
    def pre_upgrade_restart(self, env, upgrade_type=None):
        import params
        env.set_params(params)
        if not params.stack_registry_support_schema_migrate:
            if params.upgrade_direction == Direction.UPGRADE:
                Logger.info("Executing bootstrap_storage as it is upgrade")
                self.execute_bootstrap(params)
            else:
                Logger.info(
                    "Not executing bootstrap_storage as it is downgrade")

        if params.version and check_stack_feature(
                StackFeature.ROLLING_UPGRADE,
                format_stack_version(params.version)):
            stack_select.select("registry", params.version)
        if params.version and check_stack_feature(
                StackFeature.CONFIG_VERSIONING, params.version):
            conf_select.select(params.stack_name, "registry", params.version)
Example #53
  def pre_upgrade_restart(self, env, upgrade_type=None):
    import params
    env.set_params(params)

    # backup the data directory to /tmp/knox-upgrade-backup/knox-data-backup.tar just in case
    # something happens; Knox is interesting in that they re-generate missing files like
    # keystores which can cause side effects if the upgrade goes wrong
    if params.upgrade_direction and params.upgrade_direction == Direction.UPGRADE:
      absolute_backup_dir = upgrade.backup_data()
      Logger.info("Knox data was successfully backed up to {0}".format(absolute_backup_dir))

    # <conf-selector-tool> will change the symlink to the conf folder.
    conf_select.select(params.stack_name, "knox", params.version)
    stack_select.select("knox-server", params.version)

    # seed the new Knox data directory with the keystores of yesteryear
    if params.upgrade_direction == Direction.UPGRADE:
      upgrade.seed_current_data_directory()
Example #54
    def pre_rolling_restart(self, env):
        import params
        env.set_params(params)
        if params.version and compare_versions(
                format_hdp_stack_version(params.version), '2.2.0.0') >= 0:

            absolute_backup_dir = None
            if params.upgrade_direction and params.upgrade_direction == Direction.UPGRADE:
                Logger.info("Backing up directories. Initial conf folder: %s" %
                            os.path.realpath(params.knox_conf_dir))

                # This will backup the contents of the conf directory into /tmp/knox-upgrade-backup/knox-conf-backup.tar
                absolute_backup_dir = upgrade.backup_data()

            # conf-select will change the symlink to the conf folder.
            conf_select.select(params.stack_name, "knox", params.version)
            hdp_select.select("knox-server", params.version)

            # Extract the tar of the old conf folder into the new conf directory
            if absolute_backup_dir is not None and params.upgrade_direction and params.upgrade_direction == Direction.UPGRADE:
                conf_tar_source_path = os.path.join(
                    absolute_backup_dir, upgrade.BACKUP_CONF_ARCHIVE)
                if os.path.exists(conf_tar_source_path):
                    extract_dir = os.path.realpath(params.knox_conf_dir)
                    conf_tar_dest_path = os.path.join(
                        extract_dir, upgrade.BACKUP_CONF_ARCHIVE)
                    Logger.info("Copying %s to %s." %
                                (upgrade.BACKUP_CONF_ARCHIVE, conf_tar_dest_path))
                    Execute(
                        ('cp', conf_tar_source_path, conf_tar_dest_path),
                        sudo=True,
                    )

                    Execute(
                        ('tar', '-xvf', conf_tar_source_path, '-C',
                         extract_dir),
                        sudo=True,
                    )

                    File(
                        conf_tar_dest_path,
                        action="delete",
                    )
Example #55
    def install(self, env):
        print "Install"
        self.prepare()
        component_name = self.get_component_name()
        repo_info = str(default("/hostLevelParams/repo_info", "1.1.1.1-1"))
        matches = re.findall(r"([\d\.]+\-\d+)", repo_info)
        version = matches[0] if matches else "1.1.1.1-1"

        from resource_management.libraries.functions import stack_tools
        (stack_selector_name, stack_selector_path,
         stack_selector_package) = stack_tools.get_stack_tool(
             stack_tools.STACK_SELECTOR_NAME)
        command = 'ambari-python-wrap {0} install {1}'.format(
            stack_selector_path, version)
        Execute(command)

        if component_name:
            conf_select.select("PERF", component_name, version)
            stack_select.select(component_name, version)
Example #56
    def pre_rolling_restart(self, env):
        Logger.info("Executing Rolling Upgrade pre-restart")
        import params
        env.set_params(params)

        if params.version and compare_versions(
                format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
            conf_select.select(params.stack_name, "hadoop", params.version)
            hdp_select.select("hadoop-mapreduce-historyserver", params.version)
            # MC Hammer said, "Can't touch this"
            copy_to_hdfs("mapreduce",
                         params.user_group,
                         params.hdfs_user,
                         host_sys_prepped=params.host_sys_prepped)
            copy_to_hdfs("tez",
                         params.user_group,
                         params.hdfs_user,
                         host_sys_prepped=params.host_sys_prepped)
            params.HdfsResource(None, action="execute")
Example #57
  def pre_upgrade_restart(self, env, upgrade_type=None):
    import params

    env.set_params(params)
    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
      Logger.info("Executing Spark2 Job History Server Stack Upgrade pre-restart")
      conf_select.select(params.stack_name, "spark2", params.version)
      stack_select.select("spark2-historyserver", params.version)

      # Spark 1.3.1.2.3 and higher, included in HDP 2.3, does not depend on Tez;
      # copy the Tez tarball only when the stack still requires it.
      if params.version and check_stack_feature(StackFeature.TEZ_FOR_SPARK, params.version):
        resource_created = copy_to_hdfs(
          "tez",
          params.user_group,
          params.hdfs_user,
          host_sys_prepped=params.host_sys_prepped)
        if resource_created:
          params.HdfsResource(None, action="execute")
Example #58
def zookeeper_service(action='start', upgrade_type=None):
    import params

    # This path may be missing after Ambari upgrade. We need to create it.
    if upgrade_type is None and not os.path.exists(os.path.join(params.stack_root, "current", "zookeeper-server")) and params.current_version \
      and check_stack_feature(StackFeature.ROLLING_UPGRADE, format_stack_version(params.version)):
        conf_select.select(params.stack_name, "zookeeper",
                           params.current_version)
        stack_select.select("zookeeper-server", params.version)

    cmd = format(
        "env ZOOCFGDIR={config_dir} ZOOCFG=zoo.cfg {zk_bin}/zkServer.sh")

    if action == 'start':
        daemon_cmd = format(
            "source {config_dir}/zookeeper-env.sh ; {cmd} start")
        no_op_test = format(
            "ls {zk_pid_file} >/dev/null 2>&1 && ps -p `cat {zk_pid_file}` >/dev/null 2>&1"
        )

        try:
            Execute(daemon_cmd, not_if=no_op_test, user=params.zk_user)
        except:
            show_logs(params.zk_log_dir, params.zk_user)
            raise

        if params.security_enabled:
            kinit_cmd = format(
                "{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};"
            )

            Execute(kinit_cmd, user=params.smokeuser)

    elif action == 'stop':
        daemon_cmd = format(
            "source {config_dir}/zookeeper-env.sh ; {cmd} stop")
        try:
            Execute(daemon_cmd, user=params.zk_user)
        except:
            show_logs(params.zk_log_dir, params.zk_user)
            raise
        File(params.zk_pid_file, action="delete")
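A minimal usage sketch for the function above (the arguments shown are its defaults; the call is illustrative):

# Hypothetical call: start ZooKeeper outside of any stack upgrade.
zookeeper_service(action='start', upgrade_type=None)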
Example #59
  def stack_upgrade_save_new_config(self, env):
    """
    Because this gets called during a Rolling Upgrade, the new configs have already been saved, so we must be
    careful to only call configure() on the directory with the new version.
    """
    import params
    env.set_params(params)

    conf_select_name = "spark"
    base_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    config_dir = self.get_config_dir_during_stack_upgrade(env, base_dir, conf_select_name)

    if config_dir:
      Logger.info("stack_upgrade_save_new_config(): Calling conf-select on %s using version %s" % (conf_select_name, str(params.version)))

      # Because this script was called from ru_execute_tasks.py which already enters an Environment with its own basedir,
      # must change it now so this function can find the Jinja Templates for the service.
      env.config.basedir = base_dir
      conf_select.select(params.stack_name, conf_select_name, params.version)
      self.configure(env, config_dir=config_dir, upgrade_type=UPGRADE_TYPE_ROLLING)
Example #60
  def pre_upgrade_restart(self, env, upgrade_type=None):
    import params

    env.set_params(params)
    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      Logger.info("Executing Spark Job History Server Stack Upgrade pre-restart")
      conf_select.select(params.stack_name, "spark", params.version)
      hdp_select.select("spark-historyserver", params.version)

      # Spark 1.3.1.2.3 and higher, included in HDP 2.3, does not depend on Tez,
      # so the Tez tarball only needs to be copied for versions below HDP 2.3.
      if params.version and compare_versions(format_hdp_stack_version(params.version), '2.3.0.0') < 0:
        resource_created = copy_to_hdfs(
          "tez",
          params.user_group,
          params.hdfs_user,
          host_sys_prepped=params.host_sys_prepped)
        if resource_created:
          params.HdfsResource(None, action="execute")