Example #1
    def pre_upgrade_restart(self, env, upgrade_type=None):
        import params
        env.set_params(params)

        if params.version and compare_versions(
                format_stack_version(params.version), '4.1.0.0') >= 0:
            stack_select.select_packages(params.version)

        # This is extremely important since it should only be called if crossing the IOP 4.2 boundary.
        if params.version and params.upgrade_direction:
            src_version = dst_version = None
            if params.upgrade_direction == Direction.UPGRADE:
                src_version = upgrade_summary.get_source_version(
                    "KAFKA", default_version=params.version)
                dst_version = upgrade_summary.get_target_version(
                    "KAFKA", default_version=params.version)
            else:
                # These represent the original values during the UPGRADE direction
                src_version = upgrade_summary.get_target_version(
                    "KAFKA", default_version=params.version)
                dst_version = upgrade_summary.get_source_version(
                    "KAFKA", default_version=params.version)

            if compare_versions(src_version,
                                '4.2.0.0') < 0 and compare_versions(
                                    dst_version, '4.2.0.0') >= 0:
                # Upgrade from IOP 4.1 to 4.2, Calling the acl migration script requires the configs to be present.
                self.configure(env, upgrade_type=upgrade_type)
                upgrade.run_migration(env, upgrade_type)
Example #2
    def pre_upgrade_restart(self, env, upgrade_type=None):
        import params
        env.set_params(params)

        # grab the current version of the component
        pre_upgrade_version = stack_select.get_role_component_current_stack_version()

        if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE,
                                                  params.version):
            stack_select.select_packages(params.version)

        # This is extremely important since it should only be called if crossing the HDP 2.3.4.0 boundary.
        if params.version and params.upgrade_direction:
            src_version = dst_version = None
            if params.upgrade_direction == Direction.UPGRADE:
                src_version = upgrade_summary.get_source_version(
                    "KAFKA", default_version=params.version)
                dst_version = upgrade_summary.get_target_version(
                    "KAFKA", default_version=params.version)
            else:
                # These represent the original values during the UPGRADE direction
                src_version = upgrade_summary.get_target_version(
                    "KAFKA", default_version=params.version)
                dst_version = upgrade_summary.get_source_version(
                    "KAFKA", default_version=params.version)

            if not check_stack_feature(
                    StackFeature.KAFKA_ACL_MIGRATION_SUPPORT,
                    src_version) and check_stack_feature(
                        StackFeature.KAFKA_ACL_MIGRATION_SUPPORT, dst_version):
                # Calling the acl migration script requires the configs to be present.
                self.configure(env, upgrade_type=upgrade_type)
                upgrade.run_migration(env, upgrade_type)
Example #3
    def test_get_stack_feature_version_missing_params(self):
        """
    Tests that simple upgrade information can be extracted from JSON
    :return:
    """
        command_json = TestUpgradeSummary._get_cluster_simple_upgrade_json()
        Script.config = command_json

        summary = upgrade_summary.get_upgrade_summary()
        self.assertEqual(False, summary.is_revert)
        self.assertEqual("UPGRADE", summary.direction)
        self.assertEqual("STANDARD", summary.orchestration)
        self.assertEqual("rolling_upgrade", summary.type)

        services = summary.services
        self.assertEqual("2.4.0.0-1234", services["HDFS"].source_version)
        self.assertEqual("2.5.9.9-9999", services["HDFS"].target_version)

        self.assertEqual("2.4.0.0-1234",
                         upgrade_summary.get_source_version("HDFS"))
        self.assertEqual("2.5.9.9-9999",
                         upgrade_summary.get_target_version("HDFS"))

        self.assertTrue(
            upgrade_summary.get_downgrade_from_version("HDFS") is None)
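
A small sketch of how a service script might consume the same summary object, relying only on the fields asserted in the test above; the HDFS key, the None check, and the log message are illustrative assumptions, with Logger assumed available as in the other examples.

    # Hypothetical sketch; uses only fields asserted by the test above.
    summary = upgrade_summary.get_upgrade_summary()
    if summary is not None and summary.direction == "UPGRADE":
        hdfs_versions = summary.services["HDFS"]  # per-service source/target version pair
        Logger.info("HDFS is moving from {0} to {1}".format(
            hdfs_versions.source_version, hdfs_versions.target_version))
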
Example #4
    def convert_tables(self, env):
        import params
        env.set_params(params)

        source_version = upgrade_summary.get_source_version(
            service_name="HIVE")
        target_version = upgrade_summary.get_target_version(
            service_name="HIVE")

        source_dir = format("/usr/hdp/{source_version}")
        target_dir = format("/usr/hdp/{target_version}")

        if params.security_enabled:
            hive_kinit_cmd = format(
                "{kinit_path_local} -kt {hive_server2_keytab} {hive_principal}; "
            )
            Execute(hive_kinit_cmd, user=params.hive_user)

        # in the M10 release PreUpgradeTool was fixed to use Hive1 instead of Hive2
        if target_version >= "3.0.3":
            hive_lib_dir = format("{source_dir}/hive/lib")
        else:
            hive_lib_dir = format("{source_dir}/hive2/lib")

        classpath = format(
            "{hive_lib_dir}/*:{source_dir}/hadoop/*:{source_dir}/hadoop/lib/*:{source_dir}/hadoop-mapreduce/*:{source_dir}/hadoop-mapreduce/lib/*:{source_dir}/hadoop-hdfs/*:{source_dir}/hadoop-hdfs/lib/*:{source_dir}/hadoop/etc/hadoop/:{target_dir}/hive/lib/hive-pre-upgrade.jar:{source_dir}/hive/conf/conf.server"
        )
        # Hack to avoid a Derby classpath issue: we want derby-10.10.2.0.jar to appear first
        # on the classpath if it's available; note the other Derby jars are derbyclient-10.11.1.1.jar
        # and derbynet-10.11.1.1.jar.
        derby_jars = glob.glob(source_dir + "/hive2/lib/*derby-*.jar")
        if len(derby_jars) == 1:
            classpath = derby_jars[0] + ":" + classpath
        cmd = format(
            "{java64_home}/bin/java -Djavax.security.auth.useSubjectCredsOnly=false -cp {classpath} org.apache.hadoop.hive.upgrade.acid.PreUpgradeTool -execute"
        )
        Execute(cmd, user=params.hive_user)
Example #5
def get_current_version(service=None, use_upgrading_version_during_upgrade=True):
  """
  Get the effective version to use to copy the tarballs to.
  :param service: the service name when checking for an upgrade. Made optional for unknown \
    code bases that may be using this function
  :param use_upgrading_version_during_upgrade: True, except when the RU/EU hasn't started yet.
  :return: Version, or False if an error occurred.
  """

  from resource_management.libraries.functions import upgrade_summary

  # get the version for this command
  version = stack_features.get_stack_feature_version(Script.get_config())
  if service is not None:
    version = upgrade_summary.get_target_version(service_name=service, default_version=version)


  # if there is no upgrade, then use the command's version
  if not Script.in_stack_upgrade() or use_upgrading_version_during_upgrade:
    Logger.info("Tarball version was calcuated as {0}. Use Command Version: {1}".format(
      version, use_upgrading_version_during_upgrade))

    return version

  # we're in an upgrade and we need to use an older version
  current_version = stack_select.get_role_component_current_stack_version()
  if service is not None:
    current_version = upgrade_summary.get_source_version(service_name=service, default_version=current_version)

  if current_version is None:
    Logger.warning("Unable to determine the current version of the component for this command; unable to copy the tarball")
    return False

  return current_version
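
A minimal usage sketch of get_current_version, assuming it runs in a service script where Logger is available; the HIVE service name and the /usr/hdp path layout are illustrative assumptions.

# Hypothetical caller sketch; the service name and path layout are assumptions.
effective_version = get_current_version(service="HIVE", use_upgrading_version_during_upgrade=False)
if effective_version is False:
  Logger.warning("Skipping the tarball copy since no usable version could be determined")
else:
  tarball_source_dir = "/usr/hdp/{0}/hive".format(effective_version)
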
Example #6
    def convert_tables(self, env):
        import params
        env.set_params(params)

        source_version = upgrade_summary.get_source_version(
            service_name="HIVE")
        target_version = upgrade_summary.get_target_version(
            service_name="HIVE")

        source_dir = format("/usr/hdp/{source_version}")
        target_dir = format("/usr/hdp/{target_version}")

        if params.security_enabled:
            hive_kinit_cmd = format(
                "{kinit_path_local} -kt {hive_server2_keytab} {hive_principal}; "
            )
            Execute(hive_kinit_cmd, user=params.hive_user)

        classpath = format(
            "{source_dir}/hive2/lib/*:{source_dir}/hadoop/*:{source_dir}/hadoop/lib/*:{source_dir}/hadoop-mapreduce/*:{source_dir}/hadoop-mapreduce/lib/*:{target_dir}/hive/lib/hive-pre-upgrade.jar:{source_dir}/hive/conf"
        )
        cmd = format(
            "{java64_home}/bin/java -Djavax.security.auth.useSubjectCredsOnly=false -cp {classpath} org.apache.hadoop.hive.upgrade.acid.PreUpgradeTool -execute"
        )
        Execute(cmd, user=params.hive_user)
Example #7
    def convert_tables(self, env):
        import params
        env.set_params(params)

        source_version = upgrade_summary.get_source_version(
            service_name="HIVE")
        target_version = upgrade_summary.get_target_version(
            service_name="HIVE")

        source_dir = format("/usr/hdp/{source_version}")
        target_dir = format("/usr/hdp/{target_version}")

        classpath = format(
            "{source_dir}/hive2/lib/*:{source_dir}/hadoop/*:{source_dir}/hadoop/lib/*:{source_dir}/hadoop-mapreduce/*:{source_dir}/hadoop-mapreduce/lib/*:{target_dir}/hive/lib/hive-pre-upgrade.jar:{source_dir}/hive/conf"
        )
        cmd = format(
            "{java64_home}/bin/java -cp {classpath} org.apache.hadoop.hive.upgrade.acid.PreUpgradeTool -execute"
        )
        Execute(cmd, user="******")
Example #8
if hive_server2_hive2_dir:
  hive2_jdbc_target = format("{hive_server2_hive2_lib}/{jdbc_jar_name}")

# during upgrade / downgrade, use the specific version to copy the JDBC JAR to
if upgrade_direction:
  hive_jdbc_target = format("{hive_version_lib}/{jdbc_jar_name}")
  hive2_jdbc_target = format("{hive_server2_hive2_version_lib}/{jdbc_jar_name}") if hive2_jdbc_target is not None else None


hive2_previous_jdbc_jar = format("{hive_server2_hive2_lib}/{hive_previous_jdbc_jar_name}") if hive_server2_hive2_lib is not None else None
driver_curl_source = format("{jdk_location}/{jdbc_jar_name}")

# normally, the JDBC driver would be referenced by <stack-root>/current/.../foo.jar
# but in RU if <stack-selector-tool> is called and the restart fails, then this means that current pointer
# is now pointing to the upgraded version location; that's bad for the cp command
version_for_source_jdbc_file = upgrade_summary.get_source_version(default_version = version_for_stack_feature_checks)
source_jdbc_file = format("{stack_root}/{version_for_source_jdbc_file}/hive/lib/{jdbc_jar_name}")

check_db_connection_jar_name = "DBConnectionVerification.jar"
check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
hive_jdbc_drivers_list = ["com.microsoft.sqlserver.jdbc.SQLServerDriver","com.mysql.jdbc.Driver",
                          "org.postgresql.Driver","oracle.jdbc.driver.OracleDriver","sap.jdbc4.sqlanywhere.IDriver"]

prepackaged_jdbc_name = "ojdbc6.jar"
prepackaged_ojdbc_symlink = format("{hive_lib}/{prepackaged_jdbc_name}")
templeton_port = config['configurations']['webhcat-site']['templeton.port']

#constants for type2 jdbc
jdbc_libs_dir = format("{hive_lib}/native/lib64")
lib_dir_available = os.path.exists(jdbc_libs_dir)
Example #9
File: params.py  Project: Flipkart/ambari
# server configurations
config = Script.get_config()

tmp_dir = Script.get_tmp_dir()
sudo = AMBARI_SUDO_BINARY

stack_name = default("/hostLevelParams/stack_name", None)
retryAble = default("/commandParams/command_retry_enabled", False)

upgrade_direction = default("/commandParams/upgrade_direction", None)
version = default("/commandParams/version", None)

# This is the version whose state is CURRENT. During an RU, this is the source version.
# DO NOT format it since we need the build number too.
upgrade_from_version = upgrade_summary.get_source_version()

stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
stack_version = format_stack_version(stack_version_unformatted)

# get the correct version to use for checking stack features
version_for_stack_feature_checks = get_stack_feature_version(config)

stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)

knox_bin = ibm_distribution_knox_dir + '/bin/gateway.sh'
ldap_bin = ibm_distribution_knox_dir + '/bin/ldap.sh'
knox_client_bin = ibm_distribution_knox_dir + '/bin/knoxcli.sh'

namenode_hosts = default("/clusterHostInfo/namenode_host", None)
if type(namenode_hosts) is list:
  namenode_host = namenode_hosts[0]  # hypothetical completion: the original listing is cut off here; take the first NameNode host
Example #10
def get_stack_feature_version(config):
  """
  Uses the specified ConfigDictionary to determine which version to use for stack
  feature checks.

  Normally, the commandParams/version is the correct value to use as it represents the 4-digit
  exact stack version/build being upgraded to or downgraded to. However, there are cases where the
  commands being sent are to stop running services which are on a different stack version from the
  version being upgraded/downgraded to. As a result, the configurations sent for these specific
  stop commands do not match commandParams/version.
  :param config:  a ConfigDictionary instance to extract the hostLevelParams
                  and commandParams from.
  :return: the version to use when checking stack features.
  """
  from resource_management.libraries.functions.default import default

  if "hostLevelParams" not in config or "commandParams" not in config:
    raise Fail("Unable to determine the correct version since hostLevelParams and commandParams were not present in the configuration dictionary")

  # should always be there
  stack_version = config['hostLevelParams']['stack_version']

  # something like 2.4.0.0-1234; represents the version for the command
  # (or None if this is a cluster install and it hasn't been calculated yet)
  # this is always guaranteed to be the correct version for the command, even in
  # upgrade and downgrade scenarios
  command_version = default("/commandParams/version", None)
  command_stack = default("/commandParams/target_stack", None)

  # UPGRADE or DOWNGRADE (or None)
  upgrade_direction = default("/commandParams/upgrade_direction", None)

  # start out with the value that's right 99% of the time
  version_for_stack_feature_checks = command_version if command_version is not None else stack_version

  # if this is not an upgrade, then we take the simple path
  if upgrade_direction is None:
    Logger.info(
      "Stack Feature Version Info: Cluster Stack={0}, Command Stack={1}, Command Version={2} -> {3}".format(
        stack_version, command_stack, command_version, version_for_stack_feature_checks))

    return version_for_stack_feature_checks

  # STOP commands are the troublemakers, as they are intended to stop a service not on the
  # version of the stack being upgraded/downgraded to
  is_stop_command = _is_stop_command(config)
  if not is_stop_command:
    Logger.info(
      "Stack Feature Version Info: Cluster Stack={0}, Command Stack={1}, Command Version={2}, Upgrade Direction={3} -> {4}".format(
        stack_version, command_stack, command_version, upgrade_direction,
        version_for_stack_feature_checks))

    return version_for_stack_feature_checks

  is_downgrade = upgrade_direction.lower() == Direction.DOWNGRADE.lower()
  # guaranteed to have a STOP command now during an UPGRADE/DOWNGRADE, check direction
  if is_downgrade:
    from resource_management.libraries.functions import upgrade_summary
    version_for_stack_feature_checks = upgrade_summary.get_source_version(default_version = version_for_stack_feature_checks)
  else:
    # UPGRADE
    version_for_stack_feature_checks = command_version if command_version is not None else stack_version

  Logger.info(
    "Stack Feature Version Info: Cluster Stack={0}, Command Stack={1}, Command Version={2}, Upgrade Direction={3}, stop_command={4} -> {5}".format(
      stack_version, command_stack, command_version, upgrade_direction,
      is_stop_command, version_for_stack_feature_checks))

  return version_for_stack_feature_checks
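
A brief sketch of how get_stack_feature_version might be wired into a params.py, mirroring the module-level pattern in Example #9; the ROLLING_UPGRADE feature check is chosen only for illustration.

# Hypothetical caller sketch; mirrors the params.py pattern from Example #9.
config = Script.get_config()
version_for_stack_feature_checks = get_stack_feature_version(config)

# Gate version-dependent logic on the resolved value (feature name is illustrative).
if check_stack_feature(StackFeature.ROLLING_UPGRADE, version_for_stack_feature_checks):
  stack_select.select_packages(default("/commandParams/version", None))
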
Example #11
    def upgrade_schema(self, env):
        """
    Executes the schema upgrade binary.  This is its own function because it could
    be called as a standalone task from the upgrade pack, but is safe to run it for each
    metastore instance. The schema upgrade on an already upgraded metastore is a NOOP.

    The metastore schema upgrade requires a database driver library for most
    databases. During an upgrade, it's possible that the library is not present,
    so this will also attempt to copy/download the appropriate driver.

    This function will also ensure that configurations are written out to disk before running
    since the new configs will most likely not yet exist on an upgrade.

    Should not be invoked for a DOWNGRADE; Metastore only supports schema upgrades.
    """
        Logger.info("Upgrading Hive Metastore Schema")
        import params
        env.set_params(params)

        # ensure that configurations are written out before trying to upgrade the schema
        # since the schematool needs configs and doesn't know how to use the hive conf override
        self.configure(env)

        if params.security_enabled:
            kinit_command = format(
                "{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal}; "
            )
            Execute(kinit_command, user=params.smokeuser)

        # ensure that the JDBC driver is present for the schema tool; if it's not
        # present, then download it first
        if params.hive_jdbc_driver in params.hive_jdbc_drivers_list:
            target_directory = format("/usr/iop/{version}/hive/lib")

            # download it if it does not exist
            if not os.path.exists(params.source_jdbc_file):
                jdbc_connector(params.hive_jdbc_target,
                               params.hive_previous_jdbc_jar)

            target_directory_and_filename = os.path.join(
                target_directory, os.path.basename(params.source_jdbc_file))

            if params.sqla_db_used:
                target_native_libs_directory = format(
                    "{target_directory}/native/lib64")

                Execute(
                    format(
                        "yes | {sudo} cp {jars_in_hive_lib} {target_directory}"
                    ))

                Directory(target_native_libs_directory, create_parents=True)

                Execute(
                    format(
                        "yes | {sudo} cp {libs_in_hive_lib} {target_native_libs_directory}"
                    ))

                Execute(
                    format(
                        "{sudo} chown -R {hive_user}:{user_group} {hive_lib}/*"
                    ))
            else:
                # copy the JDBC driver from the older metastore location to the new location only
                # if it does not already exist
                if not os.path.exists(target_directory_and_filename):
                    Execute(('cp', params.source_jdbc_file, target_directory),
                            path=["/bin", "/usr/bin/"],
                            sudo=True)

            File(target_directory_and_filename, mode=0644)

        # build the schema tool command
        binary = format("/usr/iop/{version}/hive/bin/schematool")

        # the conf.server directory changed locations
        # since the configurations have not been written out yet during an upgrade
        # we need to choose the original legacy location
        schematool_hive_server_conf_dir = params.hive_server_conf_dir

        upgrade_from_version = upgrade_summary.get_source_version(
            "HIVE", default_version=params.version_for_stack_feature_checks)

        if compare_versions(upgrade_from_version, "4.1.0.0") < 0:
            schematool_hive_server_conf_dir = LEGACY_HIVE_SERVER_CONF

        env_dict = {'HIVE_CONF_DIR': schematool_hive_server_conf_dir}

        command = format(
            "{binary} -dbType {hive_metastore_db_type} -upgradeSchema")
        Execute(command,
                user=params.hive_user,
                tries=1,
                environment=env_dict,
                logoutput=True)
Example #12
    def upgrade_schema(self, env):
        """
    Executes the schema upgrade binary.  This is its own function because it could
    be called as a standalone task from the upgrade pack, but is safe to run it for each
    metastore instance.

    The metastore schema upgrade requires a database driver library for most
    databases. During an upgrade, it's possible that the library is not present,
    so this will also attempt to copy/download the appropriate driver.
    """
        Logger.info("Upgrading Hive Metastore")
        import params
        env.set_params(params)

        if params.security_enabled:
            kinit_command = format(
                "{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal}; "
            )
            Execute(kinit_command, user=params.smokeuser)

        # ensure that the JDBC driver is present for the schema tool; if it's not
        # present, then download it first
        if params.hive_jdbc_driver in params.hive_jdbc_drivers_list and params.hive_use_existing_db:
            source = params.source_jdbc_file
            target_directory = format("/usr/iop/{version}/hive/lib")
            if not os.path.exists(source):
                # download it
                jdbc_connector()

            Execute(('cp', source, target_directory),
                    path=["/bin", "/usr/bin/"],
                    sudo=True)

            File(
                os.path.join(target_directory, os.path.basename(source)),
                mode=0644,
            )

        # build the schema tool command
        binary = format("/usr/iop/{version}/hive/bin/schematool")

        # the conf.server directory changed locations
        # since the configurations have not been written out yet during an upgrade
        # we need to choose the original legacy location
        schematool_hive_server_conf_dir = params.hive_server_conf_dir

        upgrade_from_version = upgrade_summary.get_source_version(
            "HIVE", default_version=params.version_for_stack_feature_checks)

        if params.version_for_stack_feature_checks is not None:
            if compare_versions(upgrade_from_version, "4.1.0.0") < 0:
                schematool_hive_server_conf_dir = LEGACY_HIVE_SERVER_CONF

        env_dict = {'HIVE_CONF_DIR': schematool_hive_server_conf_dir}

        command = format(
            "{binary} -dbType {hive_metastore_db_type} -upgradeSchema")
        Execute(command,
                user=params.hive_user,
                tries=1,
                environment=env_dict,
                logoutput=True)