def test_get_downgrade_from_version(self):
  """
  Tests that a simple downgrade returns the correct version
  :return:
  """
  command_json = TestUpgradeSummary._get_cluster_simple_downgrade_json()
  Script.config = command_json

  # an unknown service has no downgrade-from version; HDFS does
  self.assertIsNone(upgrade_summary.get_downgrade_from_version("FOO"))
  self.assertEqual("2.5.9.9-9999", upgrade_summary.get_downgrade_from_version("HDFS"))
def test_get_stack_feature_version_missing_params(self):
  """
  Tests that simple upgrade information can be extracted from JSON
  :return:
  """
  command_json = TestUpgradeSummary._get_cluster_simple_upgrade_json()
  Script.config = command_json

  summary = upgrade_summary.get_upgrade_summary()
  self.assertEqual(False, summary.is_revert)
  self.assertEqual("UPGRADE", summary.direction)
  self.assertEqual("STANDARD", summary.orchestration)
  self.assertEqual("rolling_upgrade", summary.type)

  services = summary.services
  self.assertEqual("2.4.0.0-1234", services["HDFS"].source_version)
  self.assertEqual("2.5.9.9-9999", services["HDFS"].target_version)

  self.assertEqual("2.4.0.0-1234", upgrade_summary.get_source_version("HDFS"))
  self.assertEqual("2.5.9.9-9999", upgrade_summary.get_target_version("HDFS"))

  # there is no downgrade-from version while the direction is UPGRADE
  self.assertIsNone(upgrade_summary.get_downgrade_from_version("HDFS"))
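
# For reference, a minimal sketch of the lookup the tests above exercise. This
# is an assumption of how get_downgrade_from_version() behaves, inferred from
# the assertions (None unless downgrading or for unknown services, otherwise
# the version being abandoned); it is not the actual Ambari implementation.
def _sketch_get_downgrade_from_version(service_name):
  summary = upgrade_summary.get_upgrade_summary()

  # no summary, or the cluster is not downgrading: nothing to report
  if summary is None or summary.direction != "DOWNGRADE":
    return None

  service = summary.services.get(service_name)
  if service is None:
    return None

  # during a downgrade the "from" version is the one being abandoned, which
  # was the target of the original upgrade (2.5.9.9-9999 in the tests above)
  return service.source_version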
# This is expected to be of the form #.#.#.#
stack_version_unformatted = status_params.stack_version_unformatted
stack_version_formatted_major = status_params.stack_version_formatted_major

# this is not available on INSTALL action because <stack-selector-tool> is not available
stack_version_formatted = functions.get_stack_version('hive-server2')
major_stack_version = get_major_version(stack_version_formatted_major)

# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade.
# It cannot be used during the initial Cluster Install because the version is not yet known.
version = default("/commandParams/version", None)

# When downgrading, 'version' points to the downgrade-target version;
# downgrade_from_version provides the source version the downgrade is happening from
downgrade_from_version = upgrade_summary.get_downgrade_from_version("HIVE")

# get the correct version to use for checking stack features
version_for_stack_feature_checks = get_stack_feature_version(config)

# Upgrade direction
upgrade_direction = default("/commandParams/upgrade_direction", None)

stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
stack_supports_ranger_hive_jdbc_url_change = check_stack_feature(StackFeature.RANGER_HIVE_PLUGIN_JDBC_URL, version_for_stack_feature_checks)
stack_supports_atlas_hook_for_hive_interactive = check_stack_feature(StackFeature.HIVE_INTERACTIVE_ATLAS_HOOK_REQUIRED, version_for_stack_feature_checks)
stack_supports_hive_interactive_ga = check_stack_feature(StackFeature.HIVE_INTERACTIVE_GA_SUPPORT, version_for_stack_feature_checks)

# component ROLE directory (like hive-metastore or hive-server2-hive2)
component_directory = status_params.component_directory
component_directory_interactive = status_params.component_directory_interactive
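
# Illustrative only: a sketch of how params like version, upgrade_direction,
# and downgrade_from_version are typically consumed downstream. The
# "downgrade" literal and the effective_version name are assumptions for this
# example, not taken from the file.
if upgrade_direction == "downgrade" and downgrade_from_version is not None:
  # during a downgrade, version-dependent logic must key off the version the
  # cluster is leaving, not the version it is moving back to
  effective_version = downgrade_from_version
else:
  effective_version = version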
version_for_stack_feature_checks = get_stack_feature_version(config)

stack_supports_ranger_kerberos = check_stack_feature(
    StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
stack_supports_ranger_audit_db = check_stack_feature(
    StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
stack_supports_core_site_for_ranger_plugin = check_stack_feature(
    StackFeature.CORE_SITE_FOR_RANGER_PLUGINS_SUPPORT, version_for_stack_feature_checks)
stack_supports_kafka_env_include_ranger_script = check_stack_feature(
    StackFeature.KAFKA_ENV_INCLUDE_RANGER_SCRIPT, version_for_stack_feature_checks)

# When downgrading, 'version' points to the downgrade-target version;
# downgrade_from_version provides the source version the downgrade is happening from
downgrade_from_version = upgrade_summary.get_downgrade_from_version("KAFKA")

hostname = config['agentLevelParams']['hostname']

# default kafka parameters
kafka_home = '/usr/lib/kafka'
kafka_bin = kafka_home + '/bin/kafka'
conf_dir = "/etc/kafka/conf"
limits_conf_dir = "/etc/security/limits.d"

# Used while upgrading the stack in a kerberized cluster and running kafka-acls.sh
zookeeper_connect = default("/configurations/kafka-broker/zookeeper.connect", None)

kafka_user_nofile_limit = default('/configurations/kafka-env/kafka_user_nofile_limit', 128000)
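
# A minimal sketch of the path semantics that default() provides above,
# assuming the command JSON is a plain nested dict; the real helper lives in
# Ambari's resource_management library, and this stand-in is only for
# illustration.
def _sketch_default(command_json, path, default_value):
  node = command_json
  for key in path.strip("/").split("/"):
    if not isinstance(node, dict) or key not in node:
      # any missing segment along the path yields the supplied default
      return default_value
    node = node[key]
  return node

# e.g. _sketch_default(config, "/configurations/kafka-env/kafka_user_nofile_limit", 128000)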