def pre_upgrade_restart(self, env, upgrade_type=None):
  """Point the stack-select tool at the new NodeManager packages before restart.

  Only acts when a target version is known and it is at least 4.0.0.0.
  """
  Logger.info("Executing NodeManager Stack Upgrade pre-restart")
  import params
  env.set_params(params)

  if not params.version:
    return
  if compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
    stack_select.select_packages(params.version)
def pre_rolling_restart(self, env):
  """Select the upgraded stack packages before a rolling restart.

  Skips the selection entirely when no target version is available or it
  is older than 4.0.0.0.
  """
  import params
  env.set_params(params)

  if not params.version:
    return
  if compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
    stack_select.select_packages(params.version)
def pre_upgrade_restart(self, env, upgrade_type=None):
  """Switch the Spark Thrift Server binaries to the target stack version."""
  Logger.info("Executing Spark Thrift Server Stack Upgrade pre-restart")
  import params
  env.set_params(params)
  # TODO: change to "spark" after the RPM switches the name
  stack_select.select_packages(params.version)
def pre_upgrade_restart(self, env, upgrade_type=None):
  """Prepare Kafka for a stack-upgrade restart.

  Selects the new stack packages when the stack supports rolling upgrade,
  then runs the Kafka ACL migration — but only when the upgrade crosses
  the version boundary that introduced KAFKA_ACL_MIGRATION_SUPPORT.

  Fix: the original fetched the current component version into
  ``pre_upgrade_version`` and never used it; the dead local is removed.
  """
  import params
  env.set_params(params)

  if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE,
                                            params.version):
    stack_select.select_packages(params.version)

  # This is extremely important since it should only be called when crossing
  # the boundary where ACL migration support was introduced.
  if params.version and params.upgrade_direction:
    if params.upgrade_direction == Direction.UPGRADE:
      src_version = upgrade_summary.get_source_version(
          "KAFKA", default_version=params.version)
      dst_version = upgrade_summary.get_target_version(
          "KAFKA", default_version=params.version)
    else:
      # On a downgrade, swap the endpoints so src/dst represent the original
      # values during the UPGRADE direction.
      src_version = upgrade_summary.get_target_version(
          "KAFKA", default_version=params.version)
      dst_version = upgrade_summary.get_source_version(
          "KAFKA", default_version=params.version)

    if not check_stack_feature(StackFeature.KAFKA_ACL_MIGRATION_SUPPORT,
                               src_version) and check_stack_feature(
                                   StackFeature.KAFKA_ACL_MIGRATION_SUPPORT,
                                   dst_version):
      # Calling the acl migration script requires the configs to be present.
      self.configure(env, upgrade_type=upgrade_type)
      upgrade.run_migration(env, upgrade_type)
def pre_upgrade_restart(self, env, upgrade_type=None):
  """Select the upgraded stack packages when rolling upgrade is supported."""
  Logger.info("Executing Stack Upgrade pre-restart")
  import params
  env.set_params(params)

  target = params.version
  if target and check_stack_feature(StackFeature.ROLLING_UPGRADE,
                                    format_stack_version(target)):
    stack_select.select_packages(target)
def pre_upgrade_restart(self, env, upgrade_type=None):
  """Prepare Hive Server Interactive for a stack-upgrade restart.

  Selects the new stack packages (when supported) and uploads the tarballs
  Hive Interactive depends on into HDFS, executing the pending HDFS
  resources only if at least one upload actually happened.
  """
  Logger.info(
      "Executing Hive Server Interactive Stack Upgrade pre-restart")
  import params
  env.set_params(params)

  if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE,
                                            params.version):
    stack_select.select_packages(params.version)

  # Copy hive.tar.gz, tez.tar.gz (hive2 flavor) and yarn tarballs to HDFS,
  # tracking whether anything new was created.
  any_created = False
  for tarball in ("hive", "tez_hive2", "yarn"):
    any_created = copy_to_hdfs(
        tarball,
        params.user_group,
        params.hdfs_user,
        skip=params.sysprep_skip_copy_tarballs_hdfs) or any_created

  if any_created:
    params.HdfsResource(None, action="execute")
def pre_upgrade_restart(self, env, upgrade_type=None):
  """
  Performs the Oozie Server tasks that precede a rolling-upgrade restart:
  back up the configuration, switch the stack packages to the new version,
  restore the configuration, and upgrade the Oozie database/sharelib.

  Does nothing when the version can't be determined or is below IOP 4.0.0.0.
  :param env:
  :return:
  """
  import params
  env.set_params(params)

  # Bail out if the version is unknown or predates IOP 4.0.0.0.
  if not params.version:
    return
  if compare_versions(format_stack_version(params.version), '4.0.0.0') < 0:
    return

  Logger.info("Executing Oozie Server Rolling Upgrade pre-restart")
  oozie_server_upgrade.backup_configuration()
  stack_select.select_packages(params.version)
  oozie_server_upgrade.restore_configuration()
  oozie_server_upgrade.upgrade_oozie()
def pre_upgrade_restart(self, env, upgrade_type=None):
  """Select the new stack packages when the stack advertises the NFS feature."""
  import params
  env.set_params(params)

  formatted = params.stack_version_formatted
  if formatted and check_stack_feature(StackFeature.NFS, formatted):
    stack_select.select_packages(params.version)
def pre_upgrade_restart(self, env, upgrade_type=None):
  """Switch to the new packages and mark a Ranger Admin upgrade as in progress."""
  import params
  env.set_params(params)
  stack_select.select_packages(params.version)
  # Drop the marker file so other code can detect the in-flight upgrade.
  self.set_ru_rangeradmin_in_progress(params.upgrade_marker_file)
def pre_upgrade_restart(self, env, upgrade_type=None):
  """Select the new stack packages on stacks at 4.2 or later."""
  import params
  env.set_params(params)

  if not Script.is_stack_greater_or_equal("4.2"):
    return
  # phoenix uses hbase configs
  stack_select.select_packages(params.version)
def pre_upgrade_restart(self, env, upgrade_type=None):
  """Select the upgraded stack packages when rolling upgrade is supported."""
  import params
  env.set_params(params)

  can_select = params.version and check_stack_feature(
      StackFeature.ROLLING_UPGRADE, params.version)
  if can_select:
    stack_select.select_packages(params.version)
def test_legacy_package_fallback(self, stack_select_select_mock,
                                 get_supported_packages_mock):
  """
  Tests that if the package specified by the JSON isn't supported by the
  stack-select tool, then the fallback legacy value is used.

  (Fixes the docstring typos "isn't support by" / "the the fallback".)
  :return:
  """
  # Only the legacy package name is reported as supported by the tool.
  get_supported_packages_mock.return_value = ["foo-legacy"]

  version = "2.5.9.9-9999"
  command_json = TestStackSelect._get_cluster_simple_upgrade_json()
  Script.config = dict()
  Script.config.update(command_json)
  Script.config.update({
      "configurations": {
          "cluster-env": {}
      },
      "clusterLevelParams": {}
  })
  Script.config["configurations"]["cluster-env"][
      "stack_packages"] = self._get_stack_packages_with_legacy()
  Script.config["clusterLevelParams"] = {"stack_name": "HDP"}

  stack_select.select_packages(version)

  # Exactly one selection, using the legacy fallback name.
  self.assertEqual(len(stack_select_select_mock.call_args_list), 1)
  self.assertEqual(stack_select_select_mock.call_args_list[0][0],
                   ("foo-legacy", version))
def pre_upgrade_restart(self, env, upgrade_type=None):
  """Select the new packages, reconfigure KMS, and re-apply the Java patch."""
  import params
  env.set_params(params)

  stack_select.select_packages(params.version)
  kms(upgrade_type=upgrade_type)
  setup_java_patch()
def pre_upgrade_restart(self, env, upgrade_type=None):
  """Select the new Atlas packages when the stack supports Atlas upgrade."""
  import params
  env.set_params(params)

  upgrade_supported = check_stack_feature(
      StackFeature.ATLAS_UPGRADE_SUPPORT, params.version_for_stack_feature_checks)
  if upgrade_supported:
    stack_select.select_packages(params.version)
def pre_upgrade_restart(self, env, upgrade_type=None):
  """
  Performs the tasks that should be done before an upgrade of oozie. This
  includes running <stack-selector-tool> and preparing the libext directory.

  Fix: the original re-tested ``params.version and
  check_stack_feature(StackFeature.ROLLING_UPGRADE, ...)`` immediately
  after the guard clause had already established it — the redundant check
  is removed. The docstring also claimed configuration backup/restore that
  this method never performed.
  :param env:
  :return:
  """
  import params
  env.set_params(params)

  # this function should not execute if the version can't be determined or
  # the stack does not support rolling upgrade
  if not (params.version and check_stack_feature(
      StackFeature.ROLLING_UPGRADE, params.version)):
    return

  Logger.info("Executing Oozie Server Stack Upgrade pre-restart")

  # The guard above already proved version + rolling-upgrade support.
  stack_select.select_packages(params.version)

  OozieUpgrade.prepare_libext_directory(upgrade_type=upgrade_type)
def pre_upgrade_restart(self, env, upgrade_type=None):
  """Prepare Kafka (IOP) for a stack-upgrade restart.

  Selects the new packages for versions >= 4.1.0.0 and runs the ACL
  migration only when the upgrade crosses the IOP 4.2 boundary.
  """
  import params
  env.set_params(params)

  if params.version and compare_versions(
      format_stack_version(params.version), '4.1.0.0') >= 0:
    stack_select.select_packages(params.version)

  # This is extremely important since it should only be called if crossing
  # the IOP 4.2 boundary.
  if params.version and params.upgrade_direction:
    if params.upgrade_direction == Direction.UPGRADE:
      src_version = upgrade_summary.get_source_version(
          "KAFKA", default_version=params.version)
      dst_version = upgrade_summary.get_target_version(
          "KAFKA", default_version=params.version)
    else:
      # Downgrade: swap so src/dst keep their meaning from the original
      # UPGRADE direction.
      src_version = upgrade_summary.get_target_version(
          "KAFKA", default_version=params.version)
      dst_version = upgrade_summary.get_source_version(
          "KAFKA", default_version=params.version)

    crosses_42 = (compare_versions(src_version, '4.2.0.0') < 0
                  and compare_versions(dst_version, '4.2.0.0') >= 0)
    if crosses_42:
      # Upgrade from IOP 4.1 to 4.2: the acl migration script requires the
      # configs to be present.
      self.configure(env, upgrade_type=upgrade_type)
      upgrade.run_migration(env, upgrade_type)
def configure(self, env, upgrade_type=None):
  """Write the Oozie server configuration; during an upgrade, re-point the
  stack symlinks at the new version first."""
  import params

  # Script.py doesn't pass upgrade_type into configure, so recover it from
  # the command's config dictionary.
  if upgrade_type is None:
    upgrade_type = Script.get_upgrade_type(
        default("/commandParams/upgrade_type", ""))

  upgrading = (upgrade_type is not None
               and params.upgrade_direction == Direction.UPGRADE
               and params.version is not None)
  if upgrading:
    Logger.info(
        format(
            "Configuring Oozie during upgrade type: {upgrade_type}, direction: {params.upgrade_direction}, and version {params.version}"
        ))
    if params.version and check_stack_feature(
        StackFeature.ROLLING_UPGRADE, params.version):
      # Point both <stack-root>/current/oozie-client and
      # <stack-root>/current/oozie-server at the newly installed oozie
      # version, e.g. <stack-root>/current/oozie-server ->
      # <stack-root>/a.b.c.d-<version>/oozie. Both symlinks must track the
      # new install.
      stack_select.select_packages(params.version)

  env.set_params(params)
  oozie(is_server=True, upgrade_type=upgrade_type)
def pre_upgrade_restart(self, env, upgrade_type=None):
  """Select the new Ranger Tagsync packages when the stack supports tagsync."""
  import params
  env.set_params(params)

  if not params.stack_supports_ranger_tagsync:
    return
  Logger.info("Executing Ranger Tagsync Stack Upgrade pre-restart")
  stack_select.select_packages(params.version)
def pre_upgrade_restart(self, env, upgrade_type=None):
  """Select the new packages when the stack advertises the ZKFC version."""
  Logger.info("Executing Stack Upgrade pre-restart")
  import params
  env.set_params(params)

  zkfc_advertised = check_stack_feature(
      StackFeature.ZKFC_VERSION_ADVERTISED,
      params.version_for_stack_feature_checks)
  if zkfc_advertised:
    stack_select.select_packages(params.version)
def pre_upgrade_restart(self, env, upgrade_type=None):
  """Select the new stack packages when the target version is >= 4.0.0.0."""
  import params
  env.set_params(params)

  if not params.version:
    return
  if compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
    stack_select.select_packages(params.version)
def test_select_package_for_standard_orchestration(
    self, stack_select_select_mock, get_supported_packages_mock):
  """
  Tests that select_packages() selects both the master and client packages
  for a standard (non-legacy) upgrade orchestration.

  Fix: the original docstring was copy-pasted from an exception test
  ("Tests that missing the service & role throws an excpetion") and did
  not describe this test at all.
  :return:
  """
  get_supported_packages_mock.return_value = TestStackSelect._get_supported_packages(
  )

  version = "2.5.9.9-9999"
  command_json = TestStackSelect._get_cluster_simple_upgrade_json()
  Script.config = dict()
  Script.config.update(command_json)
  Script.config.update({
      "configurations": {
          "cluster-env": {}
      },
      "clusterLevelParams": {}
  })
  Script.config["configurations"]["cluster-env"][
      "stack_packages"] = self._get_stack_packages()
  Script.config["clusterLevelParams"] = {"stack_name": "HDP"}

  stack_select.select_packages(version)

  # Both the master and the client package should be selected, in order.
  self.assertEqual(len(stack_select_select_mock.call_args_list), 2)
  self.assertEqual(stack_select_select_mock.call_args_list[0][0],
                   ("foo-master", version))
  self.assertEqual(stack_select_select_mock.call_args_list[1][0],
                   ("foo-client", version))
def pre_upgrade_restart(self, env, upgrade_type=None):
  """Select the new stack packages when the stack includes Phoenix."""
  import params
  env.set_params(params)

  stack_version = params.stack_version_formatted
  if stack_version and check_stack_feature(StackFeature.PHOENIX, stack_version):
    # phoenix uses hbase configs
    stack_select.select_packages(params.version)
def pre_upgrade_restart(self, env, upgrade_type=None):
  """Switch the Spark Client binaries to the target stack version."""
  import params
  env.set_params(params)

  if not (params.version and check_stack_feature(
      StackFeature.ROLLING_UPGRADE, params.version)):
    return
  Logger.info("Executing Spark Client Stack Upgrade pre-restart")
  # TODO: change to "spark" after the RPM switches the name
  stack_select.select_packages(params.version)
def pre_upgrade_restart(self, env, upgrade_type=None):
  """Select the new WebHCat packages when rolling upgrade is supported."""
  Logger.info("Executing WebHCat Stack Upgrade pre-restart")
  import params
  env.set_params(params)

  if not (params.version and check_stack_feature(
      StackFeature.ROLLING_UPGRADE, params.version)):
    return
  # webhcat has no conf, but uses hadoop home, so verify that regular hadoop conf is set
  stack_select.select_packages(params.version)
def pre_upgrade_restart(self, env, upgrade_type=None):
  """Select the upgraded druid packages for this node type before restart."""
  # NOTE: format() below resolves node_type_lower from the local scope, so
  # this local name must stay as-is.
  node_type_lower = self.nodeType.lower()
  Logger.info(format("Executing druid-{node_type_lower} Upgrade pre-restart"))
  import params
  env.set_params(params)

  if not params.stack_version:
    return
  if check_stack_feature(StackFeature.ROLLING_UPGRADE, params.stack_version):
    stack_select.select_packages(params.stack_version)
def pre_upgrade_restart(self, env, upgrade_type=None):
  """Select the new stack packages, except when downgrading an Express Upgrade.

  When an Express Upgrade is being downgraded, the first step is to revert
  the symlinks, so package selection must be skipped in that one scenario.
  """
  Logger.info("Executing Stack Upgrade pre-restart")
  import params
  env.set_params(params)

  express_downgrade = (
      upgrade_type == constants.UPGRADE_TYPE_NON_ROLLING
      and params.upgrade_direction == Direction.DOWNGRADE)
  if not express_downgrade:
    stack_select.select_packages(params.version)
def set_pre_start(self, env):
  """Verify an upgrade stack can be determined, then select the new packages.

  Raises Fail when the stack/version cannot be resolved.
  """
  import params
  env.set_params(params)

  current_upgrade_stack = stack_select._get_upgrade_stack()
  if current_upgrade_stack is None:
    raise Fail('Unable to determine the stack and stack version')

  stack_select.select_packages(params.version)
def pre_upgrade_restart(self, env, upgrade_type=None):
  """Select the new WebHCat packages for target versions >= 4.1.0.0."""
  Logger.info("Executing WebHCat Stack Upgrade pre-restart")
  import params
  env.set_params(params)

  if not params.version:
    return
  if compare_versions(format_stack_version(params.version), '4.1.0.0') >= 0:
    # webhcat has no conf, but uses hadoop home, so verify that regular hadoop conf is set
    stack_select.select_packages(params.version)
def pre_upgrade_restart(self, env, upgrade_type=None):
  """Select the new stack packages and push the yarn tarball to HDFS before
  the component restarts during a stack upgrade.

  Fix: the log message said "post-restart" even though this is the
  pre-restart hook (cf. the sibling pre_upgrade_restart implementations).
  """
  Logger.info("Executing Stack Upgrade pre-restart")
  import params
  env.set_params(params)

  if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE,
                                            params.version):
    stack_select.select_packages(params.version)
    # MC Hammer said, "Can't touch this"
    copy_to_hdfs(
        "yarn",
        params.user_group,
        params.hdfs_user,
        skip=params.sysprep_skip_copy_tarballs_hdfs)
    params.HdfsResource(None, action="execute")
def pre_upgrade_restart(self, env, upgrade_type=None):
  """Select the new Accumulo Client packages before an upgrade restart.

  Does nothing when the version can't be determined or the stack does not
  support rolling upgrade.
  """
  import params
  env.set_params(params)

  rolling_supported = (
      params.stack_version_formatted
      and check_stack_feature(StackFeature.ROLLING_UPGRADE,
                              params.stack_version_formatted))
  if not rolling_supported:
    return

  Logger.info("Executing Accumulo Client Upgrade pre-restart")
  stack_select.select_packages(params.version)