def pre_rolling_restart(self, env):
  import params
  env.set_params(params)

  if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
    absolute_backup_dir = None
    if params.upgrade_direction and params.upgrade_direction == Direction.UPGRADE:
      Logger.info("Backing up directories. Initial conf folder: %s" % os.path.realpath(params.knox_conf_dir))

      # This will back up the contents of the conf directory into /tmp/knox-upgrade-backup/knox-conf-backup.tar
      absolute_backup_dir = upgrade.backup_data()

    # conf-select will change the symlink to the conf folder.
    conf_select.select(params.stack_name, "knox", params.version)
    hdp_select.select("knox-server", params.version)

    # Extract the tar of the old conf folder into the new conf directory
    if absolute_backup_dir is not None and params.upgrade_direction and params.upgrade_direction == Direction.UPGRADE:
      conf_tar_source_path = os.path.join(absolute_backup_dir, upgrade.BACKUP_CONF_ARCHIVE)
      if os.path.exists(conf_tar_source_path):
        extract_dir = os.path.realpath(params.knox_conf_dir)
        conf_tar_dest_path = os.path.join(extract_dir, upgrade.BACKUP_CONF_ARCHIVE)
        Logger.info("Copying %s to %s." % (upgrade.BACKUP_CONF_ARCHIVE, conf_tar_dest_path))
        Execute(('cp', conf_tar_source_path, conf_tar_dest_path),
                sudo=True,
        )

        tar_archive.untar_archive(conf_tar_source_path, extract_dir)

        File(conf_tar_dest_path,
             action="delete",
        )
def pre_upgrade_restart(self, env, upgrade_type=None): import params env.set_params(params) if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0: conf_select.select(params.stack_name, "storm", params.version) hdp_select.select("storm-client", params.version)
def configure(self, env, upgrade_type=None):
  import params

  # The configure command doesn't actually receive the upgrade_type from Script.py, so get it from the config dictionary
  if upgrade_type is None:
    restart_type = default("/commandParams/restart_type", "")
    if restart_type.lower() == "rolling_upgrade":
      upgrade_type = UPGRADE_TYPE_ROLLING
    elif restart_type.lower() == "nonrolling_upgrade":
      upgrade_type = UPGRADE_TYPE_NON_ROLLING

  if upgrade_type is not None and params.upgrade_direction == Direction.UPGRADE and params.version is not None:
    Logger.info(format("Configuring Oozie during upgrade type: {upgrade_type}, direction: {params.upgrade_direction}, and version {params.version}"))
    if compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      # In order for "/usr/hdp/current/oozie-<client/server>" to point to the new version of
      # oozie, we need to create the symlinks for both the server and the client.
      # This is required as both need to point to the newly installed oozie version.

      # Sets the symlink: e.g. /usr/hdp/current/oozie-client -> /usr/hdp/2.3.x.y-<version>/oozie
      hdp_select.select("oozie-client", params.version)
      # Sets the symlink: e.g. /usr/hdp/current/oozie-server -> /usr/hdp/2.3.x.y-<version>/oozie
      hdp_select.select("oozie-server", params.version)

    if compare_versions(format_hdp_stack_version(params.version), '2.3.0.0') >= 0:
      conf_select.select(params.stack_name, "oozie", params.version)

  env.set_params(params)
  oozie(is_server=True)
def zookeeper_service(action='start', rolling_restart=False):
  import params

  # This path may be missing after Ambari upgrade. We need to create it.
  if not rolling_restart and not os.path.exists("/usr/hdp/current/zookeeper-server") and params.current_version:
    conf_select.select(params.stack_name, "zookeeper", params.current_version)
    hdp_select.select("zookeeper-server", params.version)

  cmd = format("env ZOOCFGDIR={config_dir} ZOOCFG=zoo.cfg {zk_bin}/zkServer.sh")

  if action == 'start':
    daemon_cmd = format("source {config_dir}/zookeeper-env.sh ; {cmd} start")
    no_op_test = format("ls {zk_pid_file} >/dev/null 2>&1 && ps -p `cat {zk_pid_file}` >/dev/null 2>&1")
    Execute(daemon_cmd,
            not_if=no_op_test,
            user=params.zk_user
    )

    if params.security_enabled:
      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
      Execute(kinit_cmd,
              user=params.smokeuser
      )
  elif action == 'stop':
    daemon_cmd = format("source {config_dir}/zookeeper-env.sh ; {cmd} stop")
    rm_pid = format("rm -f {zk_pid_file}")
    Execute(daemon_cmd,
            user=params.zk_user
    )
    Execute(rm_pid)
def pre_upgrade_restart(self, env, upgrade_type=None): import params env.set_params(params) if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0: hdp_select.select("kafka-broker", params.version) if params.version and compare_versions(format_hdp_stack_version(params.version), '2.3.0.0') >= 0: conf_select.select(params.stack_name, "kafka", params.version) # This is extremely important since it should only be called if crossing the HDP 2.3.4.0 boundary. if params.current_version and params.version and params.upgrade_direction: src_version = dst_version = None if params.upgrade_direction == Direction.UPGRADE: src_version = format_hdp_stack_version(params.current_version) dst_version = format_hdp_stack_version(params.version) else: # These represent the original values during the UPGRADE direction src_version = format_hdp_stack_version(params.version) dst_version = format_hdp_stack_version(params.downgrade_from_version) if compare_versions(src_version, '2.3.4.0') < 0 and compare_versions(dst_version, '2.3.4.0') >= 0: # Calling the acl migration script requires the configs to be present. self.configure(env, upgrade_type=upgrade_type) upgrade.run_migration(env, upgrade_type)
def pre_rolling_restart(self, env): import params env.set_params(params) if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0: conf_select.select(params.stack_name, "hadoop", params.version) hdp_select.select("hadoop-client", params.version)
def pre_upgrade_restart(self, env, upgrade_type=None):
  import params
  env.set_params(params)

  if Script.is_hdp_stack_greater_or_equal('2.3.0.0'):
    conf_select.select(params.stack_name, "hadoop", params.version)
    hdp_select.select("hadoop-hdfs-nfs3", params.version)
def pre_upgrade_restart(self, env, upgrade_type=None): Logger.info("Executing DataNode Stack Upgrade pre-restart") import params env.set_params(params) if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0: conf_select.select(params.stack_name, "hadoop", params.version) hdp_select.select("hadoop-hdfs-datanode", params.version)
def pre_upgrade_restart(self, env, upgrade_type=None): Logger.info("Executing Stack Upgrade pre-restart") import params env.set_params(params) conf_select.select(params.stack_name, "mahout", params.version) hdp_select.select("mahout-client", params.version )
def pre_rolling_restart(self, env):
  import params
  env.set_params(params)

  if params.version and compare_versions(format_hdp_stack_version(params.version), '2.3.2.0') >= 0:
    conf_select.select(params.stack_name, "spark", params.version)
    hdp_select.select("spark-thriftserver", params.version)
def pre_upgrade_restart(self, env, upgrade_type=None): """ Performs the tasks that should be done before an upgrade of oozie. This includes: - backing up configurations - running hdp-select and conf-select - restoring configurations - preparing the libext directory :param env: :return: """ import params env.set_params(params) # this function should not execute if the version can't be determined or # is not at least HDP 2.2.0.0 if not params.version or compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') < 0: return Logger.info("Executing Oozie Server Stack Upgrade pre-restart") OozieUpgrade.backup_configuration() if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0: conf_select.select(params.stack_name, "oozie", params.version) hdp_select.select("oozie-server", params.version) OozieUpgrade.restore_configuration() OozieUpgrade.prepare_libext_directory()
def pre_rolling_restart(self, env): Logger.info("Executing Rolling Upgrade pre-restart") import params env.set_params(params) if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0: conf_select.select(params.stack_name, "zookeeper", params.version) hdp_select.select("zookeeper-client", params.version)
def pre_rolling_restart(self, env): Logger.info("Executing Rolling Upgrade post-restart") import params env.set_params(params) if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0: conf_select.select(params.stack_name, "hadoop", params.version) hdp_select.select("hadoop-yarn-resourcemanager", params.version)
def pre_upgrade_restart(self, env, upgrade_type=None): import params env.set_params(params) if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0: Logger.info("Executing Spark Client Stack Upgrade pre-restart") conf_select.select(params.stack_name, "spark", params.version) hdp_select.select("spark-client", params.version)
def pre_upgrade_restart(self, env, upgrade_type=None): Logger.info("Executing Stack Upgrade pre-restart") import params env.set_params(params) if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0: conf_select.select(params.stack_name, "zookeeper", params.version) hdp_select.select("zookeeper-server", params.version)
def pre_rolling_restart(self, env): Logger.info("Executing DataNode Rolling Upgrade pre-restart") import params env.set_params(params) if params.version and compare_versions(format_hdp_stack_version(params.version), "2.2.0.0") >= 0: conf_select.select(params.stack_name, "hadoop", params.version) hdp_select.select("hadoop-hdfs-datanode", params.version)
def pre_rolling_restart(self, env): import params env.set_params(params) if Script.is_hdp_stack_greater_or_equal("2.3"): # phoenix uses hbase configs conf_select.select(params.stack_name, "hbase", params.version) hdp_select.select("phoenix-server", params.version)
def pre_upgrade_restart(self, env, upgrade_type=None): Logger.info("Executing WebHCat Stack Upgrade pre-restart") import params env.set_params(params) if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0: # webhcat has no conf, but uses hadoop home, so verify that regular hadoop conf is set conf_select.select(params.stack_name, "hive-hcatalog", params.version) conf_select.select(params.stack_name, "hadoop", params.version) hdp_select.select("hive-webhcat", params.version)
def pre_rolling_restart(self, env): import params env.set_params(params) # this function should not execute if the version can't be determined or # is not at least HDP 2.2.0.0 if not params.version or compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') < 0: return Logger.info("Executing Oozie Client Rolling Upgrade pre-restart") conf_select.select(params.stack_name, "oozie", params.version) hdp_select.select("oozie-client", params.version)
def pre_rolling_restart(self, env): import params env.set_params(params) # this function should not execute if the version can't be determined or # is not at least HDP 2.2.0.0 if Script.is_hdp_stack_less_than("2.2"): return Logger.info("Executing Accumulo Client Rolling Upgrade pre-restart") conf_select.select(params.stack_name, "accumulo", params.version) hdp_select.select("accumulo-client", params.version)
def pre_rolling_restart(self, env): Logger.info("Executing Metastore Rolling Upgrade pre-restart") import params env.set_params(params) if Script.is_hdp_stack_greater_or_equal("2.3"): self.upgrade_schema(env) if params.version and compare_versions(format_hdp_stack_version(params.version), "2.2.0.0") >= 0: conf_select.select(params.stack_name, "hive", params.version) hdp_select.select("hive-metastore", params.version)
def pre_rolling_restart(self, env): Logger.info("Executing Rolling Upgrade pre-restart") import params env.set_params(params) if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0: conf_select.select(params.stack_name, "hadoop", params.version) hdp_select.select("hadoop-mapreduce-historyserver", params.version) # MC Hammer said, "Can't touch this" copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped) copy_to_hdfs("tez", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped) params.HdfsResource(None, action="execute")
def pre_rolling_restart(self, env): import params env.set_params(params) # this function should not execute if the version can't be determined or # is not at least HDP 2.2.0.0 if Script.is_hdp_stack_less_than("2.2"): return Logger.info("Executing Falcon Server Rolling Upgrade pre-restart") conf_select.select(params.stack_name, "falcon", params.version) hdp_select.select("falcon-server", params.version) falcon_server_upgrade.pre_start_restore()
def pre_upgrade_restart(self, env, upgrade_type=None): Logger.info("Executing Stack Upgrade pre-restart") import params env.set_params(params) if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0: # When downgrading an Express Upgrade, the first thing we do is to revert the symlinks. # Therefore, we cannot call this code in that scenario. call_if = [("rolling", "upgrade"), ("rolling", "downgrade"), ("nonrolling", "upgrade")] for e in call_if: if (upgrade_type, params.upgrade_direction) == e: conf_select.select(params.stack_name, "hadoop", params.version) hdp_select.select("hadoop-hdfs-namenode", params.version)
def pre_rolling_restart(self, env): Logger.info("Executing Rolling Upgrade pre-restart") import params env.set_params(params) if params.version and compare_versions( format_hdp_stack_version(params.version), '2.2.0.0') >= 0: conf_select.select(params.stack_name, "hadoop", params.version) hdp_select.select("hadoop-mapreduce-historyserver", params.version) # MC Hammer said, "Can't touch this" copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user) copy_to_hdfs("tez", params.user_group, params.hdfs_user) params.HdfsResource(None, action="execute")
def pre_upgrade_restart(self, env, upgrade_type=None): import params env.set_params(params) if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0: conf_select.select(params.stack_name, "slider", params.version) hdp_select.select("slider-client", params.version) # also set all of the hadoop clients since slider client is upgraded as # part of the final "CLIENTS" group and we need to ensure that # hadoop-client is also set conf_select.select(params.stack_name, "hadoop", params.version) hdp_select.select("hadoop-client", params.version)
def pre_rolling_restart(self, env): import params env.set_params(params) # this function should not execute if the version can't be determined or # is not at least HDP 2.2.0.0 if not params.version or Script.is_hdp_stack_less_than("2.2"): return Logger.info("Executing Flume Rolling Upgrade pre-restart") conf_select.select(params.stack_name, "flume", params.version) hdp_select.select("flume-server", params.version) flume_upgrade.pre_start_restore()
def pre_rolling_restart(self, env):
  import params
  env.set_params(params)

  # this function should not execute if the version can't be determined or
  # is not at least HDP 2.2.0.0
  if not params.version or compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') < 0:
    return

  Logger.info("Executing Oozie Client Rolling Upgrade pre-restart")
  conf_select.select(params.stack_name, "oozie", params.version)
  hdp_select.select("oozie-client", params.version)
def pre_rolling_restart(self, env): import params env.set_params(params) if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0: conf_select.select(params.stack_name, "slider", params.version) hdp_select.select("slider-client", params.version) # also set all of the hadoop clients since slider client is upgraded as # part of the final "CLIENTS" group and we need to ensure that # hadoop-client is also set conf_select.select(params.stack_name, "hadoop", params.version) hdp_select.select("hadoop-client", params.version)
def pre_upgrade_restart(self, env, upgrade_type=None): Logger.info("Executing Stack Upgrade pre-restart") import params env.set_params(params) # this function should not execute if the version can't be determined or # is not at least HDP 2.2.0.0 if not params.version or compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') < 0: return Logger.info("Executing Falcon Client Stack Upgrade pre-restart") conf_select.select(params.stack_name, "falcon", params.version) hdp_select.select("falcon-client", params.version)
def pre_upgrade_restart(self, env, upgrade_type=None): Logger.info("Executing Stack Upgrade pre-restart") import params env.set_params(params) # this function should not execute if the version can't be determined or # is not at least HDP 2.2.0.0 if Script.is_hdp_stack_less_than("2.2"): return Logger.info("Executing Falcon Server Rolling Upgrade pre-restart") conf_select.select(params.stack_name, "falcon", params.version) hdp_select.select("falcon-server", params.version) falcon_server_upgrade.pre_start_restore()
def pre_rolling_restart(self, env): Logger.info("Executing HiveServer2 Rolling Upgrade pre-restart") import params env.set_params(params) if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0: conf_select.select(params.stack_name, "hive", params.version) hdp_select.select("hive-server2", params.version) # Copy mapreduce.tar.gz and tez.tar.gz to HDFS resource_created = copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user) resource_created = copy_to_hdfs("tez", params.user_group, params.hdfs_user) or resource_created if resource_created: params.HdfsResource(None, action="execute")
def pre_upgrade_restart(self, env, upgrade_type=None): Logger.info("Executing Metastore Stack Upgrade pre-restart") import params env.set_params(params) is_stack_hdp_23 = Script.is_hdp_stack_greater_or_equal("2.3") is_upgrade = params.upgrade_direction == Direction.UPGRADE if is_stack_hdp_23 and is_upgrade: self.upgrade_schema(env) if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0: conf_select.select(params.stack_name, "hive", params.version) hdp_select.select("hive-metastore", params.version)
def pre_upgrade_restart(self, env, upgrade_type=None): Logger.info("Executing Metastore Stack Upgrade pre-restart") import params env.set_params(params) if Script.is_hdp_stack_greater_or_equal("2.3"): # ensure that configurations are written out before trying to upgrade the schema # since the schematool needs configs and doesn't know how to use the hive conf override self.configure(env) self.upgrade_schema(env) if params.version and compare_versions( format_hdp_stack_version(params.version), '2.2.0.0') >= 0: conf_select.select(params.stack_name, "hive", params.version) hdp_select.select("hive-metastore", params.version)
def pre_rolling_restart(self, env): import params env.set_params(params) # this function should not execute if the version can't be determined or # is not at least HDP 2.2.0.0 if not params.version or Script.is_hdp_stack_less_than("2.2"): return Logger.info("Executing Flume Rolling Upgrade pre-restart") conf_select.select(params.stack_name, "flume", params.version) hdp_select.select("flume-server", params.version) # only restore on upgrade, not downgrade if params.upgrade_direction == Direction.UPGRADE: flume_upgrade.pre_start_restore()
def pre_upgrade_restart(self, env, upgrade_type=None): import params env.set_params(params) # this function should not execute if the version can't be determined or # is not at least HDP 2.2.0.0 if not params.version or Script.is_hdp_stack_less_than("2.2"): return Logger.info("Executing Flume Stack Upgrade pre-restart") conf_select.select(params.stack_name, "flume", params.version) hdp_select.select("flume-server", params.version) # only restore on upgrade, not downgrade if params.upgrade_direction == Direction.UPGRADE: flume_upgrade.pre_start_restore()
def configure(self, env, upgrade_type=None): import params if upgrade_type == "nonrolling" and params.upgrade_direction == Direction.UPGRADE and \ params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0: conf_select.select(params.stack_name, "oozie", params.version) # In order for the "/usr/hdp/current/oozie-<client/server>" point to the new version of # oozie, we need to create the symlinks both for server and client. # This is required as both need to be pointing to new installed oozie version. # Sets the symlink : eg: /usr/hdp/current/oozie-client -> /usr/hdp/2.3.x.y-<version>/oozie hdp_select.select("oozie-client", params.version) # Sets the symlink : eg: /usr/hdp/current/oozie-server -> /usr/hdp/2.3.x.y-<version>/oozie hdp_select.select("oozie-server", params.version) env.set_params(params) oozie(is_server=True)
def pre_rolling_restart(self, env): import params env.set_params(params) if params.version and compare_versions( format_hdp_stack_version(params.version), '2.2.0.0') >= 0: conf_select.select(params.stack_name, "spark", params.version) hdp_select.select("spark-historyserver", params.version) # Spark 1.3.1.2.3, and higher, which was included in HDP 2.3, does not have a dependency on Tez, so it does not # need to copy the tarball, otherwise, copy it. if compare_versions(format_hdp_stack_version(params.version), '2.3.0.0') < 0: resource_created = copy_to_hdfs("tez", params.user_group, params.hdfs_user) if resource_created: params.HdfsResource(None, action="execute")
def pre_rolling_restart(self, env): import params env.set_params(params) if params.version and compare_versions(format_hdp_stack_version(params.version), "2.2.0.0") >= 0: conf_select.select(params.stack_name, "spark", params.version) hdp_select.select("spark-historyserver", params.version) # Spark 1.3.1.2.3, and higher, which was included in HDP 2.3, does not have a dependency on Tez, so it does not # need to copy the tarball, otherwise, copy it. if params.version and compare_versions(format_hdp_stack_version(params.version), "2.3.0.0") < 0: resource_created = copy_to_hdfs( "tez", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped ) if resource_created: params.HdfsResource(None, action="execute")
def pre_rolling_restart(self, env): Logger.info("Executing HiveServer2 Rolling Upgrade pre-restart") import params env.set_params(params) if params.version and compare_versions( format_hdp_stack_version(params.version), '2.2.0.0') >= 0: conf_select.select(params.stack_name, "hive", params.version) hdp_select.select("hive-server2", params.version) # Copy mapreduce.tar.gz and tez.tar.gz to HDFS resource_created = copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user) resource_created = copy_to_hdfs( "tez", params.user_group, params.hdfs_user) or resource_created if resource_created: params.HdfsResource(None, action="execute")
def pre_rolling_restart(self, env):
  import params
  env.set_params(params)

  if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
    absolute_backup_dir = None
    if params.upgrade_direction and params.upgrade_direction == Direction.UPGRADE:
      Logger.info("Backing up directories. Initial conf folder: %s" % os.path.realpath(params.knox_conf_dir))

      # This will back up the contents of the conf directory into /tmp/knox-upgrade-backup/knox-conf-backup.tar
      absolute_backup_dir = upgrade.backup_data()

    # conf-select will change the symlink to the conf folder.
    conf_select.select(params.stack_name, "knox", params.version)
    hdp_select.select("knox-server", params.version)

    # Extract the tar of the old conf folder into the new conf directory
    if absolute_backup_dir is not None and params.upgrade_direction and params.upgrade_direction == Direction.UPGRADE:
      conf_tar_source_path = os.path.join(absolute_backup_dir, upgrade.BACKUP_CONF_ARCHIVE)
      if os.path.exists(conf_tar_source_path):
        extract_dir = os.path.realpath(params.knox_conf_dir)
        conf_tar_dest_path = os.path.join(extract_dir, upgrade.BACKUP_CONF_ARCHIVE)
        Logger.info("Copying %s to %s." % (upgrade.BACKUP_CONF_ARCHIVE, conf_tar_dest_path))
        Execute("cp %s %s" % (conf_tar_source_path, conf_tar_dest_path))

        tarball = None
        try:
          tarball = tarfile.open(conf_tar_source_path, "r")
          Logger.info("Extracting %s into %s." % (upgrade.BACKUP_CONF_ARCHIVE, extract_dir))
          tarball.extractall(extract_dir)
          Logger.info("Deleting temporary tar at %s" % conf_tar_dest_path)
          Execute("rm %s" % conf_tar_dest_path)
        finally:
          if tarball:
            tarball.close()
def pre_rolling_restart(self, env):
  import params
  env.set_params(params)

  if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
    absolute_backup_dir = None
    if params.upgrade_direction and params.upgrade_direction == Direction.UPGRADE:
      Logger.info("Backing up directories. Initial conf folder: %s" % os.path.realpath(params.knox_conf_dir))

      # This will back up the contents of the conf directory into /tmp/knox-upgrade-backup/knox-conf-backup.tar
      absolute_backup_dir = upgrade.backup_data()

    # conf-select will change the symlink to the conf folder.
    conf_select.select(params.stack_name, "knox", params.version)
    hdp_select.select("knox-server", params.version)

    # Extract the tar of the old conf folder into the new conf directory
    if absolute_backup_dir is not None and params.upgrade_direction and params.upgrade_direction == Direction.UPGRADE:
      conf_tar_source_path = os.path.join(absolute_backup_dir, upgrade.BACKUP_CONF_ARCHIVE)
      if os.path.exists(conf_tar_source_path):
        extract_dir = os.path.realpath(params.knox_conf_dir)
        conf_tar_dest_path = os.path.join(extract_dir, upgrade.BACKUP_CONF_ARCHIVE)
        Logger.info("Copying %s to %s." % (upgrade.BACKUP_CONF_ARCHIVE, conf_tar_dest_path))
        Execute(('cp', conf_tar_source_path, conf_tar_dest_path),
                sudo=True,
        )
        Execute(('tar', '-xvf', conf_tar_source_path, '-C', extract_dir),
                sudo=True,
        )
        File(conf_tar_dest_path,
             action="delete",
        )
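# Design note on the variant above: the tuple form of Execute with sudo=True
# runs the command argv-style (no shell), so paths containing spaces or shell
# metacharacters are passed through safely, unlike the "cp %s %s" string form
# in the earlier tarfile-based variant of this function.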
def pre_upgrade_restart(self, env, upgrade_type=None): import params env.set_params(params) if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0: Logger.info("Executing Spark Job History Server Stack Upgrade pre-restart") conf_select.select(params.stack_name, "spark", params.version) hdp_select.select("spark-historyserver", params.version) # Spark 1.3.1.2.3, and higher, which was included in HDP 2.3, does not have a dependency on Tez, so it does not # need to copy the tarball, otherwise, copy it. if params.version and compare_versions(format_hdp_stack_version(params.version), '2.3.0.0') < 0: resource_created = copy_to_hdfs( "tez", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped) if resource_created: params.HdfsResource(None, action="execute")
def pre_upgrade_restart(self, env, upgrade_type=None): """ Execute hdp-select before reconfiguring this client to the new HDP version. :param env: :param upgrade_type: :return: """ Logger.info("Executing Hive HCat Client Stack Upgrade pre-restart") import params env.set_params(params) # this function should not execute if the version can't be determined or # is not at least HDP 2.2.0.0 if not params.version or compare_versions(params.version, "2.2", format=True) < 0: return # HCat client doesn't have a first-class entry in hdp-select. Since clients always # update after daemons, this ensures that the hcat directories are correct on hosts # which do not include the WebHCat daemon hdp_select.select("hive-webhcat", params.version)
def pre_upgrade_restart(self, env, upgrade_type=None): import params env.set_params(params) if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0: conf_select.select(params.stack_name, "hbase", params.version) hdp_select.select("hbase-client", params.version) # phoenix may not always be deployed try: hdp_select.select("phoenix-client", params.version) except Exception as e: print "Ignoring error due to missing phoenix-client" print str(e) # set all of the hadoop clients since hbase client is upgraded as part # of the final "CLIENTS" group and we need to ensure that hadoop-client # is also set conf_select.select(params.stack_name, "hadoop", params.version) hdp_select.select("hadoop-client", params.version)
def prestart(env, hdp_component):
  import params

  if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
    conf_select.select(params.stack_name, "hbase", params.version)
    hdp_select.select(hdp_component, params.version)
def zookeeper(type=None, rolling_restart=False):
  import params

  Directory(params.config_dir,
            owner=params.zk_user,
            recursive=True,
            group=params.user_group
  )

  File(os.path.join(params.config_dir, "zookeeper-env.sh"),
       content=InlineTemplate(params.zk_env_sh_template),
       owner=params.zk_user,
       group=params.user_group
  )

  configFile("zoo.cfg", template_name="zoo.cfg.j2")
  configFile("configuration.xsl", template_name="configuration.xsl.j2")

  Directory(params.zk_pid_dir,
            owner=params.zk_user,
            recursive=True,
            group=params.user_group
  )

  Directory(params.zk_log_dir,
            owner=params.zk_user,
            recursive=True,
            group=params.user_group
  )

  Directory(params.zk_data_dir,
            owner=params.zk_user,
            recursive=True,
            cd_access="a",
            group=params.user_group
  )

  if type == 'server':
    myid = str(sorted(params.zookeeper_hosts).index(params.hostname) + 1)

    File(os.path.join(params.zk_data_dir, "myid"),
         mode=0644,
         content=myid
    )

    # This path may be missing after Ambari upgrade. We need to create it.
    if (not rolling_restart) and (not os.path.exists("/usr/hdp/current/zookeeper-server")) and params.current_version:
      conf_select.select(params.stack_name, "zookeeper", params.current_version)
      hdp_select.select("zookeeper-server", params.version)

  if (params.log4j_props != None):
    File(os.path.join(params.config_dir, "log4j.properties"),
         mode=0644,
         group=params.user_group,
         owner=params.zk_user,
         content=params.log4j_props
    )
  elif (os.path.exists(os.path.join(params.config_dir, "log4j.properties"))):
    File(os.path.join(params.config_dir, "log4j.properties"),
         mode=0644,
         group=params.user_group,
         owner=params.zk_user
    )

  if params.security_enabled:
    if type == "server":
      configFile("zookeeper_jaas.conf", template_name="zookeeper_jaas.conf.j2")
      configFile("zookeeper_client_jaas.conf", template_name="zookeeper_client_jaas.conf.j2")
    else:
      configFile("zookeeper_client_jaas.conf", template_name="zookeeper_client_jaas.conf.j2")

  File(os.path.join(params.config_dir, "zoo_sample.cfg"),
       owner=params.zk_user,
       group=params.user_group
  )
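# A small illustration (hypothetical host names) of how the myid above is
# derived: each server's id is its 1-based position in the sorted host list,
# so every node computes the same set of ids without any coordination.
zookeeper_hosts = ["zk3.example.com", "zk1.example.com", "zk2.example.com"]
hostname = "zk2.example.com"
myid = str(sorted(zookeeper_hosts).index(hostname) + 1)
assert myid == "2"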
def zookeeper(type=None, upgrade_type=None):
  import params

  if type == 'server':
    # This path may be missing after Ambari upgrade. We need to create it before
    # any configs are applied.
    if upgrade_type is None and not os.path.exists("/usr/hdp/current/zookeeper-server") and params.current_version \
        and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
      conf_select.select(params.stack_name, "zookeeper", params.current_version)
      hdp_select.select("zookeeper-server", params.version)

  Directory(params.config_dir,
            owner=params.zk_user,
            recursive=True,
            group=params.user_group
  )

  File(os.path.join(params.config_dir, "zookeeper-env.sh"),
       content=InlineTemplate(params.zk_env_sh_template),
       owner=params.zk_user,
       group=params.user_group
  )

  configFile("zoo.cfg", template_name="zoo.cfg.j2")
  configFile("configuration.xsl", template_name="configuration.xsl.j2")

  Directory(params.zk_pid_dir,
            owner=params.zk_user,
            recursive=True,
            group=params.user_group,
            mode=0755,
  )

  Directory(params.zk_log_dir,
            owner=params.zk_user,
            recursive=True,
            group=params.user_group,
            mode=0755,
  )

  Directory(params.zk_data_dir,
            owner=params.zk_user,
            recursive=True,
            cd_access="a",
            group=params.user_group,
            mode=0755,
  )

  if type == 'server':
    myid = str(sorted(params.zookeeper_hosts).index(params.hostname) + 1)

    File(os.path.join(params.zk_data_dir, "myid"),
         mode=0644,
         content=myid
    )

  if (params.log4j_props != None):
    File(os.path.join(params.config_dir, "log4j.properties"),
         mode=0644,
         group=params.user_group,
         owner=params.zk_user,
         content=params.log4j_props
    )
  elif (os.path.exists(os.path.join(params.config_dir, "log4j.properties"))):
    File(os.path.join(params.config_dir, "log4j.properties"),
         mode=0644,
         group=params.user_group,
         owner=params.zk_user
    )

  if params.security_enabled:
    if type == "server":
      configFile("zookeeper_jaas.conf", template_name="zookeeper_jaas.conf.j2")
      configFile("zookeeper_client_jaas.conf", template_name="zookeeper_client_jaas.conf.j2")
    else:
      configFile("zookeeper_client_jaas.conf", template_name="zookeeper_client_jaas.conf.j2")

  File(os.path.join(params.config_dir, "zoo_sample.cfg"),
       owner=params.zk_user,
       group=params.user_group
  )
def pre_rolling_restart(self, env): import params env.set_params(params) conf_select.select(params.stack_name, "mahout", params.version) hdp_select.select("mahout-client", params.version)
def prestart(env, hdp_component):
  import params

  if params.version and params.stack_is_hdp22_or_further:
    conf_select.select(params.stack_name, hdp_component, params.version)
    hdp_select.select(hdp_component, params.version)
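# A hedged sketch of how a component script might invoke the prestart helper
# above from its own pre-restart hook; "hbase-regionserver" and the `upgrade`
# module name are illustrative assumptions, not confirmed by these snippets.
def pre_rolling_restart(self, env):
  import params
  env.set_params(params)
  upgrade.prestart(env, "hbase-regionserver")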