def oozie(is_server=False): import params if is_server: params.HdfsResource(params.oozie_hdfs_user_dir, type="directory", action="create_on_execute", owner=params.oozie_user, mode=params.oozie_hdfs_user_mode) params.HdfsResource(None, action="execute") Directory(params.conf_dir, create_parents=True, owner=params.oozie_user, group=params.user_group) XmlConfig( "oozie-site.xml", conf_dir=params.conf_dir, configurations=params.oozie_site, configuration_attributes=params.config['configuration_attributes'] ['oozie-site'], owner=params.oozie_user, group=params.user_group, mode=0660) File( format("{conf_dir}/oozie-env.sh"), owner=params.oozie_user, content=InlineTemplate(params.oozie_env_sh_template), group=params.user_group, ) if (params.log4j_props != None): File(format("{params.conf_dir}/oozie-log4j.properties"), mode=0644, group=params.user_group, owner=params.oozie_user, content=params.log4j_props) elif (os.path.exists(format("{params.conf_dir}/oozie-log4j.properties"))): File(format("{params.conf_dir}/oozie-log4j.properties"), mode=0644, group=params.user_group, owner=params.oozie_user) File(format("{params.conf_dir}/adminusers.txt"), mode=0644, group=params.user_group, owner=params.oozie_user, content=Template('adminusers.txt.j2', oozie_user=params.oozie_user)) environment = {"no_proxy": format("{ambari_server_hostname}")} if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or \ params.jdbc_driver_name == "org.postgresql.Driver" or \ params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver": File( format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"), content=DownloadSource( format("{jdk_location}{check_db_connection_jar_name}")), ) pass oozie_ownership() if params.lzo_enabled: install_lzo_if_needed() Execute( format( '{sudo} cp {hadoop_lib_home}/hadoop-lzo*.jar {oozie_lib_dir}'), not_if=no_op_test, ) if is_server: oozie_server_specific()
def setup_ranger_plugin(component_select_name, service_name,
                        component_downloaded_custom_connector,
                        component_driver_curl_source,
                        component_driver_curl_target, java_home, repo_name,
                        plugin_repo_dict, ranger_env_properties,
                        plugin_properties, policy_user, policymgr_mgr_url,
                        plugin_enabled, conf_dict, component_user,
                        component_group, cache_service_list,
                        plugin_audit_properties, plugin_audit_attributes,
                        plugin_security_properties,
                        plugin_security_attributes,
                        plugin_policymgr_ssl_properties,
                        plugin_policymgr_ssl_attributes, component_list,
                        audit_db_is_enabled, credential_file,
                        xa_audit_db_password, ssl_truststore_password,
                        ssl_keystore_password, api_version=None,
                        hdp_version_override=None):
    """Enable or disable the Ranger plugin for a component.

    When ``plugin_enabled`` is truthy: creates/refreshes the Ranger
    repository via the admin REST API, writes ranger-security.xml (with a
    timestamp marking when the plugin was enabled), creates the policy
    cache directories/files under /etc/ranger/<repo>, renders the
    audit/security/policymgr-ssl XML configs, symlinks the plugin jars and
    provisions the credential keystore. When falsy: deletes
    ranger-security.xml so the component runs without the plugin.

    :param conf_dict: the component's configuration directory.
    :param api_version: 'v2' selects the RangeradminV2 REST client.
    :param hdp_version_override: use this HDP version instead of the one
                                 reported by hdp-select.
    :return: None
    """
    # Stage the JDBC driver jar used by the Ranger audit database.
    File(component_downloaded_custom_connector,
         content=DownloadSource(component_driver_curl_source))

    Execute(('cp', '--remove-destination',
             component_downloaded_custom_connector,
             component_driver_curl_target),
            not_if=format("test -f {component_driver_curl_target}"),
            sudo=True)

    hdp_version = get_hdp_version(component_select_name)
    if hdp_version_override is not None:
        hdp_version = hdp_version_override

    component_conf_dir = conf_dict

    if plugin_enabled:
        # NOTE: the second clause is redundant -- api_version == 'v2' already
        # implies it is not None; kept as-is to preserve behavior exactly.
        if api_version == 'v2' and api_version is not None:
            ranger_adm_obj = RangeradminV2(url=policymgr_mgr_url)
        else:
            ranger_adm_obj = Rangeradmin(url=policymgr_mgr_url)
        ranger_adm_obj.create_ranger_repository(
            service_name, repo_name, plugin_repo_dict,
            ranger_env_properties['ranger_admin_username'],
            ranger_env_properties['ranger_admin_password'],
            ranger_env_properties['admin_username'],
            ranger_env_properties['admin_password'], policy_user)

        # Marker file recording when the plugin was enabled.
        current_datetime = datetime.now()
        File(
            format('{component_conf_dir}/ranger-security.xml'),
            owner=component_user,
            group=component_group,
            mode=0644,
            content=InlineTemplate(
                format(
                    '<ranger>\n<enabled>{current_datetime}</enabled>\n</ranger>'
                )))

        Directory([
            os.path.join('/etc', 'ranger', repo_name),
            os.path.join('/etc', 'ranger', repo_name, 'policycache')
        ],
                  owner=component_user,
                  group=component_group,
                  mode=0775,
                  recursive=True)

        # Pre-create (empty) policy-cache files the plugin reads on startup.
        for cache_service in cache_service_list:
            File(os.path.join('/etc', 'ranger', repo_name, 'policycache',
                              format('{cache_service}_{repo_name}.json')),
                 owner=component_user,
                 group=component_group,
                 mode=0644)

        XmlConfig(format('ranger-{service_name}-audit.xml'),
                  conf_dir=component_conf_dir,
                  configurations=plugin_audit_properties,
                  configuration_attributes=plugin_audit_attributes,
                  owner=component_user,
                  group=component_group,
                  mode=0744)

        XmlConfig(format('ranger-{service_name}-security.xml'),
                  conf_dir=component_conf_dir,
                  configurations=plugin_security_properties,
                  configuration_attributes=plugin_security_attributes,
                  owner=component_user,
                  group=component_group,
                  mode=0744)

        XmlConfig("ranger-policymgr-ssl.xml",
                  conf_dir=component_conf_dir,
                  configurations=plugin_policymgr_ssl_properties,
                  configuration_attributes=plugin_policymgr_ssl_attributes,
                  owner=component_user,
                  group=component_group,
                  mode=0744)

        setup_ranger_plugin_jar_symblink(hdp_version, service_name,
                                         component_list)

        setup_ranger_plugin_keystore(service_name, audit_db_is_enabled,
                                     hdp_version, credential_file,
                                     xa_audit_db_password,
                                     ssl_truststore_password,
                                     ssl_keystore_password, component_user,
                                     component_group, java_home)
    else:
        # Plugin disabled: remove the enablement marker.
        File(format('{component_conf_dir}/ranger-security.xml'),
             action="delete")
def oozie_server_specific():
    """Oozie-server-only setup: directories, sharelib extraction, JDBC
    driver staging, the falcon EL extension jar and the prepare-war step.

    Expensive steps (sharelib untar, prepare-war) are guarded by marker
    files (.hashcode / .prepare_war_cmd) so re-runs are cheap, and by
    ``no_op_test`` so a running server is never modified.
    """
    import params

    # Shell test that succeeds while the Oozie server process is alive.
    no_op_test = as_user(format(
        "ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1"
    ), user=params.oozie_user)

    # Drop a stale pid file (only when the process is not running).
    File(params.pid_file, action="delete", not_if=no_op_test)

    oozie_server_directories = [
        format("{oozie_home}/{oozie_tmp_dir}"), params.oozie_pid_dir,
        params.oozie_log_dir, params.oozie_tmp_dir, params.oozie_data_dir,
        params.oozie_lib_dir, params.oozie_webapps_dir,
        params.oozie_webapps_conf_dir, params.oozie_server_dir
    ]
    Directory(
        oozie_server_directories,
        owner=params.oozie_user,
        group=params.user_group,
        mode=0755,
        create_parents=True,
        cd_access="a",
    )

    Directory(
        params.oozie_libext_dir,
        create_parents=True,
    )

    # .hashcode records which sharelib tarball was last extracted.
    # NOTE(review): the md5 is computed over the tarball *path string*, not
    # its contents, so it only changes when the path changes -- confirm
    # this is intended.
    hashcode_file = format("{oozie_home}/.hashcode")
    hashcode = hashlib.md5(
        format('{oozie_home}/oozie-sharelib.tar.gz')).hexdigest()
    skip_recreate_sharelib = format(
        "test -f {hashcode_file} && test -d {oozie_home}/share && [[ `cat {hashcode_file}` == '{hashcode}' ]]"
    )

    untar_sharelib = ('tar', '-xvf',
                      format('{oozie_home}/oozie-sharelib.tar.gz'), '-C',
                      params.oozie_home)

    Execute(
        untar_sharelib,  # time-expensive
        not_if=format("{no_op_test} || {skip_recreate_sharelib}"),
        sudo=True,
    )

    configure_cmds = []
    #configure_cmds.append(('tar','-xvf',format('{oozie_home}/oozie-sharelib.tar.gz'),'-C',params.oozie_home))
    #configure_cmds.append(('cp', params.ext_js_path, params.oozie_libext_dir))
    #configure_cmds.append(('chown', format('{oozie_user}:{user_group}'), format('{oozie_libext_dir}/{ext_js_file}')))
    configure_cmds.append(('chown', '-RL', format('{oozie_user}:{user_group}'),
                           params.oozie_webapps_conf_dir))

    Execute(
        configure_cmds,
        not_if=no_op_test,
        sudo=True,
    )

    # Stage the JDBC driver jar for MySQL / Oracle metastore databases.
    if params.jdbc_driver_name=="com.mysql.jdbc.Driver" or \
       params.jdbc_driver_name=="oracle.jdbc.driver.OracleDriver":
        File(
            params.downloaded_custom_connector,
            content=DownloadSource(params.driver_curl_source),
        )
        Execute(
            ('cp', '--remove-destination', params.downloaded_custom_connector,
             params.target),
            #creates=params.target, TODO: uncomment after ranger_hive_plugin will not provide jdbc
            path=["/bin", "/usr/bin/"],
            sudo=True)
        File(params.target, owner=params.oozie_user, group=params.user_group)

    #falcon el extension
    if params.has_falcon_host:
        # Refresh the falcon EL extension jar from HDFS.
        Execute(
            format('rm -rf {oozie_libext_dir}/falcon-oozie-el-extension.jar'),
        )
        if params.security_enabled:
            # Kerberos ticket needed for the HDFS fetch below.
            Execute(
                format(
                    '/usr/bin/kinit -kt /etc/security/keytabs/hdfs.headless.keytab {hdfs_principal_name}'
                ))
        Execute(
            format(
                'hadoop fs -get /user/falcon/temp/falcon-oozie-el-extension.jar {oozie_libext_dir}'
            ),
            not_if=no_op_test,
        )
        Execute(
            format(
                '{sudo} chown {oozie_user}:{user_group} {oozie_libext_dir}/falcon-oozie-el-extension.jar'
            ),
            not_if=no_op_test,
        )

    # prepare-war is skipped when the exact same command has already run
    # (recorded in .prepare_war_cmd) and the sharelib is up to date.
    prepare_war_cmd_file = format("{oozie_home}/.prepare_war_cmd")
    prepare_war_cmd = format(
        "cd {oozie_tmp_dir} && {oozie_setup_sh} prepare-war {oozie_secure}")
    skip_prepare_war_cmd = format(
        "test -f {prepare_war_cmd_file} && [[ `cat {prepare_war_cmd_file}` == '{prepare_war_cmd}' ]]"
    )

    Execute(
        prepare_war_cmd,  # time-expensive
        user=params.oozie_user,
        not_if=format(
            "{no_op_test} || {skip_recreate_sharelib} && {skip_prepare_war_cmd}"
        ))
    # Record the markers so the guarded steps are skipped next run.
    File(
        hashcode_file,
        content=hashcode,
        mode=0644,
    )
    File(
        prepare_war_cmd_file,
        content=prepare_war_cmd,
        mode=0644,
    )

    # Create hive-site and tez-site configs for oozie
    Directory(params.hive_conf_dir,
              create_parents=True,
              owner=params.oozie_user,
              group=params.user_group)
    if 'hive-site' in params.config['configurations']:
        XmlConfig(
            "hive-site.xml",
            conf_dir=params.hive_conf_dir,
            configurations=params.config['configurations']['hive-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['hive-site'],
            owner=params.oozie_user,
            group=params.user_group,
            mode=0640)
    '''if 'tez-site' in params.config['configurations']:
        XmlConfig(
            "tez-site.xml",
            conf_dir = params.hive_conf_dir,
            configurations = params.config['configurations']['tez-site'],
            configuration_attributes=params.config['configuration_attributes']['tez-site'],
            owner = params.oozie_user,
            group = params.user_group,
            mode = 0664
        )'''
    Execute(('chown', '-R', format("{oozie_user}:{user_group}"),
             params.oozie_server_dir),
            sudo=True)
def __setup_unlimited_key_jce_policy(custom_java_home, custom_jdk_name,
                                     custom_jce_name):
    """
    Sets up the unlimited key JCE policy if needed. The following criteria must be met:

      * The cluster has not been previously prepared (sys preped) - cluster-env/sysprep_skip_setup_jce = False
      * Ambari is managing the host's JVM - /ambariLevelParams/jdk_name is set
      * Either security is enabled OR a service requires it - /componentLevelParams/unlimited_key_jce_required = True
      * The unlimited key JCE policy has not already been installed

    If the conditions are met, the following steps are taken to install the unlimited key JCE policy JARs

      1. The unlimited key JCE policy ZIP file is downloaded from the Ambari server and stored in the
         Ambari agent's temporary directory
      2. The existing JCE policy JAR files are deleted
      3. The downloaded ZIP file is unzipped into the proper JCE policy directory

    :param custom_java_home: JAVA_HOME of the JVM to patch
    :param custom_jdk_name: name of the Ambari-managed JDK package (falsy
                            when the JVM is not managed by Ambari)
    :param custom_jce_name: filename of the JCE policy ZIP on the Ambari
                            server (None when not provided)
    :return: None
    """
    import params

    if params.sysprep_skip_setup_jce:
        Logger.info(
            "Skipping unlimited key JCE policy check and setup since the host is sys prepped"
        )
    elif not custom_jdk_name:
        Logger.info(
            "Skipping unlimited key JCE policy check and setup since the Java VM is not managed by Ambari"
        )
    elif not params.unlimited_key_jce_required:
        Logger.info(
            "Skipping unlimited key JCE policy check and setup since it is not required"
        )
    else:
        jcePolicyInfo = JcePolicyInfo(custom_java_home)

        if jcePolicyInfo.is_unlimited_key_jce_policy():
            # Already installed: nothing to do.
            Logger.info(
                "The unlimited key JCE policy is required, and appears to have been installed."
            )
        elif custom_jce_name is None:
            raise Fail(
                "The unlimited key JCE policy needs to be installed; however the JCE policy zip is not specified."
            )
        else:
            Logger.info(
                "The unlimited key JCE policy is required, and needs to be installed."
            )

            jce_zip_target = format("{artifact_dir}/{custom_jce_name}")
            jce_zip_source = format(
                "{ambari_server_resources_url}/{custom_jce_name}")
            java_security_dir = format("{custom_java_home}/jre/lib/security")

            # Step 1: download the policy ZIP into the agent's artifact dir.
            Logger.debug(
                "Downloading the unlimited key JCE policy files from {0} to {1}."
                .format(jce_zip_source, jce_zip_target))
            Directory(params.artifact_dir, create_parents=True)
            File(jce_zip_target, content=DownloadSource(jce_zip_source))

            # Step 2: remove the limited-strength policy JARs.
            Logger.debug("Removing existing JCE policy JAR files: {0}.".format(
                java_security_dir))
            File(
                format("{java_security_dir}/US_export_policy.jar"),
                action="delete")
            File(
                format("{java_security_dir}/local_policy.jar"),
                action="delete")

            # Step 3: unzip the new policy JARs in place (flattened, quiet).
            Logger.debug(
                "Unzipping the unlimited key JCE policy files from {0} into {1}."
                .format(jce_zip_target, java_security_dir))
            extract_cmd = ("unzip", "-o", "-j", "-q", jce_zip_target, "-d",
                           java_security_dir)
            Execute(
                extract_cmd,
                only_if=format(
                    "test -e {java_security_dir} && test -f {jce_zip_target}"),
                path=['/bin/', '/usr/bin'],
                sudo=True)
def configure(self, env):
    """Render all Kylin configuration files and create its directories.

    Forces ``server_mode`` to 'job' before templating kylin.properties,
    then writes kylin-env.sh, the four XML job/hive/kafka configs and the
    server/tools log4j files, all owned by the Kylin user/group.

    :param env: Ambari execution environment (supplies params).
    :return: None
    """
    import params
    env.set_params(params)
    self.create_kylin_log_dir(env)

    # This component runs as the job server.
    params.server_mode = 'job'

    # create the pid and kylin dirs
    Directory(
        [params.kylin_pid_dir, params.kylin_dir, params.conf_dir],
        owner=params.kylin_user,
        group=params.kylin_group,
        cd_access="a",
        create_parents=True,
        mode=0755)
    self.chown_kylin_pid_dir(env)

    File(
        os.path.join(params.conf_dir, "kylin.properties"),
        content=InlineTemplate(params.kylin_properties_template),
        owner=params.kylin_user,
        group=params.kylin_group)
    File(
        os.path.join(params.kylin_dir, "bin/kylin-env.sh"),
        mode=0755,
        content=InlineTemplate(params.kylin_env_template),
        owner=params.kylin_user,
        group=params.kylin_group)

    XmlConfig(
        "kylin_hive_conf.xml",
        conf_dir=params.conf_dir,
        configurations=params.config['configurations']['kylin_hive_conf'],
        owner=params.kylin_user,
        group=params.kylin_group)
    XmlConfig(
        "kylin_job_conf.xml",
        conf_dir=params.conf_dir,
        configurations=params.config['configurations']['kylin_job_conf'],
        owner=params.kylin_user,
        group=params.kylin_group)
    XmlConfig(
        "kylin_job_conf_inmem.xml",
        conf_dir=params.conf_dir,
        configurations=params.config['configurations'][
            'kylin_job_conf_inmem'],
        owner=params.kylin_user,
        group=params.kylin_group)
    XmlConfig(
        "kylin-kafka-consumer.xml",
        conf_dir=params.conf_dir,
        configurations=params.config['configurations'][
            'kylin-kafka-consumer'],
        owner=params.kylin_user,
        group=params.kylin_group)

    File(
        os.path.join(params.conf_dir, "kylin-server-log4j.properties"),
        mode=0644,
        group=params.kylin_group,
        owner=params.kylin_user,
        content=InlineTemplate(params.log4j_server_props))
    File(
        os.path.join(params.conf_dir, "kylin-tools-log4j.properties"),
        mode=0644,
        group=params.kylin_group,
        owner=params.kylin_user,
        content=InlineTemplate(params.log4j_tool_props))
def hbase(name=None): import params # ensure that matching LZO libraries are installed for HBase lzo_utils.install_lzo_if_needed() Directory(params.etc_prefix_dir, mode=0755) Directory(params.hbase_conf_dir, owner=params.hbase_user, group=params.user_group, create_parents=True) Directory(params.java_io_tmpdir, create_parents=True, mode=0777) # If a file location is specified in ioengine parameter, # ensure that directory exists. Otherwise create the # directory with permissions assigned to hbase:hadoop. ioengine_input = params.ioengine_param if ioengine_input != None: if ioengine_input.startswith("file:/"): ioengine_fullpath = ioengine_input[5:] ioengine_dir = os.path.dirname(ioengine_fullpath) Directory(ioengine_dir, owner=params.hbase_user, group=params.user_group, create_parents=True, mode=0755) parent_dir = os.path.dirname(params.tmp_dir) # In case if we have several placeholders in path while ("${" in parent_dir): parent_dir = os.path.dirname(parent_dir) if parent_dir != os.path.abspath(os.sep): Directory( parent_dir, create_parents=True, cd_access="a", ) Execute(("chmod", "1777", parent_dir), sudo=True) XmlConfig( "hbase-site.xml", conf_dir=params.hbase_conf_dir, configurations=params.config['configurations']['hbase-site'], configuration_attributes=params.config['configuration_attributes'] ['hbase-site'], owner=params.hbase_user, group=params.user_group) if check_stack_feature(StackFeature.PHOENIX_CORE_HDFS_SITE_REQUIRED, params.version_for_stack_feature_checks): XmlConfig( "core-site.xml", conf_dir=params.hbase_conf_dir, configurations=params.config['configurations']['core-site'], configuration_attributes=params.config['configuration_attributes'] ['core-site'], owner=params.hbase_user, group=params.user_group) if 'hdfs-site' in params.config['configurations']: XmlConfig( "hdfs-site.xml", conf_dir=params.hbase_conf_dir, configurations=params.config['configurations']['hdfs-site'], configuration_attributes=params. 
config['configuration_attributes']['hdfs-site'], owner=params.hbase_user, group=params.user_group) else: File(format("{params.hbase_conf_dir}/hdfs-site.xml"), action="delete") File(format("{params.hbase_conf_dir}/core-site.xml"), action="delete") if 'hbase-policy' in params.config['configurations']: XmlConfig( "hbase-policy.xml", conf_dir=params.hbase_conf_dir, configurations=params.config['configurations']['hbase-policy'], configuration_attributes=params.config['configuration_attributes'] ['hbase-policy'], owner=params.hbase_user, group=params.user_group) # Manually overriding ownership of file installed by hadoop package else: File(format("{params.hbase_conf_dir}/hbase-policy.xml"), owner=params.hbase_user, group=params.user_group) File( format("{hbase_conf_dir}/hbase-env.sh"), owner=params.hbase_user, content=InlineTemplate(params.hbase_env_sh_template), group=params.user_group, ) # On some OS this folder could be not exists, so we will create it before pushing there files Directory(params.limits_conf_dir, create_parents=True, owner='root', group='root') File(os.path.join(params.limits_conf_dir, 'hbase.conf'), owner='root', group='root', mode=0644, content=Template("hbase.conf.j2")) hbase_TemplateConfig('regionservers') if params.security_enabled: hbase_TemplateConfig(format("hbase_{name}_jaas.conf")) if name != "client": Directory( params.pid_dir, owner=params.hbase_user, create_parents=True, cd_access="a", mode=0755, ) Directory( params.log_dir, owner=params.hbase_user, create_parents=True, cd_access="a", mode=0755, ) if (params.log4j_props != None): File(format("{params.hbase_conf_dir}/log4j.properties"), mode=0644, group=params.user_group, owner=params.hbase_user, content=InlineTemplate(params.log4j_props)) elif (os.path.exists(format("{params.hbase_conf_dir}/log4j.properties"))): File(format("{params.hbase_conf_dir}/log4j.properties"), mode=0644, group=params.user_group, owner=params.hbase_user) if name == "master": 
params.HdfsResource(params.hbase_hdfs_root_dir, type="directory", action="create_on_execute", owner=params.hbase_user) params.HdfsResource(params.hbase_staging_dir, type="directory", action="create_on_execute", owner=params.hbase_user, mode=0711) if params.create_hbase_home_directory: params.HdfsResource(params.hbase_home_directory, type="directory", action="create_on_execute", owner=params.hbase_user, mode=0755) params.HdfsResource(None, action="execute") if params.phoenix_enabled: Package(params.phoenix_package, retry_on_repo_unavailability=params. agent_stack_retry_on_unavailability, retry_count=params.agent_stack_retry_count)
return db_connection_check_structured_output else: tmp_java_dir = tempfile.mkdtemp(prefix="jdk_tmp_", dir=tmp_dir) sudo = AMBARI_SUDO_BINARY if jdk_name.endswith(".bin"): chmod_cmd = ("chmod", "+x", jdk_download_target) install_cmd = format( "cd {tmp_java_dir} && echo A | {jdk_download_target} -noregister && {sudo} cp -rp {tmp_java_dir}/* {java_dir}" ) elif jdk_name.endswith(".gz"): chmod_cmd = ("chmod", "a+x", java_dir) install_cmd = format( "cd {tmp_java_dir} && tar -xf {jdk_download_target} && {sudo} cp -rp {tmp_java_dir}/* {java_dir}" ) try: Directory(java_dir) Execute(chmod_cmd, not_if=format("test -e {java_exec}"), sudo=True) Execute(install_cmd, not_if=format("test -e {java_exec}")) File(format("{java_home}/bin/java"), mode=0755, cd_access="a") Directory(java_home, owner=getpass.getuser(), recursive_ownership=True) Execute(('chmod', '-R', '755', java_home), sudo=True) except Exception, e: message = "Error installing java.\n" + str(e) Logger.exception(message) db_connection_check_structured_output = {
def oozie_server_specific(): import params File( params.pid_file, action="delete", not_if= "ls {pid_file} >/dev/null 2>&1 && !(ps `cat {pid_file}` >/dev/null 2>&1)" ) oozie_server_directories = [ format("{oozie_home}/{oozie_tmp_dir}"), params.oozie_pid_dir, params.oozie_log_dir, params.oozie_tmp_dir, params.oozie_data_dir, params.oozie_lib_dir, params.oozie_webapps_dir, params.oozie_webapps_conf_dir, params.oozie_server_dir ] Directory( oozie_server_directories, owner=params.oozie_user, group=params.user_group, mode=0755, recursive=True, cd_access="a", ) Directory( params.oozie_libext_dir, recursive=True, ) configure_cmds = [] configure_cmds.append( ('tar', '-xvf', format('{oozie_home}/oozie-sharelib.tar.gz'), '-C', params.oozie_home)) configure_cmds.append(('cp', params.ext_js_path, params.oozie_libext_dir)) configure_cmds.append(('chown', format('{oozie_user}:{user_group}'), format('{oozie_libext_dir}/{ext_js_file}'))) configure_cmds.append(('chown', '-RL', format('{oozie_user}:{user_group}'), params.oozie_webapps_conf_dir)) no_op_test = format( "ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1" ) Execute( configure_cmds, not_if=no_op_test, sudo=True, ) if params.jdbc_driver_name=="com.mysql.jdbc.Driver" or \ params.jdbc_driver_name == "com.microsoft.sqlserver.jdbc.SQLServerDriver" or \ params.jdbc_driver_name=="oracle.jdbc.driver.OracleDriver": File( params.downloaded_custom_connector, content=DownloadSource(params.driver_curl_source), ) Execute( ('cp', '--remove-destination', params.downloaded_custom_connector, params.target), #creates=params.target, TODO: uncomment after ranger_hive_plugin will not provide jdbc path=["/bin", "/usr/bin/"], sudo=True) #falcon el extension if params.has_falcon_host: Execute( format( 'sudo cp {falcon_home}/oozie/ext/falcon-oozie-el-extension-*.jar {oozie_libext_dir}' ), not_if=no_op_test, ) Execute( format( 'sudo chown {oozie_user}:{user_group} {oozie_libext_dir}/falcon-oozie-el-extension-*.jar' ), 
not_if=no_op_test, ) if params.lzo_enabled: Package(params.lzo_packages_for_current_host) Execute( format( 'sudo cp {hadoop_lib_home}/hadoop-lzo*.jar {oozie_lib_dir}'), not_if=no_op_test, ) Execute(format("cd {oozie_tmp_dir} && {oozie_setup_sh} prepare-war"), user=params.oozie_user, not_if=no_op_test) if params.hdp_stack_version != "" and compare_versions( params.hdp_stack_version, '2.2') >= 0: # Create hive-site and tez-site configs for oozie Directory(params.hive_conf_dir, recursive=True, owner=params.oozie_user, group=params.user_group) if 'hive-site' in params.config['configurations']: XmlConfig( "hive-site.xml", conf_dir=params.hive_conf_dir, configurations=params.config['configurations']['hive-site'], configuration_attributes=params. config['configuration_attributes']['hive-site'], owner=params.oozie_user, group=params.user_group, mode=0644) if 'tez-site' in params.config['configurations']: XmlConfig( "tez-site.xml", conf_dir=params.hive_conf_dir, configurations=params.config['configurations']['tez-site'], configuration_attributes=params. config['configuration_attributes']['tez-site'], owner=params.oozie_user, group=params.user_group, mode=0664) pass
def oozie(is_server=False): import params if is_server: params.HdfsDirectory(params.oozie_hdfs_user_dir, action="create", owner=params.oozie_user, mode=params.oozie_hdfs_user_mode) Directory(params.conf_dir, recursive=True, owner=params.oozie_user, group=params.user_group) XmlConfig( "oozie-site.xml", conf_dir=params.conf_dir, configurations=params.config['configurations']['oozie-site'], configuration_attributes=params.config['configuration_attributes'] ['oozie-site'], owner=params.oozie_user, group=params.user_group, mode=0664) File(format("{conf_dir}/oozie-env.sh"), owner=params.oozie_user, content=InlineTemplate(params.oozie_env_sh_template)) if (params.log4j_props != None): File(format("{params.conf_dir}/oozie-log4j.properties"), mode=0644, group=params.user_group, owner=params.oozie_user, content=params.log4j_props) elif (os.path.exists(format("{params.conf_dir}/oozie-log4j.properties"))): File(format("{params.conf_dir}/oozie-log4j.properties"), mode=0644, group=params.user_group, owner=params.oozie_user) if params.hdp_stack_version != "" and compare_versions( params.hdp_stack_version, '2.2') >= 0: File(format("{params.conf_dir}/adminusers.txt"), mode=0644, group=params.user_group, owner=params.oozie_user, content=Template('adminusers.txt.j2', oozie_user=params.oozie_user)) else: File(format("{params.conf_dir}/adminusers.txt"), owner=params.oozie_user, group=params.user_group) if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or \ params.jdbc_driver_name == "com.microsoft.sqlserver.jdbc.SQLServerDriver" or \ params.jdbc_driver_name == "org.postgresql.Driver" or \ params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver": File( format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"), content=DownloadSource( format("{jdk_location}{check_db_connection_jar_name}")), ) pass oozie_ownership() if is_server: oozie_server_specific()
return db_connection_check_structured_output else: tmp_java_dir = format("{tmp_dir}/jdk") sudo = AMBARI_SUDO_BINARY if jdk_name.endswith(".bin"): chmod_cmd = ("chmod", "+x", jdk_download_target) install_cmd = format( "mkdir -p {tmp_java_dir} && cd {tmp_java_dir} && echo A | {jdk_download_target} -noregister && {sudo} cp -rp {tmp_java_dir}/* {java_dir}" ) elif jdk_name.endswith(".gz"): chmod_cmd = ("chmod", "a+x", java_dir) install_cmd = format( "mkdir -p {tmp_java_dir} && cd {tmp_java_dir} && tar -xf {jdk_download_target} && {sudo} cp -rp {tmp_java_dir}/* {java_dir}" ) try: Directory(java_dir) Execute(chmod_cmd, not_if=format("test -e {java_exec}"), sudo=True) Execute(install_cmd, not_if=format("test -e {java_exec}")) File(format("{java_home}/bin/java"), mode=0755, cd_access="a") Execute(("chown", "-R", getpass.getuser(), java_home), sudo=True) except Exception, e: message = "Error installing java.\n" + str(e) print message db_connection_check_structured_output = { "exit_code": 1, "message": message
def setup_ranger_plugin(component_select_name, service_name,
                        previous_jdbc_jar,
                        component_downloaded_custom_connector,
                        component_driver_curl_source,
                        component_driver_curl_target, java_home, repo_name,
                        plugin_repo_dict, ranger_env_properties,
                        plugin_properties, policy_user, policymgr_mgr_url,
                        plugin_enabled, conf_dict, component_user,
                        component_group, cache_service_list,
                        plugin_audit_properties, plugin_audit_attributes,
                        plugin_security_properties,
                        plugin_security_attributes,
                        plugin_policymgr_ssl_properties,
                        plugin_policymgr_ssl_attributes, component_list,
                        audit_db_is_enabled, credential_file,
                        xa_audit_db_password, ssl_truststore_password,
                        ssl_keystore_password, api_version=None,
                        stack_version_override=None,
                        skip_if_rangeradmin_down=True,
                        is_security_enabled=False,
                        is_stack_supports_ranger_kerberos=False,
                        component_user_principal=None,
                        component_user_keytab=None):
    """Enable or disable the Ranger plugin for a component (v2-capable
    variant with Kerberos support).

    When ``plugin_enabled`` is truthy: optionally stages the audit-DB JDBC
    driver, creates/refreshes the Ranger repository via the admin REST API
    (v1 or v2 client), writes ranger-security.xml with an enablement
    timestamp, pre-creates the policy-cache files under /etc/ranger/<repo>,
    renders the audit/security/policymgr-ssl XML configs and provisions the
    credential keystore. When falsy: deletes ranger-security.xml.

    :param previous_jdbc_jar: driver jar from an earlier stack version to
                              remove before staging the new one.
    :param api_version: 'v2' selects the RangeradminV2 REST client, which
                        also receives the Kerberos-related arguments.
    :param stack_version_override: use this stack version instead of the
                                   one reported for component_select_name.
    :param skip_if_rangeradmin_down: tolerate an unreachable Ranger admin.
    :return: None
    """
    # Stage the audit-DB JDBC driver only when DB auditing is on and a real
    # driver URL was supplied (not the ".../None" placeholder).
    if audit_db_is_enabled and component_driver_curl_source is not None and not component_driver_curl_source.endswith(
            "/None"):
        if previous_jdbc_jar and os.path.isfile(previous_jdbc_jar):
            # Remove the driver left behind by a previous stack version.
            File(previous_jdbc_jar, action='delete')
        File(component_downloaded_custom_connector,
             content=DownloadSource(component_driver_curl_source),
             mode=0644)
        Execute(('cp', '--remove-destination',
                 component_downloaded_custom_connector,
                 component_driver_curl_target),
                path=["/bin", "/usr/bin/"],
                sudo=True)
        File(component_driver_curl_target, mode=0644)

    # Normalize the admin URL (no trailing slash).
    if policymgr_mgr_url.endswith('/'):
        policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
    stack_version = get_stack_version(component_select_name)
    if stack_version_override is not None:
        stack_version = stack_version_override

    component_conf_dir = conf_dict

    if plugin_enabled:
        if api_version is not None and api_version == 'v2':
            # v2 client: repository creation also carries Kerberos identity.
            ranger_adm_obj = RangeradminV2(
                url=policymgr_mgr_url,
                skip_if_rangeradmin_down=skip_if_rangeradmin_down)
            ranger_adm_obj.create_ranger_repository(
                service_name, repo_name, plugin_repo_dict,
                ranger_env_properties['ranger_admin_username'],
                ranger_env_properties['ranger_admin_password'],
                ranger_env_properties['admin_username'],
                ranger_env_properties['admin_password'], policy_user,
                is_security_enabled, is_stack_supports_ranger_kerberos,
                component_user, component_user_principal,
                component_user_keytab)
        else:
            ranger_adm_obj = Rangeradmin(
                url=policymgr_mgr_url,
                skip_if_rangeradmin_down=skip_if_rangeradmin_down)
            ranger_adm_obj.create_ranger_repository(
                service_name, repo_name, plugin_repo_dict,
                ranger_env_properties['ranger_admin_username'],
                ranger_env_properties['ranger_admin_password'],
                ranger_env_properties['admin_username'],
                ranger_env_properties['admin_password'], policy_user)

        # Marker file recording when the plugin was enabled.
        current_datetime = datetime.now()
        File(
            format('{component_conf_dir}/ranger-security.xml'),
            owner=component_user,
            group=component_group,
            mode=0644,
            content=InlineTemplate(
                format(
                    '<ranger>\n<enabled>{current_datetime}</enabled>\n</ranger>'
                )))

        Directory([
            os.path.join('/etc', 'ranger', repo_name),
            os.path.join('/etc', 'ranger', repo_name, 'policycache')
        ],
                  owner=component_user,
                  group=component_group,
                  mode=0775,
                  create_parents=True,
                  cd_access='a')

        # Pre-create (empty) policy-cache files the plugin reads on startup.
        for cache_service in cache_service_list:
            File(os.path.join('/etc', 'ranger', repo_name, 'policycache',
                              format('{cache_service}_{repo_name}.json')),
                 owner=component_user,
                 group=component_group,
                 mode=0644)

        XmlConfig(format('ranger-{service_name}-audit.xml'),
                  conf_dir=component_conf_dir,
                  configurations=plugin_audit_properties,
                  configuration_attributes=plugin_audit_attributes,
                  owner=component_user,
                  group=component_group,
                  mode=0744)

        XmlConfig(format('ranger-{service_name}-security.xml'),
                  conf_dir=component_conf_dir,
                  configurations=plugin_security_properties,
                  configuration_attributes=plugin_security_attributes,
                  owner=component_user,
                  group=component_group,
                  mode=0744)

        # YARN uses a differently named policymgr SSL config file.
        if str(service_name).lower() == 'yarn':
            XmlConfig("ranger-policymgr-ssl-yarn.xml",
                      conf_dir=component_conf_dir,
                      configurations=plugin_policymgr_ssl_properties,
                      configuration_attributes=plugin_policymgr_ssl_attributes,
                      owner=component_user,
                      group=component_group,
                      mode=0744)
        else:
            XmlConfig("ranger-policymgr-ssl.xml",
                      conf_dir=component_conf_dir,
                      configurations=plugin_policymgr_ssl_properties,
                      configuration_attributes=plugin_policymgr_ssl_attributes,
                      owner=component_user,
                      group=component_group,
                      mode=0744)

        #This should be done by rpm
        #setup_ranger_plugin_jar_symblink(stack_version, service_name, component_list)

        setup_ranger_plugin_keystore(service_name, audit_db_is_enabled,
                                     stack_version, credential_file,
                                     xa_audit_db_password,
                                     ssl_truststore_password,
                                     ssl_keystore_password, component_user,
                                     component_group, java_home)
    else:
        # Plugin disabled: remove the enablement marker.
        File(format('{component_conf_dir}/ranger-security.xml'),
             action="delete")
def configure(self, env):
    """
    Write out the full DP Profiler Agent configuration.

    Creates the pid and conf directories, symlinks the conf directory into
    the profiler home, renders every *.conf file the agent reads, the
    optional Kerberos JAAS config, and logback.xml.

    :param env: Environment object; receives params and status_params.
    """
    import params
    import status_params
    env.set_params(params)
    env.set_params(status_params)
    self.create_profile_types_atlas()
    self.create_ranger_policy(env)
    self.create_dpprofiler_log_dir(env)
    self.create_dpprofiler_dir(params)
    Logger.info("Creating pid directory")
    Directory([params.dpprofiler_pid_dir],
              owner=params.dpprofiler_user,
              group=params.dpprofiler_group,
              cd_access="a",
              create_parents=True,
              mode=0755)
    # Conf dir itself is root-owned; the files below are owned by the
    # dpprofiler user with mode 0600 because they can carry credentials.
    Directory([params.dpprofiler_conf_dir],
              owner='root',
              group='root',
              cd_access="a",
              create_parents=True,
              mode=0755)
    Logger.info("Creating symlink to Profiler Agent conf directory")
    # NOTE(review): plain `ln -s` fails when the link already exists, which
    # is why ignore_failures is set; a stale link pointing elsewhere would
    # not be repaired by this step — confirm whether that is intended.
    Execute(format(
        '{sudo} ln -s {params.dpprofiler_conf_dir} {params.dpprofiler_home}'),
        ignore_failures=True)
    Logger.info("Writing conf files")
    File(os.path.join(params.dpprofiler_conf_dir, 'application.conf'),
         owner=params.dpprofiler_user,
         group=params.dpprofiler_group,
         mode=0600,
         content=Template("application.conf.j2")
         )
    File(os.path.join(params.dpprofiler_conf_dir, 'flyway.conf'),
         owner=params.dpprofiler_user,
         group=params.dpprofiler_group,
         mode=0600,
         content=Template("flyway.conf.j2")
         )
    File(os.path.join(params.dpprofiler_conf_dir, 'clusterconfigs.conf'),
         owner=params.dpprofiler_user,
         group=params.dpprofiler_group,
         mode=0600,
         content=Template("clusterconfigs.conf.j2")
         )
    File(os.path.join(params.dpprofiler_conf_dir, 'livyconfigs.conf'),
         owner=params.dpprofiler_user,
         group=params.dpprofiler_group,
         mode=0600,
         content=Template("livyconfigs.conf.j2")
         )
    File(os.path.join(params.dpprofiler_conf_dir, 'dpprofiler_job_configs.conf'),
         owner=params.dpprofiler_user,
         group=params.dpprofiler_group,
         mode=0600,
         content=Template("dpprofiler_job_configs.conf.j2")
         )
    # JAAS login config is only needed on secured (Kerberos) clusters.
    if params.dpprofiler_secured:
        File(os.path.join(params.dpprofiler_conf_dir, 'krb5JAASLogin.conf'),
             owner=params.dpprofiler_user,
             group=params.dpprofiler_group,
             mode=0600,
             content=Template("krb5JAASLogin.conf.j2")
             )
    # write out logback.xml
    logback_content = InlineTemplate(params.logback_content)
    File(format("{params.dpprofiler_conf_dir}/logback.xml"),
         content=logback_content,
         owner=params.dpprofiler_user,
         group=params.dpprofiler_group)
except Exception, e: message = "Error installing java.\n" + str(e) print message db_connection_check_structured_output = {"exit_code" : 1, "message": message} return db_connection_check_structured_output else: tmp_java_dir = tempfile.mkdtemp(prefix="jdk_tmp_", dir=tmp_dir) sudo = AMBARI_SUDO_BINARY if jdk_name.endswith(".bin"): chmod_cmd = ("chmod", "+x", jdk_download_target) install_cmd = format("cd {tmp_java_dir} && echo A | {jdk_download_target} -noregister && {sudo} cp -rp {tmp_java_dir}/* {java_dir}") elif jdk_name.endswith(".gz"): chmod_cmd = ("chmod","a+x", java_dir) install_cmd = format("cd {tmp_java_dir} && tar -xf {jdk_download_target} && {sudo} cp -rp {tmp_java_dir}/* {java_dir}") try: Directory(java_dir) Execute(chmod_cmd, not_if = format("test -e {java_exec}"), sudo = True) Execute(install_cmd, not_if = format("test -e {java_exec}")) File(format("{java_home}/bin/java"), mode=0755, cd_access="a") Execute(("chown","-R", getpass.getuser(), java_home), sudo = True) Execute(('chmod', '-R', '755', java_home), sudo = True) except Exception, e: message = "Error installing java.\n" + str(e) print message db_connection_check_structured_output = {"exit_code" : 1, "message": message} return db_connection_check_structured_output finally: Directory(tmp_java_dir, action="delete") # download DBConnectionVerification.jar from ambari-server resources try:
def setup_hadoop(): """ Setup hadoop files and directories """ import params Execute( ("setenforce", "0"), only_if="test -f /selinux/enforce", not_if= "(! which getenforce ) || (which getenforce && getenforce | grep -q Disabled)", sudo=True, ) # directories if params.has_namenode: Directory( params.hdfs_log_dir_prefix, create_parents=True, owner='root', group=params.user_group, mode=0775, cd_access='a', ) if params.has_namenode: Directory( params.hadoop_pid_dir_prefix, create_parents=True, owner='root', group='root', cd_access='a', ) Directory( format("{hadoop_pid_dir_prefix}/{hdfs_user}"), owner=params.hdfs_user, cd_access='a', ) Directory( params.hadoop_tmp_dir, create_parents=True, owner=params.hdfs_user, cd_access='a', ) # files if params.security_enabled: tc_owner = "root" else: tc_owner = params.hdfs_user # if WebHDFS is not enabled we need this jar to create hadoop folders and copy tarballs to HDFS. if not WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.dfs_type): # for source-code of jar goto contrib/fast-hdfs-resource File( format("{ambari_libs_dir}/fast-hdfs-resource.jar"), mode=0644, content=StaticFile("fast-hdfs-resource.jar")) if os.path.exists(params.hadoop_conf_dir): File( os.path.join(params.hadoop_conf_dir, 'commons-logging.properties'), owner=tc_owner, content=Template('commons-logging.properties.j2')) health_check_template_name = "health_check" File( os.path.join(params.hadoop_conf_dir, health_check_template_name), owner=tc_owner, content=Template(health_check_template_name + ".j2")) log4j_filename = os.path.join(params.hadoop_conf_dir, "log4j.properties") if (params.log4j_props != None): File( log4j_filename, mode=0644, group=params.user_group, owner=params.hdfs_user, content=InlineTemplate(params.log4j_props)) elif (os.path.exists( format("{params.hadoop_conf_dir}/log4j.properties"))): File( log4j_filename, mode=0644, group=params.user_group, owner=params.hdfs_user, ) if params.hadoop_metrics2_properties_content: File( 
os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"), owner=params.hdfs_user, group=params.user_group, content=InlineTemplate( params.hadoop_metrics2_properties_content)) else: File( os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"), owner=params.hdfs_user, group=params.user_group, content=Template("hadoop-metrics2.properties.j2"))
def storm(): import params Directory(params.log_dir, owner=params.storm_user, group=params.user_group, mode=0777, recursive=True) Directory( [params.pid_dir, params.local_dir], owner=params.storm_user, group=params.user_group, recursive=True, cd_access="a", ) Directory( params.conf_dir, group=params.user_group, recursive=True, cd_access="a", ) File(format("{conf_dir}/config.yaml"), content=Template("config.yaml.j2"), owner=params.storm_user, group=params.user_group) configurations = params.config['configurations']['storm-site'] File(format("{conf_dir}/storm.yaml"), content=yaml_config_template(configurations), owner=params.storm_user, group=params.user_group) if params.has_metric_collector: File(format("{conf_dir}/storm-metrics2.properties"), owner=params.storm_user, group=params.user_group, content=Template("storm-metrics2.properties.j2")) Execute(format( "{sudo} ln -s {metric_collector_sink_jar} {storm_lib_dir}/ambari-metrics-storm-sink.jar" ), not_if=format( "ls {storm_lib_dir}/ambari-metrics-storm-sink.jar"), only_if=format("ls {metric_collector_sink_jar}")) File(format("{conf_dir}/storm-env.sh"), owner=params.storm_user, content=InlineTemplate(params.storm_env_sh_template)) if params.security_enabled: TemplateConfig(format("{conf_dir}/storm_jaas.conf"), owner=params.storm_user) if params.hdp_stack_version != "" and compare_versions( params.hdp_stack_version, '2.2') >= 0: TemplateConfig(format("{conf_dir}/client_jaas.conf"), owner=params.storm_user) minRuid = configurations[ '_storm.min.ruid'] if configurations.has_key( '_storm.min.ruid') else '' min_user_ruid = int( minRuid) if minRuid.isdigit() else _find_real_user_min_uid() File(format("{conf_dir}/worker-launcher.cfg"), content=Template("worker-launcher.cfg.j2", min_user_ruid=min_user_ruid), owner='root', group=params.user_group)
def setup_spark(env, type, upgrade_type=None, action=None):
    """
    Lay down the Spark2 configuration.

    Creates pid/log dirs, the Spark HDFS user dir (server + config only),
    spark-defaults.conf, spark-env.sh, log4j.properties, metrics.properties,
    optional hive-site.xml, the Thrift server conf, and the fair-scheduler
    file on stacks that support SPARK_16PLUS.

    :param env: Environment object (not used directly here).
    :param type: component type; 'server' triggers HDFS user-dir creation.
                 NOTE(review): shadows the `type` builtin — kept for caller
                 compatibility.
    :param upgrade_type: non-None while an upgrade runs; selects
                         params.version over the formatted stack version.
    :param action: 'config' (with type == 'server') creates the HDFS dir.
    """
    import params

    # ensure that matching LZO libraries are installed for Spark
    lzo_utils.install_lzo_if_needed()

    Directory(
        [params.spark_pid_dir, params.spark_log_dir],
        owner=params.spark_user,
        group=params.user_group,
        mode=0775,
        create_parents=True,
        cd_access='a',
    )

    if type == 'server' and action == 'config':
        params.HdfsResource(params.spark_hdfs_user_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.spark_user,
                            mode=0775)
        params.HdfsResource(None, action="execute")

    PropertiesFile(
        format("{spark_conf}/spark-defaults.conf"),
        properties=params.config['configurations']['spark2-defaults'],
        key_value_delimiter=" ",
        owner=params.spark_user,
        group=params.spark_group,
        mode=0644)

    # create spark-env.sh in etc/conf dir
    File(
        os.path.join(params.spark_conf, 'spark-env.sh'),
        owner=params.spark_user,
        group=params.spark_group,
        content=InlineTemplate(params.spark_env_sh),
        mode=0644,
    )

    #create log4j.properties in etc/conf dir
    File(
        os.path.join(params.spark_conf, 'log4j.properties'),
        owner=params.spark_user,
        group=params.spark_group,
        content=params.spark_log4j_properties,
        mode=0644,
    )

    #create metrics.properties in etc/conf dir
    File(os.path.join(params.spark_conf, 'metrics.properties'),
         owner=params.spark_user,
         group=params.spark_group,
         content=InlineTemplate(params.spark_metrics_properties),
         mode=0644)

    if params.is_hive_installed:
        XmlConfig("hive-site.xml",
                  conf_dir=params.spark_conf,
                  configurations=params.spark_hive_properties,
                  owner=params.spark_user,
                  group=params.spark_group,
                  mode=0644)

    if params.has_spark_thriftserver:
        # Thrift server conf is owned by the hive user, matching the user
        # the thrift server runs as.
        PropertiesFile(params.spark_thrift_server_conf_file,
                       properties=params.config['configurations']
                       ['spark2-thrift-sparkconf'],
                       owner=params.hive_user,
                       group=params.user_group,
                       key_value_delimiter=" ",
                       mode=0644)

    # During an upgrade the target version applies; otherwise the current
    # formatted stack version.
    effective_version = params.version if upgrade_type is not None else params.stack_version_formatted
    if effective_version:
        effective_version = format_stack_version(effective_version)

    if params.spark_thrift_fairscheduler_content and effective_version and check_stack_feature(
            StackFeature.SPARK_16PLUS, effective_version):
        # create spark-thrift-fairscheduler.xml
        File(os.path.join(params.spark_conf, "spark-thrift-fairscheduler.xml"),
             owner=params.spark_user,
             group=params.spark_group,
             mode=0755,
             content=InlineTemplate(params.spark_thrift_fairscheduler_content))
def setup_ranger_plugin(component_select_name, service_name, previous_jdbc_jar,
                        component_downloaded_custom_connector,
                        component_driver_curl_source,
                        component_driver_curl_target, java_home, repo_name,
                        plugin_repo_dict, ranger_env_properties,
                        plugin_properties, policy_user, policymgr_mgr_url,
                        plugin_enabled, conf_dict, component_user,
                        component_group, cache_service_list,
                        plugin_audit_properties, plugin_audit_attributes,
                        plugin_security_properties,
                        plugin_security_attributes,
                        plugin_policymgr_ssl_properties,
                        plugin_policymgr_ssl_attributes, component_list,
                        audit_db_is_enabled, credential_file,
                        xa_audit_db_password, ssl_truststore_password,
                        ssl_keystore_password, api_version=None,
                        stack_version_override=None,
                        skip_if_rangeradmin_down=True,
                        is_security_enabled=False,
                        is_stack_supports_ranger_kerberos=False,
                        component_user_principal=None,
                        component_user_keytab=None):
    """
    Set up the Ranger plugin for a component (XML-config variant).

    Optionally stages the audit-DB JDBC driver, registers the repository in
    Ranger Admin (unless a local policy cache already proves it exists),
    writes ranger-security.xml, creates the policy cache files, renders the
    audit/security/policymgr-ssl XML configs with plain-text passwords
    scrubbed, and populates the credential keystore.

    :param api_version: 'v2' selects the RangeradminV2 client.
    :param skip_if_rangeradmin_down: tolerate an unreachable Ranger Admin.
    """
    # Stage the JDBC driver only when DB auditing is on and a real source
    # URL is configured (the "/None" suffix means no driver was set).
    if audit_db_is_enabled and component_driver_curl_source is not None and not component_driver_curl_source.endswith(
            "/None"):
        if previous_jdbc_jar and os.path.isfile(previous_jdbc_jar):
            # Drop the jar left behind by an earlier driver version.
            File(previous_jdbc_jar, action='delete')
        File(component_downloaded_custom_connector,
             content=DownloadSource(component_driver_curl_source),
             mode=0644)
        Execute(('cp', '--remove-destination',
                 component_downloaded_custom_connector,
                 component_driver_curl_target),
                path=["/bin", "/usr/bin/"],
                sudo=True)
        File(component_driver_curl_target, mode=0644)

    # Normalize away a trailing slash on the Ranger Admin URL.
    if policymgr_mgr_url.endswith('/'):
        policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
    stack_version = get_stack_version(component_select_name)
    if stack_version_override is not None:
        stack_version = stack_version_override
    component_conf_dir = conf_dict
    if plugin_enabled:
        # If a non-empty policy cache file already names this repo, the
        # repository was created earlier and the Ranger API calls are skipped.
        service_name_exist = False
        policycache_path = os.path.join('/etc', 'ranger', repo_name,
                                        'policycache')
        try:
            for cache_service in cache_service_list:
                policycache_json_file = format(
                    '{policycache_path}/{cache_service}_{repo_name}.json')
                if os.path.isfile(policycache_json_file) and os.path.getsize(
                        policycache_json_file) > 0:
                    with open(policycache_json_file) as json_file:
                        json_data = json.load(json_file)
                        if 'serviceName' in json_data and json_data[
                                'serviceName'] == repo_name:
                            service_name_exist = True
                            Logger.info(
                                "Skipping Ranger API calls, as policy cache file exists for {0}"
                                .format(service_name))
                            Logger.warning(
                                "If service name for {0} is not created on Ranger Admin UI, then to re-create it delete policy cache file: {1}"
                                .format(service_name, policycache_json_file))
                            break
        except Exception, err:
            # Best-effort probe: a corrupt cache file only costs an extra
            # (idempotent) repository-create call below.
            Logger.error(
                "Error occurred while fetching service name from policy cache file.\nError: {0}"
                .format(err))

        if not service_name_exist:
            if api_version is not None and api_version == 'v2':
                # v2 client supports Kerberos-aware repository creation.
                ranger_adm_obj = RangeradminV2(
                    url=policymgr_mgr_url,
                    skip_if_rangeradmin_down=skip_if_rangeradmin_down)
                ranger_adm_obj.create_ranger_repository(
                    service_name, repo_name, plugin_repo_dict,
                    ranger_env_properties['ranger_admin_username'],
                    ranger_env_properties['ranger_admin_password'],
                    ranger_env_properties['admin_username'],
                    ranger_env_properties['admin_password'], policy_user,
                    is_security_enabled, is_stack_supports_ranger_kerberos,
                    component_user, component_user_principal,
                    component_user_keytab)
            else:
                ranger_adm_obj = Rangeradmin(
                    url=policymgr_mgr_url,
                    skip_if_rangeradmin_down=skip_if_rangeradmin_down)
                ranger_adm_obj.create_ranger_repository(
                    service_name, repo_name, plugin_repo_dict,
                    ranger_env_properties['ranger_admin_username'],
                    ranger_env_properties['ranger_admin_password'],
                    ranger_env_properties['admin_username'],
                    ranger_env_properties['admin_password'], policy_user)

        # Marker file recording when the plugin was enabled.
        current_datetime = datetime.now()
        File(
            format('{component_conf_dir}/ranger-security.xml'),
            owner=component_user,
            group=component_group,
            mode=0644,
            content=InlineTemplate(
                format(
                    '<ranger>\n<enabled>{current_datetime}</enabled>\n</ranger>'
                )))
        Directory([
            os.path.join('/etc', 'ranger', repo_name),
            os.path.join('/etc', 'ranger', repo_name, 'policycache')
        ],
                  owner=component_user,
                  group=component_group,
                  mode=0775,
                  create_parents=True,
                  cd_access='a')
        # Pre-create the per-service policy cache files so the component
        # user can write to them.
        for cache_service in cache_service_list:
            File(os.path.join('/etc', 'ranger', repo_name, 'policycache',
                              format('{cache_service}_{repo_name}.json')),
                 owner=component_user,
                 group=component_group,
                 mode=0644)

        # remove plain-text password from xml configs
        plugin_audit_password_property = 'xasecure.audit.destination.db.password'
        plugin_audit_properties_copy = {}
        plugin_audit_properties_copy.update(plugin_audit_properties)
        if plugin_audit_password_property in plugin_audit_properties_copy:
            plugin_audit_properties_copy[
                plugin_audit_password_property] = "crypted"

        XmlConfig(format('ranger-{service_name}-audit.xml'),
                  conf_dir=component_conf_dir,
                  configurations=plugin_audit_properties_copy,
                  configuration_attributes=plugin_audit_attributes,
                  owner=component_user,
                  group=component_group,
                  mode=0744)

        XmlConfig(format('ranger-{service_name}-security.xml'),
                  conf_dir=component_conf_dir,
                  configurations=plugin_security_properties,
                  configuration_attributes=plugin_security_attributes,
                  owner=component_user,
                  group=component_group,
                  mode=0744)

        # remove plain-text password from xml configs
        plugin_password_properties = [
            'xasecure.policymgr.clientssl.keystore.password',
            'xasecure.policymgr.clientssl.truststore.password'
        ]
        plugin_policymgr_ssl_properties_copy = {}
        plugin_policymgr_ssl_properties_copy.update(
            plugin_policymgr_ssl_properties)
        for prop in plugin_password_properties:
            if prop in plugin_policymgr_ssl_properties_copy:
                plugin_policymgr_ssl_properties_copy[prop] = "crypted"

        # YARN uses a differently-named ssl config file than other services.
        if str(service_name).lower() == 'yarn':
            XmlConfig("ranger-policymgr-ssl-yarn.xml",
                      conf_dir=component_conf_dir,
                      configurations=plugin_policymgr_ssl_properties_copy,
                      configuration_attributes=plugin_policymgr_ssl_attributes,
                      owner=component_user,
                      group=component_group,
                      mode=0744)
        else:
            XmlConfig("ranger-policymgr-ssl.xml",
                      conf_dir=component_conf_dir,
                      configurations=plugin_policymgr_ssl_properties_copy,
                      configuration_attributes=plugin_policymgr_ssl_attributes,
                      owner=component_user,
                      group=component_group,
                      mode=0744)

        # creating symblink should be done by rpm package
        # setup_ranger_plugin_jar_symblink(stack_version, service_name, component_list)

        setup_ranger_plugin_keystore(service_name, audit_db_is_enabled,
                                     stack_version, credential_file,
                                     xa_audit_db_password,
                                     ssl_truststore_password,
                                     ssl_keystore_password, component_user,
                                     component_group, java_home)
def setup_ranger_storm(upgrade_type=None):
    """
    Configure the Ranger plugin for Storm.

    Only acts when both the Ranger Storm plugin and cluster security are
    enabled. Creates the HDFS audit directories, invokes the XML or legacy
    setup_ranger_plugin variant depending on stack support, and on stacks
    that support it places core-site.xml / hdfs-site.xml into the plugin's
    conf directory.

    :param upgrade_type: Upgrade Type such as "rolling" or "nonrolling";
                         non-None selects params.version as the stack version.
    """
    import params
    if params.enable_ranger_storm and params.security_enabled:
        stack_version = None
        if upgrade_type is not None:
            stack_version = params.version

        if params.retryAble:
            Logger.info(
                "Storm: Setup ranger: command retry enables thus retrying if ranger admin is down !"
            )
        else:
            Logger.info(
                "Storm: Setup ranger: command retry not enabled thus skipping if ranger admin is down !"
            )

        # Create the HDFS audit destination when HDFS audit is enabled.
        if params.xml_configurations_supported and params.enable_ranger_storm and params.xa_audit_hdfs_is_enabled:
            if params.has_namenode:
                params.HdfsResource("/ranger/audit",
                                    type="directory",
                                    action="create_on_execute",
                                    owner=params.hdfs_user,
                                    group=params.hdfs_user,
                                    mode=0755,
                                    recursive_chmod=True)
                params.HdfsResource("/ranger/audit/storm",
                                    type="directory",
                                    action="create_on_execute",
                                    owner=params.storm_user,
                                    group=params.storm_user,
                                    mode=0700,
                                    recursive_chmod=True)
                params.HdfsResource(None, action="execute")

        if params.xml_configurations_supported:
            # Ranger Admin v2 API is required for Kerberos-aware setup.
            api_version = None
            if params.stack_supports_ranger_kerberos:
                api_version = 'v2'
            from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_ranger_plugin
            setup_ranger_plugin(
                'storm-nimbus',
                'storm',
                params.previous_jdbc_jar,
                params.downloaded_custom_connector,
                params.driver_curl_source,
                params.driver_curl_target,
                params.java64_home,
                params.repo_name,
                params.storm_ranger_plugin_repo,
                params.ranger_env,
                params.ranger_plugin_properties,
                params.policy_user,
                params.policymgr_mgr_url,
                params.enable_ranger_storm,
                conf_dict=params.conf_dir,
                component_user=params.storm_user,
                component_group=params.user_group,
                cache_service_list=['storm'],
                plugin_audit_properties=params.config['configurations']
                ['ranger-storm-audit'],
                plugin_audit_attributes=params.
                config['configuration_attributes']['ranger-storm-audit'],
                plugin_security_properties=params.config['configurations']
                ['ranger-storm-security'],
                plugin_security_attributes=params.
                config['configuration_attributes']['ranger-storm-security'],
                plugin_policymgr_ssl_properties=params.config['configurations']
                ['ranger-storm-policymgr-ssl'],
                plugin_policymgr_ssl_attributes=params.config[
                    'configuration_attributes']['ranger-storm-policymgr-ssl'],
                component_list=['storm-client', 'storm-nimbus'],
                audit_db_is_enabled=params.xa_audit_db_is_enabled,
                credential_file=params.credential_file,
                xa_audit_db_password=params.xa_audit_db_password,
                ssl_truststore_password=params.ssl_truststore_password,
                ssl_keystore_password=params.ssl_keystore_password,
                stack_version_override=stack_version,
                skip_if_rangeradmin_down=not params.retryAble,
                api_version=api_version,
                is_security_enabled=params.security_enabled,
                is_stack_supports_ranger_kerberos=params.
                stack_supports_ranger_kerberos,
                component_user_principal=params.ranger_storm_principal
                if params.security_enabled else None,
                component_user_keytab=params.ranger_storm_keytab
                if params.security_enabled else None)
        else:
            # Legacy (non-XML) plugin setup path for older stacks.
            from resource_management.libraries.functions.setup_ranger_plugin import setup_ranger_plugin
            setup_ranger_plugin(
                'storm-nimbus',
                'storm',
                params.previous_jdbc_jar,
                params.downloaded_custom_connector,
                params.driver_curl_source,
                params.driver_curl_target,
                params.java64_home,
                params.repo_name,
                params.storm_ranger_plugin_repo,
                params.ranger_env,
                params.ranger_plugin_properties,
                params.policy_user,
                params.policymgr_mgr_url,
                params.enable_ranger_storm,
                conf_dict=params.conf_dir,
                component_user=params.storm_user,
                component_group=params.user_group,
                cache_service_list=['storm'],
                plugin_audit_properties=params.config['configurations']
                ['ranger-storm-audit'],
                plugin_audit_attributes=params.
                config['configuration_attributes']['ranger-storm-audit'],
                plugin_security_properties=params.config['configurations']
                ['ranger-storm-security'],
                plugin_security_attributes=params.
                config['configuration_attributes']['ranger-storm-security'],
                plugin_policymgr_ssl_properties=params.config['configurations']
                ['ranger-storm-policymgr-ssl'],
                plugin_policymgr_ssl_attributes=params.config[
                    'configuration_attributes']['ranger-storm-policymgr-ssl'],
                component_list=['storm-client', 'storm-nimbus'],
                audit_db_is_enabled=params.xa_audit_db_is_enabled,
                credential_file=params.credential_file,
                xa_audit_db_password=params.xa_audit_db_password,
                ssl_truststore_password=params.ssl_truststore_password,
                ssl_keystore_password=params.ssl_keystore_password,
                stack_version_override=stack_version,
                skip_if_rangeradmin_down=not params.retryAble)

        # Conf dir used by the Ranger Storm plugin implementation jar.
        site_files_create_path = format(
            '{storm_component_home_dir}/extlib-daemon/ranger-storm-plugin-impl/conf'
        )
        Directory(site_files_create_path,
                  owner=params.storm_user,
                  group=params.user_group,
                  mode=0775,
                  create_parents=True,
                  cd_access='a')

        if params.stack_supports_core_site_for_ranger_plugin and params.enable_ranger_storm and params.has_namenode and params.security_enabled:
            Logger.info(
                "Stack supports core-site.xml creation for Ranger plugin, creating create core-site.xml from namenode configuraitions"
            )
            setup_core_site_for_required_plugins(
                component_user=params.storm_user,
                component_group=params.user_group,
                create_core_site_path=site_files_create_path,
                config=params.config)
            # With an HA NameNode the plugin also needs hdfs-site.xml to
            # resolve the nameservice.
            if len(params.namenode_hosts) > 1:
                Logger.info(
                    'Ranger Storm plugin is enabled along with security and NameNode is HA , creating hdfs-site.xml'
                )
                XmlConfig("hdfs-site.xml",
                          conf_dir=site_files_create_path,
                          configurations=params.config['configurations']
                          ['hdfs-site'],
                          configuration_attributes=params.
                          config['configuration_attributes']['hdfs-site'],
                          owner=params.storm_user,
                          group=params.user_group,
                          mode=0644)
            else:
                Logger.info(
                    'Ranger Storm plugin is not enabled or security is disabled, removing hdfs-site.xml'
                )
                File(format('{site_files_create_path}/hdfs-site.xml'),
                     action="delete")
        else:
            Logger.info(
                "Stack does not support core-site.xml creation for Ranger plugin, skipping core-site.xml configurations"
            )
    else:
        Logger.info('Ranger Storm plugin is not enabled')
def test_delete_noexisting_directory(self, os_path_exists_mock): os_path_exists_mock.return_value = False with Environment('/') as env: Directory('/a/b/c/d', action='delete')
def titan(type=None, upgrade_type=None):
    """
    Provision Titan configuration files and directories.

    Server-only pieces (gremlin-server.yaml, credentials store, log/pid
    dirs, start script) are written when type == 'server'; the common conf
    dir, env script, SparkGraphComputer graph configs, HBase/Solr properties
    and log4j settings are written for every component type.

    :param type: component type; 'server' enables the server-only resources.
                 NOTE(review): shadows the `type` builtin — kept for callers.
    :param upgrade_type: accepted for interface parity; not used here.
    """
    import params
    import params_server
    if type == 'server':
        File(format("{params.titan_server_conf_dir}/gremlin-server.yaml"),
             mode=0644,
             group=params.user_group,
             owner=params.titan_user,
             content=InlineTemplate(params.gremlin_server_configs))
        # Seed an empty credentials store only on first install; never
        # overwrite an existing one.
        credentials_file = format("{params.titan_data_dir}/credentials.kryo")
        if not os.path.isfile(credentials_file):
            File(credentials_file,
                 mode=0644,
                 group=params.user_group,
                 owner=params.titan_user,
                 content="")
        credentials_property_file = format(
            "{params.titan_conf_dir}/tinkergraph-empty.properties")
        if not os.path.isfile(credentials_property_file):
            File(credentials_property_file,
                 mode=0644,
                 group=params.user_group,
                 owner=params.titan_user,
                 content=StaticFile("tinkergraph-empty.properties"))
        Directory(params.titan_log_dir,
                  create_parents=True,
                  owner=params.titan_user,
                  group=params.user_group,
                  mode=0775)
        Directory(params_server.titan_pid_dir,
                  create_parents=True,
                  owner=params.titan_user,
                  group=params.user_group,
                  mode=0775)
        File(format("{params.titan_bin_dir}/gremlin-server-script.sh"),
             mode=0755,
             group='root',
             owner='root',
             content=StaticFile("gremlin-server-script.sh"))
    Directory(params.titan_conf_dir,
              create_parents=True,
              owner=params.titan_user,
              group=params.user_group)
    File(format("{params.titan_conf_dir}/titan-env.sh"),
         mode=0644,
         group=params.user_group,
         owner=params.titan_user,
         content=InlineTemplate(params.titan_env_props))
    # Solr JAAS config is only needed on secured clusters and only written
    # if not already present.
    jaas_client_file = format('{titan_solr_client_jaas_file}')
    if not os.path.isfile(jaas_client_file) and params.security_enabled:
        File(jaas_client_file,
             owner=params.titan_user,
             group=params.user_group,
             mode=0644,
             content=Template('titan_solr_client_jaas.conf.j2'))
    # SparkGraphComputer
    Directory(params.titan_conf_hadoop_graph_dir,
              create_parents=True,
              owner=params.titan_user,
              group=params.user_group)
    File(format("{params.titan_conf_hadoop_graph_dir}/hadoop-gryo.properties"),
         mode=0644,
         group=params.user_group,
         owner=params.titan_user,
         content=InlineTemplate(params.titan_hadoop_gryo_props))
    File(format(
        "{params.titan_conf_hadoop_graph_dir}/hadoop-hbase-read.properties"),
         mode=0644,
         group=params.user_group,
         owner=params.titan_user,
         content=InlineTemplate(params.hadoop_hbase_read_props))
    # titan-hbase-solr_properties is always set to a default even if it's not in the payload
    File(format("{params.titan_conf_dir}/titan-hbase-solr.properties"),
         mode=0644,
         group=params.user_group,
         owner=params.titan_user,
         content=InlineTemplate(params.titan_hbase_solr_props))
    if (params.log4j_console_props != None):
        File(format("{params.titan_conf_dir}/log4j-console.properties"),
             mode=0644,
             group=params.user_group,
             owner=params.titan_user,
             content=InlineTemplate(params.log4j_console_props))
    elif (os.path.exists(
            format("{params.titan_conf_dir}/log4j-console.properties"))):
        # No managed content: keep the existing file, only fix ownership.
        File(format("{params.titan_conf_dir}/log4j-console.properties"),
             mode=0644,
             group=params.user_group,
             owner=params.titan_user)
    # Change titan ext directory for multiple user access
    Directory(params.titan_ext_dir,
              create_parents=True,
              owner=params.titan_user,
              group=params.user_group,
              mode=0775)
def configure(self, env):
    """
    Write out the Zeppelin server configuration.

    Creates the pid and install directories, renders zeppelin-site.xml,
    zeppelin-env.sh, shiro.ini and log4j.properties, and — when HBase is
    installed — copies hbase/hdfs/core-site.xml into the external
    dependency conf directory.

    :param env: Environment object; receives params and status_params.
    """
    import params
    import status_params
    env.set_params(params)
    env.set_params(status_params)
    self.create_zeppelin_log_dir(env)

    # create the pid and zeppelin dirs
    Directory([params.zeppelin_pid_dir, params.zeppelin_dir],
              owner=params.zeppelin_user,
              group=params.zeppelin_group,
              cd_access="a",
              create_parents=True,
              mode=0755)
    self.chown_zeppelin_pid_dir(env)

    # write out zeppelin-site.xml
    XmlConfig(
        "zeppelin-site.xml",
        conf_dir=params.conf_dir,
        configurations=params.config['configurations']['zeppelin-config'],
        owner=params.zeppelin_user,
        group=params.zeppelin_group)

    # write out zeppelin-env.sh
    env_content = InlineTemplate(params.zeppelin_env_content)
    File(format("{params.conf_dir}/zeppelin-env.sh"),
         content=env_content,
         owner=params.zeppelin_user,
         group=params.zeppelin_group)

    # write out shiro.ini
    shiro_ini_content = InlineTemplate(params.shiro_ini_content)
    File(format("{params.conf_dir}/shiro.ini"),
         content=shiro_ini_content,
         owner=params.zeppelin_user,
         group=params.zeppelin_group)

    # write out log4j.properties
    File(format("{params.conf_dir}/log4j.properties"),
         content=params.log4j_properties_content,
         owner=params.zeppelin_user,
         group=params.zeppelin_group)

    self.create_zeppelin_hdfs_conf_dir(env)

    # Zeppelin's HBase interpreter needs the client-side site files locally.
    if len(params.hbase_master_hosts) > 0 and params.is_hbase_installed:
        # copy hbase-site.xml
        XmlConfig(
            "hbase-site.xml",
            conf_dir=params.external_dependency_conf,
            configurations=params.config['configurations']['hbase-site'],
            configuration_attributes=params.
            config['configuration_attributes']['hbase-site'],
            owner=params.zeppelin_user,
            group=params.zeppelin_group,
            mode=0644)

        XmlConfig(
            "hdfs-site.xml",
            conf_dir=params.external_dependency_conf,
            configurations=params.config['configurations']['hdfs-site'],
            configuration_attributes=params.
            config['configuration_attributes']['hdfs-site'],
            owner=params.zeppelin_user,
            group=params.zeppelin_group,
            mode=0644)

        XmlConfig(
            "core-site.xml",
            conf_dir=params.external_dependency_conf,
            configurations=params.config['configurations']['core-site'],
            configuration_attributes=params.
            config['configuration_attributes']['core-site'],
            owner=params.zeppelin_user,
            group=params.zeppelin_group,
            mode=0644)
def configure(self, env):
    """
    Write out the Zeppelin server configuration (newer-stack variant).

    Creates the pid/install/notebook directories, renders zeppelin-site.xml,
    zeppelin-env.sh, shiro.ini and log4j.properties, registers the
    logfeeder input config, and — when HBase is installed — copies
    hbase/hdfs/core-site.xml (core-site with the ViewFS mount-table
    include) into the external dependency conf directory.

    :param env: Environment object; receives params and status_params.
    """
    import params
    import status_params
    env.set_params(params)
    env.set_params(status_params)
    self.create_zeppelin_log_dir(env)
    self.create_zeppelin_notebook_dir(env)

    # create the pid and zeppelin dirs
    Directory([params.zeppelin_pid_dir, params.zeppelin_dir],
              owner=params.zeppelin_user,
              group=params.zeppelin_group,
              cd_access="a",
              create_parents=True,
              mode=0755)
    self.chown_zeppelin_pid_dir(env)

    XmlConfig(
        "zeppelin-site.xml",
        conf_dir=params.conf_dir,
        configurations=params.config['configurations']['zeppelin-site'],
        owner=params.zeppelin_user,
        group=params.zeppelin_group)

    # write out zeppelin-env.sh
    env_content = InlineTemplate(params.zeppelin_env_content)
    File(format("{params.conf_dir}/zeppelin-env.sh"),
         content=env_content,
         owner=params.zeppelin_user,
         group=params.zeppelin_group)

    # write out shiro.ini
    shiro_ini_content = InlineTemplate(params.shiro_ini_content)
    File(format("{params.conf_dir}/shiro.ini"),
         content=shiro_ini_content,
         owner=params.zeppelin_user,
         group=params.zeppelin_group)

    # write out log4j.properties
    File(format("{params.conf_dir}/log4j.properties"),
         content=params.log4j_properties_content,
         owner=params.zeppelin_user,
         group=params.zeppelin_group)

    self.create_zeppelin_hdfs_conf_dir(env)

    # Register Zeppelin logs with LogSearch's logfeeder.
    generate_logfeeder_input_config(
        'zeppelin',
        Template("input.config-zeppelin.json.j2", extra_imports=[default]))

    # Zeppelin's HBase interpreter needs the client-side site files locally.
    if len(params.hbase_master_hosts) > 0 and params.is_hbase_installed:
        # copy hbase-site.xml
        XmlConfig(
            "hbase-site.xml",
            conf_dir=params.external_dependency_conf,
            configurations=params.config['configurations']['hbase-site'],
            configuration_attributes=params.
            config['configurationAttributes']['hbase-site'],
            owner=params.zeppelin_user,
            group=params.zeppelin_group,
            mode=0644)

        XmlConfig(
            "hdfs-site.xml",
            conf_dir=params.external_dependency_conf,
            configurations=params.config['configurations']['hdfs-site'],
            configuration_attributes=params.
            config['configurationAttributes']['hdfs-site'],
            owner=params.zeppelin_user,
            group=params.zeppelin_group,
            mode=0644)

        # core-site.xml may pull in a ViewFS mount-table include file.
        XmlConfig(
            "core-site.xml",
            conf_dir=params.external_dependency_conf,
            configurations=params.config['configurations']['core-site'],
            configuration_attributes=params.
            config['configurationAttributes']['core-site'],
            owner=params.zeppelin_user,
            group=params.zeppelin_group,
            mode=0644,
            xml_include_file=params.
            mount_table_xml_inclusion_file_full_path)

        if params.mount_table_content:
            File(params.mount_table_xml_inclusion_file_full_path,
                 owner=params.zeppelin_user,
                 group=params.zeppelin_group,
                 content=params.mount_table_content,
                 mode=0644)