def setup_ranger_audit_solr():
  """Provision the Ranger audit collection on the Logsearch Solr cloud.

  Stages the Ranger config set in a per-invocation temp folder, uploads it
  to ZooKeeper, then creates the audit collection with the configured
  shard/replication settings.
  """
  import params

  # Unique suffix so repeated runs never reuse a stale staging folder.
  # NOTE: the local name `random_num` is referenced by the format string
  # below and must not be renamed.
  random_num = random.random()
  staging_folder = format('{tmp_dir}/ranger_config_{ranger_solr_config_set}_{random_num}')

  solr_cloud_util.upload_configuration_to_zk(
    zookeeper_quorum=params.zookeeper_quorum,
    solr_znode=params.logsearch_solr_znode,
    config_set=params.ranger_solr_config_set,
    config_set_dir=params.ranger_solr_conf,
    tmp_config_set_dir=staging_folder,
    java64_home=params.java_home,
    user=params.unix_user,
    retry=30,
    interval=5)

  solr_cloud_util.create_collection(
    zookeeper_quorum=params.zookeeper_quorum,
    solr_znode=params.logsearch_solr_znode,
    collection=params.ranger_solr_collection_name,
    config_set=params.ranger_solr_config_set,
    java64_home=params.java_home,
    user=params.unix_user,
    shards=params.ranger_solr_shards,
    replication_factor=params.replication_factor)
def setup_ranger_audit_solr():
  """Set up the Ranger audit collection on the cluster Solr cloud.

  On kerberized stacks that support Ranger Kerberos, a JAAS file is rendered
  first so the Solr cloud CLI can authenticate; the config set is then
  uploaded to ZooKeeper and the audit collection created.
  """
  import params

  # The JAAS config is only rendered on secured, Kerberos-capable stacks
  # (short-circuit evaluation keeps this equivalent to the nested checks).
  if (params.security_enabled and params.stack_supports_ranger_kerberos
      and params.solr_jaas_file is not None):
    File(format("{solr_jaas_file}"),
         content=Template("ranger_solr_jaas_conf.j2"),
         owner=params.unix_user)

  check_znode()

  solr_cloud_util.upload_configuration_to_zk(
    zookeeper_quorum=params.zookeeper_quorum,
    solr_znode=params.solr_znode,
    config_set=params.ranger_solr_config_set,
    config_set_dir=params.ranger_solr_conf,
    tmp_dir=params.tmp_dir,
    java64_home=params.java_home,
    jaas_file=params.solr_jaas_file,
    retry=30,
    interval=5)

  solr_cloud_util.create_collection(
    zookeeper_quorum=params.zookeeper_quorum,
    solr_znode=params.solr_znode,
    collection=params.ranger_solr_collection_name,
    config_set=params.ranger_solr_config_set,
    java64_home=params.java_home,
    shards=params.ranger_solr_shards,
    replication_factor=int(params.replication_factor),
    jaas_file=params.solr_jaas_file)
def setup_ranger_audit_solr(): import params if params.security_enabled and params.stack_supports_ranger_kerberos: if params.solr_jaas_file is not None: File(format("{solr_jaas_file}"), content=Template("ranger_solr_jaas_conf.j2"), owner=params.unix_user ) check_znode() if params.stack_supports_ranger_solr_configs: Logger.info('Solr configrations supported,creating solr-configurations.') File(format("{ranger_solr_conf}/solrconfig.xml"), content=InlineTemplate(params.ranger_solr_config_content), owner=params.unix_user, group=params.unix_group, mode=0644 ) solr_cloud_util.upload_configuration_to_zk( zookeeper_quorum = params.zookeeper_quorum, solr_znode = params.solr_znode, config_set = params.ranger_solr_config_set, config_set_dir = params.ranger_solr_conf, tmp_dir = params.tmp_dir, java64_home = params.java_home, solrconfig_content = InlineTemplate(params.ranger_solr_config_content), jaas_file=params.solr_jaas_file, retry=30, interval=5 ) else: Logger.info('Solr configrations not supported, skipping solr-configurations.') solr_cloud_util.upload_configuration_to_zk( zookeeper_quorum = params.zookeeper_quorum, solr_znode = params.solr_znode, config_set = params.ranger_solr_config_set, config_set_dir = params.ranger_solr_conf, tmp_dir = params.tmp_dir, java64_home = params.java_home, jaas_file=params.solr_jaas_file, retry=30, interval=5) solr_cloud_util.create_collection( zookeeper_quorum = params.zookeeper_quorum, solr_znode = params.solr_znode, collection = params.ranger_solr_collection_name, config_set = params.ranger_solr_config_set, java64_home = params.java_home, shards = params.ranger_solr_shards, replication_factor = int(params.replication_factor), jaas_file = params.solr_jaas_file)
def create_collection(collection, config_set, jaasFile):
  """Create *collection* from *config_set* on the Infra Solr cloud,
  authenticating with *jaasFile* when one is supplied."""
  import params

  collection_opts = dict(
    zookeeper_quorum=params.zookeeper_quorum,
    solr_znode=params.infra_solr_znode,
    collection=collection,
    config_set=config_set,
    java64_home=params.java64_home,
    jaas_file=jaasFile,
    shards=params.atlas_solr_shards,
    replication_factor=params.infra_solr_replication_factor)

  solr_cloud_util.create_collection(**collection_opts)
def create_collection(collection, config_set):
  """Create *collection* from *config_set* on the Logsearch Solr cloud,
  running as the metadata user/group."""
  import params

  # Collected once so the call site stays a single expression.
  opts = {
    'zookeeper_quorum': params.zookeeper_quorum,
    'solr_znode': params.logsearch_solr_znode,
    'collection': collection,
    'config_set': config_set,
    'java64_home': params.java64_home,
    'user': params.metadata_user,
    'group': params.user_group,
    'shards': params.atlas_solr_shards,
    'replication_factor': params.logsearch_solr_replication_factor,
  }
  solr_cloud_util.create_collection(**opts)
def create_collection(collection, config_set):
  """Create *collection* from *config_set* on the Logsearch Solr cloud.

  Uses the Atlas JAAS file for authentication when security is enabled,
  otherwise no JAAS file is passed.
  """
  import params

  if params.security_enabled:
    jaas_conf = params.atlas_jaas_file
  else:
    jaas_conf = None

  solr_cloud_util.create_collection(
    zookeeper_quorum=params.zookeeper_quorum,
    solr_znode=params.logsearch_solr_znode,
    collection=collection,
    config_set=config_set,
    java64_home=params.java64_home,
    user=params.metadata_user,
    jaas_file=jaas_conf,
    shards=params.atlas_solr_shards,
    replication_factor=params.logsearch_solr_replication_factor)
def start(self, env, upgrade_type=None):
  """Start the Solr server.

  Installs and configures Solr, obtains a Kerberos ticket on secured
  clusters, applies the Ranger Solr plugin when supported, launches Solr in
  cloud mode, and — when Ranger audit-to-Solr is enabled — uploads the
  Ranger config set and creates the audit collection.
  """
  import params
  env.set_params(params)

  install_solr()
  self.configure(env)

  # kinit as the Solr service user before any secured operation.
  if params.security_enabled:
    kinit_cmd = format(
      "{kinit_path_local} -kt {solr_kerberos_keytab} {solr_kerberos_principal}; ")
    Execute(kinit_cmd, user=params.solr_user)

  # Ranger Solr Plugin related call
  if params.is_supported_solr_ranger:
    setup_ranger_solr()

  # Pick the env include file; a restart during downgrade still uses the
  # old solr.in.sh name.
  if params.restart_during_downgrade:
    solr_include = format('{solr_conf}/solr.in.sh')
  else:
    solr_include = format('{solr_conf}/solr-env.sh')

  Execute(
    format('{solr_bindir}/solr start -cloud -noprompt -s {solr_datadir} >> {solr_log} 2>&1'),
    environment={'SOLR_INCLUDE': solr_include},
    user=params.solr_user)

  if 'ranger-env' in params.config['configurations'] and params.audit_solr_enabled:
    solr_cloud_util.upload_configuration_to_zk(
      zookeeper_quorum=params.zookeeper_quorum,
      solr_znode=params.solr_znode,
      config_set=params.ranger_solr_config_set,
      config_set_dir=params.ranger_solr_conf,
      tmp_dir=params.tmp_dir,
      java64_home=params.java64_home,
      jaas_file=params.solr_jaas_file,
      retry=30,
      interval=5)
    solr_cloud_util.create_collection(
      zookeeper_quorum=params.zookeeper_quorum,
      solr_znode=params.solr_znode,
      collection=params.ranger_solr_collection_name,
      config_set=params.ranger_solr_config_set,
      java64_home=params.java64_home,
      shards=params.ranger_solr_shards,
      replication_factor=int(params.replication_factor),
      jaas_file=params.solr_jaas_file)
def create_collection(collection, config_set, jaasFile):
  """Create *collection* from *config_set* on the Infra Solr cloud.

  When a credential provider is configured the trust-store settings are
  forwarded (JKS assumed); otherwise they are all passed as None.
  """
  import params

  # Resolve the trust-store triple once instead of repeating the same
  # conditional on every keyword argument.
  if params.credential_provider:
    trust_store_location = params.truststore_location
    trust_store_password = params.truststore_password
    trust_store_type = "JKS"
  else:
    trust_store_location = trust_store_password = trust_store_type = None

  solr_cloud_util.create_collection(
    zookeeper_quorum=params.zookeeper_quorum,
    solr_znode=params.infra_solr_znode,
    collection=collection,
    config_set=config_set,
    java64_home=params.ambari_java_home,
    jaas_file=jaasFile,
    shards=params.atlas_solr_shards,
    replication_factor=params.infra_solr_replication_factor,
    trust_store_password=trust_store_password,
    trust_store_type=trust_store_type,
    trust_store_location=trust_store_location)
def create_collection(collection, config_set, jaasFile):
  """Create *collection* from *config_set* on the Infra Solr cloud,
  obtaining a Kerberos ticket first on secured clusters."""
  import params

  if params.security_enabled:
    # Authenticate as the metadata (Atlas) user before touching Solr.
    kinit_cmd = format(
      "{kinit_path_local} -kt {atlas_keytab_path} {atlas_jaas_principal};")
    Execute(kinit_cmd,
            tries=2,
            try_sleep=3,
            user=params.metadata_user,
            logoutput=True)

  solr_cloud_util.create_collection(
    zookeeper_quorum=params.zookeeper_quorum,
    solr_znode=params.infra_solr_znode,
    collection=collection,
    config_set=config_set,
    java64_home=params.java64_home,
    jaas_file=jaasFile,
    shards=params.atlas_solr_shards,
    replication_factor=params.infra_solr_replication_factor)
def setup_ranger_audit_solr(): import params if params.security_enabled and params.stack_supports_ranger_kerberos: if params.solr_jaas_file is not None: File(format("{solr_jaas_file}"), content=Template("ranger_solr_jaas_conf.j2"), owner=params.unix_user) try: check_znode() if params.stack_supports_ranger_solr_configs: Logger.info( 'Solr configrations supported,creating solr-configurations.') File(format("{ranger_solr_conf}/solrconfig.xml"), content=InlineTemplate(params.ranger_solr_config_content), owner=params.unix_user, group=params.unix_group, mode=0644) solr_cloud_util.upload_configuration_to_zk( zookeeper_quorum=params.zookeeper_quorum, solr_znode=params.solr_znode, config_set=params.ranger_solr_config_set, config_set_dir=params.ranger_solr_conf, tmp_dir=params.tmp_dir, java64_home=params.java_home, solrconfig_content=InlineTemplate( params.ranger_solr_config_content), jaas_file=params.solr_jaas_file, retry=30, interval=5) else: Logger.info( 'Solr configrations not supported, skipping solr-configurations.' 
) solr_cloud_util.upload_configuration_to_zk( zookeeper_quorum=params.zookeeper_quorum, solr_znode=params.solr_znode, config_set=params.ranger_solr_config_set, config_set_dir=params.ranger_solr_conf, tmp_dir=params.tmp_dir, java64_home=params.java_home, jaas_file=params.solr_jaas_file, retry=30, interval=5) if params.security_enabled and params.has_infra_solr \ and not params.is_external_solrCloud_enabled and params.stack_supports_ranger_kerberos: solr_cloud_util.add_solr_roles( params.config, roles=[ params.infra_solr_role_ranger_admin, params.infra_solr_role_ranger_audit, params.infra_solr_role_dev ], new_service_principals=[params.ranger_admin_jaas_principal]) service_default_principals_map = [('hdfs', 'nn'), ('hbase', 'hbase'), ('hive', 'hive'), ('kafka', 'kafka'), ('kms', 'rangerkms'), ('knox', 'knox'), ('nifi', 'nifi'), ('storm', 'storm'), ('yanr', 'yarn')] service_principals = get_ranger_plugin_principals( service_default_principals_map) solr_cloud_util.add_solr_roles( params.config, roles=[ params.infra_solr_role_ranger_audit, params.infra_solr_role_dev ], new_service_principals=service_principals) solr_cloud_util.create_collection( zookeeper_quorum=params.zookeeper_quorum, solr_znode=params.solr_znode, collection=params.ranger_solr_collection_name, config_set=params.ranger_solr_config_set, java64_home=params.java_home, shards=params.ranger_solr_shards, replication_factor=int(params.replication_factor), jaas_file=params.solr_jaas_file) if params.security_enabled and params.has_infra_solr \ and not params.is_external_solrCloud_enabled and params.stack_supports_ranger_kerberos: secure_znode( format('{solr_znode}/configs/{ranger_solr_config_set}'), params.solr_jaas_file) secure_znode( format( '{solr_znode}/collections/{ranger_solr_collection_name}'), params.solr_jaas_file) except ExecutionFailed as execution_exception: Logger.error( 'Error when configuring Solr for Ranger, Kindly check Solr/Zookeeper services to be up and running:\n {0}' 
.format(execution_exception))
def janusgraph_service(action='start'):
  """Start or stop the JanusGraph Gremlin server.

  On 'start' this also stages the Spark-on-YARN prerequisites (HDFS data
  dir, local Spark jar plugin dir, Spark jars on HDFS), renders config/JAAS
  files, provisions the JanusGraph Solr collection, and finally launches the
  gremlin-server script.  On 'stop' it invokes the same script with 'stop'.
  """
  import params
  import params_server
  cmd = format("{janusgraph_bin_dir}/gremlin-server-script.sh")
  # Positional args for gremlin-server-script.sh: pid file, log file,
  # err file, bin dir, server conf dir.
  cmd_params = params_server.janusgraph_pid_file + " " + params.janusgraph_log_file + " " + params.janusgraph_err_file + " " + params.janusgraph_bin_dir + " " + params.janusgraph_server_conf_dir
  if action == 'start':
    # kinit as the JanusGraph user first on secured clusters.
    if params.security_enabled:
      kinit_cmd = format(
        "{kinit_path_local} -kt {janusgraph_keytab_path} {janusgraph_jaas_princ};"
      )
      Execute(kinit_cmd, user=params.janusgraph_user)

    # For SparkGraphComputer: prepare /user/janusgraph/data on HDFS and
    # upload Spark jars to /user/spark/share/lib/spark (spark.yarn.jars
    # for Spark on YARN).
    # Create HDFS dir /user/janusgraph/data (skipped when it already exists).
    janusgraph_create_data_dir_command = format(
      "hadoop fs -mkdir -p {janusgraph_hdfs_data_dir}; hadoop fs -chown -R janusgraph:hdfs /user/janusgraph"
    )
    janusgraph_data_exist_command = format(
      "hadoop fs -test -e {janusgraph_hdfs_data_dir}>/dev/null 2>&1")
    Execute(janusgraph_create_data_dir_command,
            not_if=janusgraph_data_exist_command,
            logoutput=True,
            user=params.hdfs_user)

    # Create the local Spark plugin dir for Spark jars.
    janusgraph_create_spark_plugin_dir_command = format(
      "mkdir -p {janusgraph_ext_spark_plugin_dir}")
    janusgraph_ext_spark_plugin_dir_exist_command = format(
      "ls {janusgraph_ext_spark_plugin_dir}>/dev/null 2>&1")
    Execute(janusgraph_create_spark_plugin_dir_command,
            not_if=janusgraph_ext_spark_plugin_dir_exist_command,
            logoutput=True,
            user=params.janusgraph_user)

    # Fetch the Spark archive from HDFS unless it was already downloaded.
    janusgraph_get_spark_tar_command = format(
      "hadoop fs -get {janusgraph_spark2_archive_dir}/{janusgraph_spark2_archive_file} {janusgraph_ext_spark_plugin_dir}"
    )
    janusgraph_sparktargz_exist_command = format(
      "ls {janusgraph_ext_spark_plugin_dir}/{janusgraph_spark2_archive_file}>/dev/null 2>&1"
    )
    Execute(janusgraph_get_spark_tar_command,
            not_if=janusgraph_sparktargz_exist_command,
            logoutput=True,
            user=params.janusgraph_user)

    # Extract the Spark tar.gz unless jars are already unpacked.
    janusgraph_x_spark_targz_command = format(
      "tar -xzvf {janusgraph_ext_spark_plugin_dir}/{janusgraph_spark2_archive_file} -C {janusgraph_ext_spark_plugin_dir}/>/dev/null 2>&1"
    )
    janusgraph_x_sparkjars_exist_command = format(
      "ls {janusgraph_ext_spark_plugin_dir}/*.jar>/dev/null 2>&1")
    Execute(janusgraph_x_spark_targz_command,
            not_if=janusgraph_x_sparkjars_exist_command,
            logoutput=True,
            user=params.janusgraph_user)

    # Delete the Spark tar.gz after extraction.
    janusgraph_remove_spark_targz_command = format(
      "rm -f {janusgraph_ext_spark_plugin_dir}/{janusgraph_spark2_archive_file}"
    )
    # NOTE(review): this "still exists" check is defined but never used —
    # it looks like it was meant to guard the rm below (e.g. as only_if).
    janusgraph_sparkjars_still_exist_command = format(
      "ls {janusgraph_ext_spark_plugin_dir}/{janusgraph_spark2_archive_file}>/dev/null 2>&1"
    )
    # NOTE(review): "$?" in a fresh not_if shell is not the exit status of
    # any prior command here, so this guard appears to always let the rm
    # run ([[ 0 == 1 ]] fails, so not_if does not suppress). rm -f is
    # idempotent, so the end state is fine, but the guard is suspect —
    # confirm intent. "[[ ]]" also requires bash, not plain sh.
    janusgraph_sparkjars_deleted_command = format("[[ $? == 1 ]]")
    Execute(janusgraph_remove_spark_targz_command,
            not_if=janusgraph_sparkjars_deleted_command,
            logoutput=True,
            user=params.janusgraph_user)

    # Create HDFS dir /user/spark/share/lib/spark.
    janusgraph_create_spark_dir_command = format(
      "hadoop fs -mkdir -p {janusgraph_hdfs_spark_lib_dir}")
    janusgraph_spark_exist_command = format(
      "hadoop fs -test -e {janusgraph_hdfs_spark_lib_dir}>/dev/null 2>&1"
    )
    Execute(janusgraph_create_spark_dir_command,
            not_if=janusgraph_spark_exist_command,
            logoutput=True,
            user=params.hdfs_user)

    # Upload Spark jars to HDFS, replacing the bundled guava jar with
    # JanusGraph's own to avoid a version clash.
    janusgraph_put_spark_jar_command = format(
      "hadoop fs -put -f {janusgraph_ext_spark_plugin_dir}/* {janusgraph_hdfs_spark_lib_dir}; hadoop fs -rm -r {janusgraph_hdfs_spark_lib_dir}/guava*.jar; hadoop fs -put -f {janusgraph_home_dir}/lib/guava*.jar {janusgraph_hdfs_spark_lib_dir}"
    )
    janusgraph_sparkjar_exist_command = format(
      "hadoop fs -test -e {janusgraph_hdfs_spark_lib_dir}/*.jar>/dev/null 2>&1"
    )
    Execute(janusgraph_put_spark_jar_command,
            not_if=janusgraph_sparkjar_exist_command,
            logoutput=True,
            user=params.hdfs_user)

    # Remove guava*.jar, slf4j-log4j12*.jar and spark-core*.jar from the
    # local plugin dir — they conflict with JanusGraph's own classpath.
    janusgraph_rm_conflict_jars_command = format(
      "rm -rf {janusgraph_ext_spark_plugin_dir}/guava*.jar; rm -rf {janusgraph_ext_spark_plugin_dir}/slf4j-log4j12*.jar; rm -rf {janusgraph_ext_spark_plugin_dir}/spark-core*.jar; "
    )
    janusgraph_guava_exist_command = format(
      "ls {janusgraph_ext_spark_plugin_dir}/guava*.jar>/dev/null 2>&1")
    Execute(janusgraph_rm_conflict_jars_command,
            only_if=janusgraph_guava_exist_command,
            logoutput=True,
            user=params.janusgraph_user)

    # Generate yarn-site.xml in the JanusGraph conf dir if no yarn-client
    # is installed.
    # NOTE(review): not_if on XmlConfig — verify the resource actually
    # honors this argument the way Execute does.
    XmlConfig(
      "yarn-site.xml",
      not_if=params.yarn_client_installed,
      conf_dir=params.janusgraph_conf_dir,
      configurations=params.config['configurations']['yarn-site'],
      configuration_attributes=params.config['configuration_attributes']
      ['yarn-site'],
      group=params.user_group,
      owner=params.janusgraph_user,
      mode=0644)

    # Create the Solr JAAS file when security is enabled and it is missing.
    jaas_file = format('{janusgraph_solr_jaas_file}')
    if not os.path.isfile(jaas_file) and params.security_enabled:
      File(jaas_file,
           owner=params.janusgraph_user,
           group=params.user_group,
           mode=0644,
           content=Template('janusgraph_solr_jaas.conf.j2'))

    # Ensure the gremlin-server-metrics.csv file (and its dir) exist.
    metrics_file = params.janusgraph_server_csv_metrics
    janusgraph_create_metrics_dir_command = format(
      "mkdir -p {janusgraph_server_csv_metrics_dir}")
    janusgraph_metrics_dir_exist_command = format(
      "ls {janusgraph_server_csv_metrics_dir}>/dev/null 2>&1")
    Execute(janusgraph_create_metrics_dir_command,
            not_if=janusgraph_metrics_dir_exist_command,
            logoutput=True,
            user=params.janusgraph_user)
    File(
      metrics_file,
      owner=params.janusgraph_user,
      group=params.user_group,
      mode=0644,
    )

    # Upload the JanusGraph Solr config set to ZooKeeper.
    solr_cloud_util.upload_configuration_to_zk(
      zookeeper_quorum=params.zookeeper_quorum,
      solr_znode=params.solr_znode,
      config_set=params.janusgraph_solr_configset,
      config_set_dir=params.janusgraph_solr_conf_dir,
      tmp_dir=params.tmp_dir,
      java64_home=params.java64_home,
      jaas_file=jaas_file,
      retry=30,
      interval=5)

    # Create the JanusGraph Solr collection.
    solr_cloud_util.create_collection(
      zookeeper_quorum=params.zookeeper_quorum,
      solr_znode=params.solr_znode,
      collection=params.janusgraph_solr_collection_name,
      config_set=params.janusgraph_solr_configset,
      java64_home=params.java64_home,
      shards=params.janusgraph_solr_shards,
      replication_factor=int(params.infra_solr_replication_factor),
      jaas_file=jaas_file)

    # Launch the server unless a live pid already exists.
    daemon_cmd = format(cmd + " start " + cmd_params)
    no_op_test = format(
      "ls {params_server.janusgraph_pid_file} >/dev/null 2>&1 && ps `cat {params_server.janusgraph_pid_file}` >/dev/null 2>&1"
    )
    Execute(daemon_cmd, not_if=no_op_test, user=params.janusgraph_user)
  elif action == 'stop':
    import params_server
    daemon_cmd = format(
      "{janusgraph_bin_dir}/gremlin-server-script.sh stop " +
      params_server.janusgraph_pid_file)
    Execute(daemon_cmd, user=params.janusgraph_user)