def setup_atlas_storm():
  import params

  if params.has_atlas:
    if not params.host_sys_prepped:
      Package(params.atlas_ubuntu_plugin_package if OSCheck.is_ubuntu_family() else params.atlas_plugin_package,
          retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
          retry_count=params.agent_stack_retry_count)

    PropertiesFile(format('{conf_dir}/{atlas_conf_file}'),
        properties=params.atlas_props,
        owner=params.storm_user,
        group=params.user_group,
        mode=0644)

    atlas_storm_hook_dir = os.path.join(params.atlas_home_dir, "hook", "storm")
    if os.path.exists(atlas_storm_hook_dir):
      storm_extlib_dir = os.path.join(params.storm_component_home_dir, "extlib")
      if os.path.exists(storm_extlib_dir):
        src_files = os.listdir(atlas_storm_hook_dir)
        for file_name in src_files:
          atlas_storm_hook_file_name = os.path.join(atlas_storm_hook_dir, file_name)
          storm_lib_file_name = os.path.join(storm_extlib_dir, file_name)
          if os.path.isfile(atlas_storm_hook_file_name):
            Link(storm_lib_file_name, to=atlas_storm_hook_file_name)
def configure(self, env):
  import params
  env.set_params(params)

  Directory([params.infra_solr_metrics_conf_dir, params.infra_solr_metrics_usr_dir,
             params.infra_solr_metrics_pid_dir, params.infra_solr_metrics_log_dir],
      mode=0755,
      cd_access='a',
      owner=params.infra_solr_user,
      group=params.user_group,
      create_parents=True,
      recursive_ownership=True)

  PropertiesFile(format("{infra_solr_metrics_conf_dir}/infra-solr-metrics.properties"),
      properties=params.infra_solr_metrics_properties,
      mode=0644,
      owner=params.infra_solr_user,
      group=params.user_group)

  File(format("{infra_solr_metrics_conf_dir}/log4j2.xml"),
      content=InlineTemplate(params.infra_solr_metrics_log4j2_content),
      owner=params.infra_solr_user,
      group=params.user_group)

  File(format("{infra_solr_metrics_usr_dir}/infra-solr-metrics-env.sh"),
      content=InlineTemplate(params.infra_solr_metrics_env_content),
      mode=0755,
      owner=params.infra_solr_user,
      group=params.user_group)
def setup_atlas_hook(service_name, service_props, atlas_hook_filepath, owner, group):
  """
  Generate the atlas-application.properties.xml file by merging the service_props
  with the Atlas application-properties.
  :param service_name: Service Name to identify if it is a client-only service,
                       which will generate slightly different configs.
  :param service_props: Atlas configs specific to this service that must be merged.
  :param atlas_hook_filepath: Config file to write, e.g.,
                              /etc/falcon/conf/atlas-application.properties.xml
  :param owner: File owner
  :param group: File group
  """
  import params
  atlas_props = default('/configurations/application-properties', {})

  if has_atlas_in_cluster():
    # Take the subset of shared Atlas properties that this hook needs.
    merged_props = {}
    shared_props = SHARED_ATLAS_HOOK_CONFIGS.copy()
    if service_name in NON_CLIENT_SERVICES:
      shared_props = shared_props.union(SHARED_ATLAS_HOOK_SECURITY_CONFIGS_FOR_NON_CLIENT_SERVICE)

    for prop in shared_props:
      if prop in atlas_props:
        merged_props[prop] = atlas_props[prop]

    merged_props.update(service_props)

    Logger.info(format("Generating Atlas Hook config file {atlas_hook_filepath}"))
    PropertiesFile(atlas_hook_filepath,
        properties=merged_props,
        owner=owner,
        group=group,
        mode=0644)
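# A minimal sketch of a typical caller of setup_atlas_hook(), assuming a Hive-style hook.
# The service constant, override keys and paths below are illustrative assumptions,
# not taken from the snippet above.
def _example_setup_hive_atlas_hook():
  import params
  atlas_hook_filepath = os.path.join(params.hive_config_dir, 'atlas-application.properties')
  service_props = {'atlas.hook.hive.synchronous': 'false'}  # hypothetical service-specific override
  setup_atlas_hook('HIVE', service_props, atlas_hook_filepath, params.hive_user, params.user_group)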
def setup_usersync(upgrade_type=None):
  import params

  PropertiesFile(format("{usersync_home}/install.properties"),
      properties=params.config['configurations']['usersync-properties'])

  custom_config = dict()
  custom_config['unix_user'] = params.unix_user
  custom_config['unix_group'] = params.unix_group

  ModifyPropertiesFile(format("{usersync_home}/install.properties"),
      properties=custom_config)

  cmd = format("cd {usersync_home} && ") + as_sudo([format('{usersync_home}/setup.sh')])
  Execute(cmd, environment={'JAVA_HOME': params.java_home}, logoutput=True)

  File([params.usersync_start, params.usersync_stop],
      owner=params.unix_user)

  File(params.usersync_services_file,
      mode=0755)

  Directory(params.usersync_log_dir,
      owner=params.unix_user,
      group=params.unix_group)
def configure(self, env, upgrade_type=None):
  import params

  Directory([params.superset_pid_dir, params.superset_log_dir,
             params.superset_config_dir, params.superset_home_dir],
      mode=0755,
      cd_access='a',
      owner=params.superset_user,
      group=params.user_group,
      create_parents=True,
      recursive_ownership=True)

  File(format("{params.superset_config_dir}/superset-env.sh"),
      mode=0755,
      owner=params.superset_user,
      group=params.user_group,
      content=InlineTemplate(params.superset_env_sh_template))

  File(os.path.join(params.superset_bin_dir, 'superset.sh'),
      owner=params.superset_user,
      group=params.user_group,
      mode=0755,
      content=Template("superset.sh"))

  superset_config = mutable_config_dict(params.config["configurations"]["superset"])

  if params.superset_db_uri:
    superset_config["SQLALCHEMY_DATABASE_URI"] = params.superset_db_uri

  PropertiesFile("superset_config.py",
      dir=params.superset_config_dir,
      properties=quote_string_values(superset_config),
      owner=params.superset_user,
      group=params.user_group)

  # Initialize DB and create admin user.
  Execute(format("source {params.superset_config_dir}/superset-env.sh ; {params.superset_bin_dir}/superset db upgrade"),
      user=params.superset_user)
  Execute(format("source {params.superset_config_dir}/superset-env.sh ; {params.superset_bin_dir}/fabmanager create-admin --app superset --username '{params.superset_admin_user}' --password '{params.superset_admin_password!p}' --firstname '{params.superset_admin_firstname}' --lastname '{params.superset_admin_lastname}' --email '{params.superset_admin_email}'"),
      user=params.superset_user)
  Execute(format("source {params.superset_config_dir}/superset-env.sh ; {params.superset_bin_dir}/superset init"),
      user=params.superset_user)

  # Configure Druid Cluster in superset DB
  if len(params.druid_coordinator_hosts) > 0:
    Execute(format("source {params.superset_config_dir}/superset-env.sh ; {params.superset_bin_dir}/superset configure_druid_cluster --name druid-ambari --coordinator-host {params.druid_coordinator_host} --coordinator-port {params.druid_coordinator_port} --broker-host {params.druid_router_host} --broker-port {params.druid_router_port} --coordinator-endpoint druid/coordinator/v1/metadata --broker-endpoint druid/v2"),
        user=params.superset_user)
def flume(action=None):
  import params
  from service_mapping import flume_win_service_name

  if action == 'config':
    ServiceConfig(flume_win_service_name,
        action="configure",
        start_type="manual")

    ServiceConfig(flume_win_service_name,
        action="change_user",
        username=params.flume_user,
        password=Script.get_password(params.flume_user))

    # remove previously defined meta's
    for n in find_expected_agent_names(params.flume_conf_dir):
      os.unlink(os.path.join(params.flume_conf_dir, n, 'ambari-meta.json'))

    flume_agents = {}
    if params.flume_conf_content is not None:
      flume_agents = build_flume_topology(params.flume_conf_content)

    for agent in flume_agents.keys():
      flume_agent_conf_dir = os.path.join(params.flume_conf_dir, agent)
      flume_agent_conf_file = os.path.join(flume_agent_conf_dir, 'flume.conf')
      flume_agent_meta_file = os.path.join(flume_agent_conf_dir, 'ambari-meta.json')
      flume_agent_log4j_file = os.path.join(flume_agent_conf_dir, 'log4j.properties')
      flume_agent_env_file = os.path.join(flume_agent_conf_dir, 'flume-env.ps1')

      Directory(flume_agent_conf_dir)

      PropertiesFile(flume_agent_conf_file,
          properties=flume_agents[agent])

      File(flume_agent_log4j_file,
          content=InlineTemplate(params.flume_log4j_content, agent_name=agent))

      File(flume_agent_meta_file,
          content=json.dumps(ambari_meta(agent, flume_agents[agent])))

      File(flume_agent_env_file,
          owner=params.flume_user,
          content=InlineTemplate(params.flume_env_sh_template))

      if params.has_metric_collector:
        File(os.path.join(flume_agent_conf_dir, "flume-metrics2.properties"),
            owner=params.flume_user,
            content=Template("flume-metrics2.properties.j2"))
def setup_logfeeder():
  import params

  Directory([params.logfeeder_log_dir, params.logfeeder_pid_dir, params.logfeeder_checkpoint_folder],
      mode=0755,
      cd_access='a',
      create_parents=True)

  Directory([params.logfeeder_dir, params.logsearch_logfeeder_conf],
      mode=0755,
      cd_access='a',
      create_parents=True,
      recursive_ownership=True)

  File(params.logfeeder_log,
      mode=0644,
      content='')

  PropertiesFile(format("{logsearch_logfeeder_conf}/logfeeder.properties"),
      properties=params.logfeeder_properties)

  File(format("{logsearch_logfeeder_conf}/logfeeder-env.sh"),
      content=InlineTemplate(params.logfeeder_env_content),
      mode=0755)

  File(format("{logsearch_logfeeder_conf}/log4j.xml"),
      content=InlineTemplate(params.logfeeder_log4j_content))

  File(format("{logsearch_logfeeder_conf}/grok-patterns"),
      content=InlineTemplate(params.logfeeder_grok_patterns),
      encoding="utf-8")

  for file_name in params.logfeeder_default_config_file_names:
    File(format("{logsearch_logfeeder_conf}/" + file_name),
        content=Template(file_name + ".j2"))

  File(format("{logsearch_logfeeder_conf}/input.config-logfeeder-custom.json"),
      action='delete')

  for service, pattern_content in params.logfeeder_metadata.iteritems():
    File(format("{logsearch_logfeeder_conf}/input.config-" + service.replace('-logsearch-conf', '') + ".json"),
        content=InlineTemplate(pattern_content, extra_imports=[default]))

  if params.logfeeder_system_log_enabled:
    File(format("{logsearch_logfeeder_conf}/input.config-system_messages.json"),
        content=params.logfeeder_system_messages_content)
    File(format("{logsearch_logfeeder_conf}/input.config-secure_log.json"),
        content=params.logfeeder_secure_log_content)

  if params.security_enabled:
    File(format("{logfeeder_jaas_file}"),
        content=Template("logfeeder_jaas.conf.j2"))
def create_atlas_configs():
  import params

  if params.sac_enabled:
    atlas_application_properties = params.application_properties
    atlas_application_properties_override = params.application_properties_override
    atlas_application_properties_yarn = params.application_properties_yarn

    for property_name in params.atlas_application_properties_to_include:
      if property_name in atlas_application_properties and not property_name in atlas_application_properties_override:
        atlas_application_properties_override[property_name] = atlas_application_properties[property_name]

    if params.security_enabled:
      for property_name in params.secure_atlas_application_properties_to_include.keys():
        if not property_name in atlas_application_properties_override:
          atlas_application_properties_override[property_name] = params.secure_atlas_application_properties_to_include[property_name]

    PropertiesFile(params.atlas_properties_path,
        properties=atlas_application_properties_override,
        mode=0644,
        owner=params.spark_user,
        group=params.user_group)

    atlas_application_properties_override_copy = atlas_application_properties_override.copy()
    if params.security_enabled:
      atlas_application_properties_override_copy.pop("atlas.jaas.KafkaClient.option.keyTab")
    atlas_application_properties_override_copy.update(atlas_application_properties_yarn)
    atlas_application_properties_yarn = atlas_application_properties_override_copy

    PropertiesFile(params.atlas_properties_for_yarn_path,
        properties=atlas_application_properties_yarn,
        mode=0644,
        owner=params.spark_user,
        group=params.user_group)
def setup_atlas_hive():
  import params

  if params.has_atlas:
    if not params.host_sys_prepped:
      # FIXME HACK: install the package during RESTART/START when install_packages is not triggered.
      Package(params.atlas_plugin_package)

    PropertiesFile(format('{hive_config_dir}/client.properties'),
        properties=params.atlas_client_props,
        owner=params.hive_user,
        group=params.user_group,
        mode=0644)
def setup_atlas_falcon():
  import params

  if params.has_atlas:
    if not params.host_sys_prepped:
      Package(params.atlas_ubuntu_plugin_package if OSCheck.is_ubuntu_family() else params.atlas_plugin_package,
          retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
          retry_count=params.agent_stack_retry_count)

    PropertiesFile(format('{falcon_conf_dir}/{atlas_conf_file}'),
        properties=params.atlas_props,
        owner=params.falcon_user,
        group=params.user_group,
        mode=0644)
def setup_data_analytics_studio_configs():
  import params

  Directory(os.path.dirname(params.conf_dir),
      owner=params.data_analytics_studio_user,
      create_parents=True,
      mode=0755)

  Directory(params.conf_dir,
      owner=params.data_analytics_studio_user,
      create_parents=True,
      mode=0755)

  Directory(params.data_analytics_studio_pid_dir,
      owner=params.data_analytics_studio_user,
      create_parents=True,
      mode=0755)

  Directory(params.data_analytics_studio_log_dir,
      owner=params.data_analytics_studio_user,
      create_parents=True,
      mode=0755)

  File(params.das_conf,
      content=InlineTemplate(params.das_conf_content),
      owner=params.data_analytics_studio_user,
      mode=0400)

  PropertiesFile(params.das_hive_site_conf,
      properties=params.das_hive_site_conf_dict,
      owner=params.data_analytics_studio_user,
      mode=0400)

  PropertiesFile(params.das_hive_interactive_site_conf,
      properties=params.das_hive_interactive_site_conf_dict,
      owner=params.data_analytics_studio_user,
      mode=0400)
def configure(self, env, isInstall=False):
  import params
  env.set_params(params)

  kafka_consumer_config = mutable_config_dict(params.config['configurations']['kafka-consumer'])
  PropertiesFile("consumer.properties",
      dir=params.conf_dir,
      properties=kafka_consumer_config,
      owner=params.cmak_user,
      group=params.user_group)

  kafka_manager_config = mutable_config_dict(params.config['configurations']['kafka-manager'])
  # kafka_manager_config['kafka-manager.zkhosts'] = '"' + params.zookeeper_connect + '"'
  PropertiesFile("application.conf",
      dir=params.conf_dir,
      properties=kafka_manager_config,
      owner=params.cmak_user,
      group=params.user_group)
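# mutable_config_dict() is used above and in several snippets below. A minimal sketch of such a
# helper, assuming it simply copies the agent's read-only configuration mapping into a plain dict
# that callers can modify before rendering a PropertiesFile (a sketch, not the actual implementation):
def _example_mutable_config_dict(config):
  mutable = {}
  for key, value in config.iteritems():  # Python 2 iteration, matching the surrounding scripts
    mutable[key] = value
  return mutable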
def setup_atlas_hive(configuration_directory=None):
  import params

  if params.has_atlas:
    if configuration_directory is None:
      configuration_directory = format("{hive_config_dir}")

    if not params.host_sys_prepped:
      # FIXME HACK: install the package during RESTART/START when install_packages is not triggered.
      Package(params.atlas_ubuntu_plugin_package if OSCheck.is_ubuntu_family() else params.atlas_plugin_package)

    PropertiesFile(format('{configuration_directory}/client.properties'),
        properties=params.atlas_client_props,
        owner=params.hive_user,
        group=params.user_group,
        mode=0644)
def connect(kafka_broker_config):
  import params

  connect_config = mutable_config_dict(params.config['configurations']['connect-conf'])
  connect_config['bootstrap.servers'] = build_broker_list(params.kafka_hosts, kafka_broker_config['listeners'])

  PropertiesFile("connect-npi.properties",
      dir=params.conf_dir,
      properties=connect_config,
      owner=params.kafka_user,
      group=params.user_group)

  if params.connect_log4j_props is not None:
    File(format("{conf_dir}/connect-log4j.properties"),
        mode=0644,
        group=params.user_group,
        owner=params.kafka_user,
        content=params.connect_log4j_props)
def configure(self, env):
  import params, status_params
  env.set_params(params)
  env.set_params(status_params)

  File(format("/home/{app_user}/{app_package_bundle}"),
      content=DownloadSource(format("{app_repository_url}/{app_package_bundle}"),
                             redownload_files=("SNAPSHOT" in format("{app_package_bundle}"))),
      mode=0644,
      owner=format('{app_user}'))

  Execute(format('tar -xzvf /home/{app_user}/{app_package_bundle}'),
      user=format('{app_user}'))

  Execute(format('ln -sfv /home/{app_user}/{app_name}/{app_name}.jar /etc/init.d/{app_name}'))

  for t in params.app_configuration_templates:
    File(format('/home/{app_user}/{app_name}/{t.to}'),
        content=Template(format("{t.from}")),
        mode=0644,
        owner=format('{app_user}'),
        group=format('{app_group}'))

  properties = {}
  for k, v in params.config['configurations'][format('{app_name}')].items():
    properties[k] = format(v)

  PropertiesFile(format('application.properties'),
      dir=format('/home/{app_user}/{app_name}/'),
      properties=properties,
      owner=format('{app_user}'),
      group=format('{app_group}'))
def setup_tagsync(upgrade_type=None):
  import params

  ranger_tagsync_home = params.ranger_tagsync_home
  ranger_home = params.ranger_home
  ranger_tagsync_conf = params.ranger_tagsync_conf

  Directory(format("{ranger_tagsync_conf}"),
      owner=params.unix_user,
      group=params.unix_group,
      create_parents=True)

  Directory(params.ranger_pid_dir,
      mode=0755,
      create_parents=True,
      owner=params.unix_user,
      group=params.user_group,
      cd_access="a")

  if params.stack_supports_pid:
    File(format('{ranger_tagsync_conf}/ranger-tagsync-env-piddir.sh'),
        content=format("export TAGSYNC_PID_DIR_PATH={ranger_pid_dir}\nexport UNIX_TAGSYNC_USER={unix_user}"),
        owner=params.unix_user,
        group=params.unix_group,
        mode=0755)

  Directory(params.tagsync_log_dir,
      create_parents=True,
      owner=params.unix_user,
      group=params.unix_group,
      cd_access="a",
      mode=0755)

  File(format('{ranger_tagsync_conf}/ranger-tagsync-env-logdir.sh'),
      content=format("export RANGER_TAGSYNC_LOG_DIR={tagsync_log_dir}"),
      owner=params.unix_user,
      group=params.unix_group,
      mode=0755)

  XmlConfig("ranger-tagsync-site.xml",
      conf_dir=ranger_tagsync_conf,
      configurations=params.config['configurations']['ranger-tagsync-site'],
      configuration_attributes=params.config['configuration_attributes']['ranger-tagsync-site'],
      owner=params.unix_user,
      group=params.unix_group,
      mode=0644)

  if params.stack_supports_ranger_tagsync_ssl_xml_support:
    Logger.info("Stack supports tagsync-ssl configurations, performing the same.")
    setup_tagsync_ssl_configs()
  else:
    Logger.info("Stack doesn't support tagsync-ssl configurations, skipping the same.")

  PropertiesFile(format('{ranger_tagsync_conf}/atlas-application.properties'),
      properties=params.tagsync_application_properties,
      mode=0755,
      owner=params.unix_user,
      group=params.unix_group)

  File(format('{ranger_tagsync_conf}/log4j.properties'),
      owner=params.unix_user,
      group=params.unix_group,
      content=InlineTemplate(params.tagsync_log4j),
      mode=0644)

  File(params.tagsync_services_file,
      mode=0755)

  Execute(('ln', '-sf', format('{tagsync_services_file}'), '/usr/bin/ranger-tagsync'),
      not_if=format("ls /usr/bin/ranger-tagsync"),
      only_if=format("ls {tagsync_services_file}"),
      sudo=True)

  create_core_site_xml(ranger_tagsync_conf)
def kafka(upgrade_type=None):
  import params
  ensure_base_directories()

  kafka_server_config = mutable_config_dict(params.config['configurations']['kafka-broker'])
  # This still has an issue of hostnames being alphabetically out-of-order for broker.id in HDP-2.2.
  # Starting in HDP 2.3, Kafka handles the generation of broker.id so Ambari doesn't have to.
  effective_version = params.stack_version_formatted if upgrade_type is None else format_stack_version(params.version)
  Logger.info(format("Effective stack version: {effective_version}"))

  # In HDP-2.2 (Apache Kafka 0.8.1.1) we used to generate broker.ids based on hosts and add them to
  # kafka's server.properties. In later versions brokers can generate their own ids based on a zookeeper sequence.
  # We need to preserve the broker.id when the user is upgrading from HDP-2.2 to any higher version.
  # Once preserved, it is written to kafka.log.dirs/meta.properties and used from there on.
  # Similarly, we need to preserve the port during the upgrade.
  if upgrade_type is not None and params.upgrade_direction == Direction.UPGRADE and \
      check_stack_feature(StackFeature.CREATE_KAFKA_BROKER_ID, params.current_version) and \
      check_stack_feature(StackFeature.KAFKA_LISTENERS, params.version):
    if len(params.kafka_hosts) > 0 and params.hostname in params.kafka_hosts:
      brokerid = str(sorted(params.kafka_hosts).index(params.hostname))
      kafka_server_config['broker.id'] = brokerid
      Logger.info(format("Calculating broker.id as {brokerid}"))

    if 'port' in kafka_server_config:
      port = kafka_server_config['port']
      Logger.info(format("Port config from previous version: {port}"))
      listeners = kafka_server_config['listeners']
      kafka_server_config['listeners'] = listeners.replace("6667", port)
      Logger.info(format("Kafka listeners after the port update: {listeners}"))
      del kafka_server_config['port']

  if effective_version is not None and effective_version != "" and \
      check_stack_feature(StackFeature.CREATE_KAFKA_BROKER_ID, effective_version):
    if len(params.kafka_hosts) > 0 and params.hostname in params.kafka_hosts:
      brokerid = str(sorted(params.kafka_hosts).index(params.hostname))
      kafka_server_config['broker.id'] = brokerid
      Logger.info(format("Calculating broker.id as {brokerid}"))

  # listeners and advertised.listeners are only added in 2.3.0.0 onwards.
  if effective_version is not None and effective_version != "" and \
      check_stack_feature(StackFeature.KAFKA_LISTENERS, effective_version):
    listeners = kafka_server_config['listeners'].replace("localhost", params.hostname)
    Logger.info(format("Kafka listeners: {listeners}"))
    kafka_server_config['listeners'] = listeners

    if params.security_enabled and params.kafka_kerberos_enabled:
      Logger.info("Kafka kerberos security is enabled.")
      kafka_server_config['advertised.listeners'] = listeners
      Logger.info(format("Kafka advertised listeners: {listeners}"))
    elif 'advertised.listeners' in kafka_server_config:
      advertised_listeners = kafka_server_config['advertised.listeners'].replace("localhost", params.hostname)
      kafka_server_config['advertised.listeners'] = advertised_listeners
      Logger.info(format("Kafka advertised listeners: {advertised_listeners}"))
  else:
    kafka_server_config['host.name'] = params.hostname

  if params.has_metric_collector:
    kafka_server_config['kafka.timeline.metrics.hosts'] = params.ams_collector_hosts
    kafka_server_config['kafka.timeline.metrics.port'] = params.metric_collector_port
    kafka_server_config['kafka.timeline.metrics.protocol'] = params.metric_collector_protocol
    kafka_server_config['kafka.timeline.metrics.truststore.path'] = params.metric_truststore_path
    kafka_server_config['kafka.timeline.metrics.truststore.type'] = params.metric_truststore_type
    kafka_server_config['kafka.timeline.metrics.truststore.password'] = params.metric_truststore_password

  kafka_data_dir = kafka_server_config['log.dirs']
  kafka_data_dirs = filter(None, kafka_data_dir.split(","))

  Directory(kafka_data_dirs,
      mode=0755,
      cd_access='a',
      owner=params.kafka_user,
      group=params.user_group,
      create_parents=True,
      recursive_ownership=True)

  PropertiesFile("server.properties",
      dir=params.conf_dir,
      properties=kafka_server_config,
      owner=params.kafka_user,
      group=params.user_group)

  File(format("{conf_dir}/kafka-env.sh"),
      owner=params.kafka_user,
      content=InlineTemplate(params.kafka_env_sh_template))

  if params.log4j_props is not None:
    File(format("{conf_dir}/log4j.properties"),
        mode=0644,
        group=params.user_group,
        owner=params.kafka_user,
        content=params.log4j_props)

  if params.security_enabled and params.kafka_kerberos_enabled:
    if params.kafka_jaas_conf_template:
      File(format("{conf_dir}/kafka_jaas.conf"),
          owner=params.kafka_user,
          content=InlineTemplate(params.kafka_jaas_conf_template))
    else:
      TemplateConfig(format("{conf_dir}/kafka_jaas.conf"),
          owner=params.kafka_user)

    if params.kafka_client_jaas_conf_template:
      File(format("{conf_dir}/kafka_client_jaas.conf"),
          owner=params.kafka_user,
          content=InlineTemplate(params.kafka_client_jaas_conf_template))
    else:
      TemplateConfig(format("{conf_dir}/kafka_client_jaas.conf"),
          owner=params.kafka_user)

  # On some OSes this folder may not exist, so create it before pushing files there.
  Directory(params.limits_conf_dir,
      create_parents=True,
      owner='root',
      group='root')

  File(os.path.join(params.limits_conf_dir, 'kafka.conf'),
      owner='root',
      group='root',
      mode=0644,
      content=Template("kafka.conf.j2"))

  File(os.path.join(params.conf_dir, 'tools-log4j.properties'),
      owner='root',
      group='root',
      mode=0644,
      content=Template("tools-log4j.properties.j2"))

  setup_symlink(params.kafka_managed_pid_dir, params.kafka_pid_dir)
  setup_symlink(params.kafka_managed_log_dir, params.kafka_log_dir)
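# Illustration only: how the broker.id above is derived from the sorted Kafka host list.
# The host names below are hypothetical.
_kafka_hosts = ['c6402.ambari.apache.org', 'c6401.ambari.apache.org']
_hostname = 'c6402.ambari.apache.org'
_broker_id = str(sorted(_kafka_hosts).index(_hostname))  # -> '1'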
def setup_logfeeder():
  import params

  Directory([params.logfeeder_log_dir, params.logfeeder_pid_dir, params.logfeeder_checkpoint_folder],
      mode=0755,
      cd_access='a',
      create_parents=True)

  Directory([params.logfeeder_dir, params.logsearch_logfeeder_conf],
      mode=0755,
      cd_access='a',
      create_parents=True,
      recursive_ownership=True)

  File(format("{logfeeder_log_dir}/{logfeeder_log}"),
      mode=0644,
      content='')

  if params.credential_store_enabled:
    params.logfeeder_env_config = update_credential_provider_path(params.logfeeder_env_config,
        'logfeeder-env', params.logfeeder_env_jceks_file, params.logsearch_user, params.user_group)
    params.logfeeder_properties[HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME] = 'jceks://file' + params.logfeeder_env_jceks_file
    File(format("{logsearch_logfeeder_keys_folder}/ks_pass.txt"), action="delete")
    File(format("{logsearch_logfeeder_keys_folder}/ts_pass.txt"), action="delete")
  else:
    Directory(params.logsearch_logfeeder_keys_folder,
        cd_access='a',
        mode=0755,
        owner=params.logsearch_user,
        group=params.user_group)

    File(format("{logsearch_logfeeder_keys_folder}/ks_pass.txt"),
        content=params.logfeeder_keystore_password,
        mode=0600,
        owner=params.logsearch_user,
        group=params.user_group)

    File(format("{logsearch_logfeeder_keys_folder}/ts_pass.txt"),
        content=params.logfeeder_truststore_password,
        mode=0600,
        owner=params.logsearch_user,
        group=params.user_group)

  PropertiesFile(format("{logsearch_logfeeder_conf}/logfeeder.properties"),
      properties=params.logfeeder_properties)

  File(format("{logsearch_logfeeder_conf}/logfeeder-env.sh"),
      content=InlineTemplate(params.logfeeder_env_content),
      mode=0755)

  File(format("{logsearch_logfeeder_conf}/log4j.xml"),
      content=InlineTemplate(params.logfeeder_log4j_content))

  File(format("{logsearch_logfeeder_conf}/grok-patterns"),
      content=InlineTemplate(params.logfeeder_grok_patterns),
      encoding="utf-8")

  File(format("{logsearch_logfeeder_conf}/global.config.json"),
      content=Template("global.config.json.j2"))

  File(format("{logsearch_logfeeder_conf}/input.config-ambari.json"),
      content=InlineTemplate(params.logfeeder_ambari_config_content),
      encoding="utf-8")

  File(format("{logsearch_logfeeder_conf}/output.config.json"),
      content=InlineTemplate(params.logfeeder_output_config_content),
      encoding="utf-8")

  if params.logfeeder_system_log_enabled:
    File(format("{logsearch_logfeeder_conf}/input.config-system_messages.json"),
        content=params.logfeeder_system_messages_content)
    File(format("{logsearch_logfeeder_conf}/input.config-secure_log.json"),
        content=params.logfeeder_secure_log_content)

  if params.logsearch_solr_kerberos_enabled:
    File(format("{logfeeder_jaas_file}"),
        content=Template("logfeeder_jaas.conf.j2"))
def druid(upgrade_type=None, nodeType=None):
  import params
  ensure_base_directories()

  # Environment Variables
  File(format("{params.druid_conf_dir}/druid-env.sh"),
      owner=params.druid_user,
      content=InlineTemplate(params.druid_env_sh_template))

  # common config
  druid_common_config = mutable_config_dict(params.config['configurations']['druid-common'])
  # User cannot override below configs
  druid_common_config['druid.host'] = params.hostname
  druid_common_config['druid.extensions.directory'] = params.druid_extensions_dir
  druid_common_config['druid.extensions.hadoopDependenciesDir'] = params.druid_hadoop_dependencies_dir
  druid_common_config['druid.selectors.indexing.serviceName'] = params.config['configurations']['druid-overlord']['druid.service']
  druid_common_config['druid.selectors.coordinator.serviceName'] = params.config['configurations']['druid-coordinator']['druid.service']

  # delete the user and password if empty, otherwise derby will fail.
  if 'derby' == druid_common_config['druid.metadata.storage.type']:
    del druid_common_config['druid.metadata.storage.connector.user']
    del druid_common_config['druid.metadata.storage.connector.password']

  druid_env_config = mutable_config_dict(params.config['configurations']['druid-env'])

  PropertiesFile("common.runtime.properties",
      dir=params.druid_common_conf_dir,
      properties=druid_common_config,
      owner=params.druid_user,
      group=params.user_group)
  Logger.info("Created common.runtime.properties")

  File(format("{params.druid_common_conf_dir}/druid-log4j.xml"),
      mode=0644,
      owner=params.druid_user,
      group=params.user_group,
      content=InlineTemplate(params.log4j_props))
  Logger.info("Created log4j file")

  File("/etc/logrotate.d/druid",
      mode=0644,
      owner='root',
      group='root',
      content=InlineTemplate(params.logrotate_props))
  Logger.info("Created log rotate file")

  # Write Hadoop Configs if configured
  if 'core-site' in params.config['configurations']:
    XmlConfig("core-site.xml",
        conf_dir=params.druid_common_conf_dir,
        configurations=params.config['configurations']['core-site'],
        configuration_attributes=params.config['configuration_attributes']['core-site'],
        owner=params.druid_user,
        group=params.user_group)

  if 'mapred-site' in params.config['configurations']:
    XmlConfig("mapred-site.xml",
        conf_dir=params.druid_common_conf_dir,
        configurations=params.config['configurations']['mapred-site'],
        configuration_attributes=params.config['configuration_attributes']['mapred-site'],
        owner=params.druid_user,
        group=params.user_group)

  if 'yarn-site' in params.config['configurations']:
    XmlConfig("yarn-site.xml",
        conf_dir=params.druid_common_conf_dir,
        configurations=params.config['configurations']['yarn-site'],
        configuration_attributes=params.config['configuration_attributes']['yarn-site'],
        owner=params.druid_user,
        group=params.user_group)

  if 'hdfs-site' in params.config['configurations']:
    XmlConfig("hdfs-site.xml",
        conf_dir=params.druid_common_conf_dir,
        configurations=params.config['configurations']['hdfs-site'],
        configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
        owner=params.druid_user,
        group=params.user_group)

  # node specific configs
  for node_type in ['coordinator', 'overlord', 'historical', 'broker', 'middleManager', 'router']:
    node_config_dir = format('{params.druid_conf_dir}/{node_type}')
    node_type_lowercase = node_type.lower()

    # Write runtime.properties file
    node_config = mutable_config_dict(params.config['configurations'][format('druid-{node_type_lowercase}')])
    PropertiesFile("runtime.properties",
        dir=node_config_dir,
        properties=node_config,
        owner=params.druid_user,
        group=params.user_group)
    Logger.info(format("Created druid-{node_type_lowercase} runtime.properties"))

    # Write jvm configs
    File(format('{node_config_dir}/jvm.config'),
        owner=params.druid_user,
        group=params.user_group,
        content=InlineTemplate(
            "-server \n-Xms{{node_heap_memory}}m \n-Xmx{{node_heap_memory}}m \n-XX:MaxDirectMemorySize={{node_direct_memory}}m \n-Dlog4j.configurationFile={{log4j_config_file}} \n-Dlog4j.debug \n{{node_jvm_opts}}",
            node_heap_memory=druid_env_config[format('druid.{node_type_lowercase}.jvm.heap.memory')],
            log4j_config_file=format("{params.druid_common_conf_dir}/druid-log4j.xml"),
            node_direct_memory=druid_env_config[format('druid.{node_type_lowercase}.jvm.direct.memory')],
            node_jvm_opts=druid_env_config[format('druid.{node_type_lowercase}.jvm.opts')]))
    Logger.info(format("Created druid-{node_type_lowercase} jvm.config"))

  # All druid nodes have a dependency on hdfs_client
  ensure_hadoop_directories()
  # Pull all required dependencies
  pulldeps()
def metadata(type='server'):
  import params

  # Needed by both Server and Client
  Directory(params.conf_dir,
      mode=0755,
      cd_access='a',
      owner=params.metadata_user,
      group=params.user_group,
      create_parents=True)

  if type == "server":
    Directory([params.pid_dir],
        mode=0755,
        cd_access='a',
        owner=params.metadata_user,
        group=params.user_group,
        create_parents=True)
    Directory(format('{conf_dir}/solr'),
        mode=0755,
        cd_access='a',
        owner=params.metadata_user,
        group=params.user_group,
        create_parents=True,
        recursive_ownership=True)
    Directory(params.log_dir,
        mode=0755,
        cd_access='a',
        owner=params.metadata_user,
        group=params.user_group,
        create_parents=True)
    Directory(params.data_dir,
        mode=0644,
        cd_access='a',
        owner=params.metadata_user,
        group=params.user_group,
        create_parents=True)
    Directory(params.expanded_war_dir,
        mode=0644,
        cd_access='a',
        owner=params.metadata_user,
        group=params.user_group,
        create_parents=True)
    File(format("{expanded_war_dir}/atlas.war"),
        content=StaticFile(format('{metadata_home}/server/webapp/atlas.war')))
    File(format("{conf_dir}/atlas-log4j.xml"),
        mode=0644,
        owner=params.metadata_user,
        group=params.user_group,
        content=InlineTemplate(params.metadata_log4j_content))
    File(format("{conf_dir}/atlas-env.sh"),
        owner=params.metadata_user,
        group=params.user_group,
        mode=0755,
        content=InlineTemplate(params.metadata_env_content))

    if not is_empty(params.atlas_admin_username) and not is_empty(params.atlas_admin_password):
      psswd_output = hashlib.sha256(params.atlas_admin_password).hexdigest()
      ModifyPropertiesFile(format("{conf_dir}/users-credentials.properties"),
          properties={format('{atlas_admin_username}'): format('ROLE_ADMIN::{psswd_output}')},
          owner=params.metadata_user)

    files_to_chown = [format("{conf_dir}/policy-store.txt"), format("{conf_dir}/users-credentials.properties")]
    for file in files_to_chown:
      if os.path.exists(file):
        Execute(('chown', format('{metadata_user}:{user_group}'), file),
            sudo=True)
        Execute(('chmod', '644', file),
            sudo=True)

    if params.metadata_solrconfig_content:
      File(format("{conf_dir}/solr/solrconfig.xml"),
          mode=0644,
          owner=params.metadata_user,
          group=params.user_group,
          content=InlineTemplate(params.metadata_solrconfig_content))

  # Needed by both Server and Client
  PropertiesFile(format('{conf_dir}/{conf_file}'),
      properties=params.application_properties,
      mode=0600,
      owner=params.metadata_user,
      group=params.user_group)

  if params.security_enabled:
    TemplateConfig(format(params.atlas_jaas_file),
        owner=params.metadata_user)

  if type == 'server' and params.search_backend_solr and params.has_infra_solr:
    solr_cloud_util.setup_solr_client(params.config)
    check_znode()
    jaasFile = params.atlas_jaas_file if params.security_enabled else None
    upload_conf_set('atlas_configs', jaasFile)

    if params.security_enabled:
      # update permissions before creating the collections
      solr_cloud_util.add_solr_roles(params.config,
          roles=[params.infra_solr_role_atlas, params.infra_solr_role_ranger_audit, params.infra_solr_role_dev],
          new_service_principals=[params.atlas_jaas_principal])

    create_collection('vertex_index', 'atlas_configs', jaasFile)
    create_collection('edge_index', 'atlas_configs', jaasFile)
    create_collection('fulltext_index', 'atlas_configs', jaasFile)

    if params.security_enabled:
      secure_znode(format('{infra_solr_znode}/configs/atlas_configs'), jaasFile)
      secure_znode(format('{infra_solr_znode}/collections/vertex_index'), jaasFile)
      secure_znode(format('{infra_solr_znode}/collections/edge_index'), jaasFile)
      secure_znode(format('{infra_solr_znode}/collections/fulltext_index'), jaasFile)

  File(params.atlas_hbase_setup,
      group=params.user_group,
      owner=params.hbase_user,
      content=Template("atlas_hbase_setup.rb.j2"))

  is_atlas_upgrade_support = check_stack_feature(StackFeature.ATLAS_UPGRADE_SUPPORT,
      get_stack_feature_version(params.config))

  if is_atlas_upgrade_support and params.security_enabled:
    File(params.atlas_kafka_setup,
        group=params.user_group,
        owner=params.kafka_user,
        content=Template("atlas_kafka_acl.sh.j2"))

    # files required only in case the kafka broker is not configured as a component on this host
    if not params.host_with_kafka:
      File(format("{kafka_conf_dir}/kafka-env.sh"),
          owner=params.kafka_user,
          content=InlineTemplate(params.kafka_env_sh_template))
      File(format("{kafka_conf_dir}/kafka_jaas.conf"),
          group=params.user_group,
          owner=params.kafka_user,
          content=Template("kafka_jaas.conf.j2"))

  if params.stack_supports_atlas_hdfs_site_on_namenode_ha and len(params.namenode_host) > 1:
    XmlConfig("hdfs-site.xml",
        conf_dir=params.conf_dir,
        configurations=params.config['configurations']['hdfs-site'],
        configuration_attributes=params.config['configurationAttributes']['hdfs-site'],
        owner=params.metadata_user,
        group=params.user_group,
        mode=0644)
  else:
    File(format('{conf_dir}/hdfs-site.xml'), action="delete")

  '''
  Atlas requires hadoop core-site.xml to resolve users/groups synced in HadoopUGI for the
  authentication and authorization process. Earlier, core-site.xml was available in the HBase
  conf directory, which is part of the Atlas class-path; from stack 2.6 onwards it is no longer
  there, so core-site.xml has to be created in the Atlas conf directory.
  '''
  if params.stack_supports_atlas_core_site and params.has_namenode:
    XmlConfig("core-site.xml",
        conf_dir=params.conf_dir,
        configurations=params.config['configurations']['core-site'],
        configuration_attributes=params.config['configurationAttributes']['core-site'],
        owner=params.metadata_user,
        group=params.user_group,
        mode=0644)

  Directory(format('{metadata_home}/'),
      owner=params.metadata_user,
      group=params.user_group,
      recursive_ownership=True)
def restore_collection(env):
  """
  Restore collections by copying snapshots with the backup_* prefix, then removing the old
  folders and stripping the backup_* prefix from the folder names.
  """
  import params, command_commons
  env.set_params(command_commons)

  if command_commons.solr_num_shards == 0:
    raise Exception(format("The 'solr_shards' command parameter must be set."))

  if not command_commons.solr_restore_config_set:
    raise Exception(format("The 'solr_restore_config_set' command parameter must be set."))

  Logger.info("Original core / host map: " + str(command_commons.solr_backup_core_host_map))
  Logger.info("New core / host map: " + str(command_commons.solr_restore_core_host_map))

  original_core_host_pairs = command_commons.sort_core_host_pairs(command_commons.solr_backup_core_host_map)
  new_core_host_pairs = command_commons.sort_core_host_pairs(command_commons.solr_restore_core_host_map)

  core_pairs = command_commons.create_core_pairs(original_core_host_pairs, new_core_host_pairs)
  Logger.info("Generated core pairs: " + str(core_pairs))

  Logger.info(format("Remove write.lock files from folder '{index_location}'"))
  for write_lock_file in command_commons.get_files_by_pattern(format("{index_location}"), 'write.lock'):
    File(write_lock_file, action="delete")

  Logger.info(format("Restore Solr Collection {collection} from {index_location} ..."))

  if command_commons.collection in ["ranger_audits", "history", "hadoop_logs", "audit_logs",
                                    "vertex_index", "edge_index", "fulltext_index"]:
    # Make sure Ambari won't delete an important collection
    raise Exception(format("Selected collection for restore is: {collection}. It is not recommended to restore on default collections."))

  hdfs_cores_on_host = []

  for core_pair in core_pairs:
    src_core = core_pair['src_core']
    target_core = core_pair['target_core']

    if src_core in command_commons.skip_cores:
      Logger.info(format("Core '{src_core}' (src) is filtered out."))
      continue
    elif target_core in command_commons.skip_cores:
      Logger.info(format("Core '{target_core}' (target) is filtered out."))
      continue

    core_data = command_commons.solr_restore_core_data
    only_if_cmd = format("test -d {index_location}/snapshot.{src_core}")
    core_root_dir = format("{solr_datadir}/backup_{target_core}")
    core_root_without_backup_dir = format("{solr_datadir}/{target_core}")

    if command_commons.solr_hdfs_path:
      Directory([core_root_dir],
          mode=0755,
          cd_access='a',
          create_parents=True,
          owner=params.infra_solr_user,
          group=params.user_group,
          only_if=only_if_cmd)
    else:
      Directory([format("{core_root_dir}/data/index"),
                 format("{core_root_dir}/data/tlog"),
                 format("{core_root_dir}/data/snapshot_metadata")],
          mode=0755,
          cd_access='a',
          create_parents=True,
          owner=params.infra_solr_user,
          group=params.user_group,
          only_if=only_if_cmd)

    core_details = core_data[target_core]['properties']
    core_properties = {}
    core_properties['numShards'] = core_details['numShards']
    core_properties['collection.configName'] = command_commons.solr_restore_config_set
    core_properties['name'] = target_core
    core_properties['replicaType'] = core_details['replicaType']
    core_properties['collection'] = command_commons.collection
    if command_commons.solr_hdfs_path:
      core_properties['coreNodeName'] = 'backup_' + core_details['coreNodeName']
    else:
      core_properties['coreNodeName'] = core_details['coreNodeName']
    core_properties['shard'] = core_details['shard']

    if command_commons.solr_hdfs_path:
      hdfs_solr_node_folder = command_commons.solr_hdfs_path + format("/backup_{collection}/") + core_details['coreNodeName']
      source_folder = format("{index_location}/snapshot.{src_core}/")
      if command_commons.check_folder_exists(source_folder):
        hdfs_cores_on_host.append(target_core)
        command_commons.HdfsResource(format("{hdfs_solr_node_folder}/data/index/"),
            type="directory",
            action="create_on_execute",
            source=source_folder,
            owner=params.infra_solr_user,
            mode=0755,
            recursive_chown=True,
            recursive_chmod=True)
        command_commons.HdfsResource(format("{hdfs_solr_node_folder}/data/tlog"),
            type="directory",
            action="create_on_execute",
            owner=params.infra_solr_user,
            mode=0755)
        command_commons.HdfsResource(format("{hdfs_solr_node_folder}/data/snapshot_metadata"),
            type="directory",
            action="create_on_execute",
            owner=params.infra_solr_user,
            mode=0755)
    else:
      copy_cmd = format("cp -r {index_location}/snapshot.{src_core}/* {core_root_dir}/data/index/") if command_commons.solr_keep_backup \
        else format("mv {index_location}/snapshot.{src_core}/* {core_root_dir}/data/index/")
      Execute(copy_cmd,
          only_if=only_if_cmd,
          user=params.infra_solr_user,
          logoutput=True)

    PropertiesFile(core_root_dir + '/core.properties',
        properties=core_properties,
        owner=params.infra_solr_user,
        group=params.user_group,
        mode=0644,
        only_if=only_if_cmd)

  Execute(format("rm -rf {solr_datadir}/{collection}*"),
      user=params.infra_solr_user,
      logoutput=True)

  for core_pair in core_pairs:
    src_core = core_pair['src_core']
    src_host = core_pair['src_host']
    target_core = core_pair['target_core']

    if src_core in command_commons.skip_cores:
      Logger.info(format("Core '{src_core}' (src) is filtered out."))
      continue
    elif target_core in command_commons.skip_cores:
      Logger.info(format("Core '{target_core}' (target) is filtered out."))
      continue

    if os.path.exists(format("{index_location}/snapshot.{src_core}")):
      data_to_save = {}
      host_core_data = command_commons.solr_restore_core_data
      core_details = host_core_data[target_core]['properties']
      core_node = core_details['coreNodeName']
      data_to_save['core'] = target_core
      data_to_save['core_node'] = core_node
      data_to_save['old_host'] = core_pair['target_host']
      data_to_save['new_host'] = src_host
      if command_commons.solr_hdfs_path:
        data_to_save['new_core_node'] = "backup_" + core_node
      else:
        data_to_save['new_core_node'] = core_node

      command_commons.write_core_file(target_core, data_to_save)
      jaas_file = params.infra_solr_jaas_file if params.security_enabled else None
      core_json_location = format("{index_location}/{target_core}.json")
      znode_json_location = format("/restore_metadata/{collection}/{target_core}.json")
      solr_cloud_util.copy_solr_znode_from_local(params.zookeeper_quorum, params.infra_solr_znode,
          params.java64_home, jaas_file, core_json_location, znode_json_location)

    core_root_dir = format("{solr_datadir}/backup_{target_core}")
    core_root_without_backup_dir = format("{solr_datadir}/{target_core}")

    if command_commons.solr_hdfs_path:
      if target_core in hdfs_cores_on_host:
        Logger.info(format("Core data '{target_core}' is located on this host, processing..."))
        host_core_data = command_commons.solr_restore_core_data
        core_details = host_core_data[target_core]['properties']
        core_node = core_details['coreNodeName']
        collection_core_dir = command_commons.solr_hdfs_path + format("/{collection}/{core_node}")
        backup_collection_core_dir = command_commons.solr_hdfs_path + format("/backup_{collection}/{core_node}")
        command_commons.HdfsResource(collection_core_dir,
            type="directory",
            action="delete_on_execute",
            owner=params.infra_solr_user)
        if command_commons.check_hdfs_folder_exists(backup_collection_core_dir):
          collection_backup_core_dir = command_commons.solr_hdfs_path + format("/{collection}/backup_{core_node}")
          command_commons.move_hdfs_folder(backup_collection_core_dir, collection_backup_core_dir)
      else:
        Logger.info(format("Core data '{target_core}' is not located on this host, skipping..."))

    Execute(format("mv {core_root_dir} {core_root_without_backup_dir}"),
        user=params.infra_solr_user,
        logoutput=True,
        only_if=format("test -d {core_root_dir}"))

    Directory([format("{core_root_without_backup_dir}")],
        mode=0755,
        cd_access='a',
        create_parents=True,
        owner=params.infra_solr_user,
        group=params.user_group,
        recursive_ownership=True,
        only_if=format("test -d {core_root_without_backup_dir}"))

    if command_commons.solr_hdfs_path and not command_commons.solr_keep_backup:
      only_if_cmd = format("test -d {index_location}/snapshot.{src_core}")
      Directory(format("{index_location}/snapshot.{src_core}"),
          action="delete",
          only_if=only_if_cmd,
          owner=params.infra_solr_user)
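# Illustration only: a hypothetical core_properties dict as built above for one restored core
# (all values are made up; PropertiesFile writes it as backup_<core>/core.properties).
_example_core_properties = {
  'numShards': '2',
  'collection.configName': 'restored_config_set',  # from solr_restore_config_set
  'name': 'mycollection_shard1_replica1',
  'replicaType': 'NRT',
  'collection': 'mycollection',
  'coreNodeName': 'backup_core_node1',             # 'backup_' prefix only on the HDFS path
  'shard': 'shard1',
}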
def metadata(type='server'):
  import params

  # Needed by both Server and Client
  Directory(params.conf_dir,
      mode=0755,
      cd_access='a',
      owner=params.metadata_user,
      group=params.user_group,
      create_parents=True)

  if type == "server":
    Directory([params.pid_dir],
        mode=0755,
        cd_access='a',
        owner=params.metadata_user,
        group=params.user_group,
        create_parents=True)
    Directory(format('{conf_dir}/solr'),
        mode=0755,
        cd_access='a',
        owner=params.metadata_user,
        group=params.user_group,
        create_parents=True,
        recursive_ownership=True)
    Directory(params.log_dir,
        mode=0755,
        cd_access='a',
        owner=params.metadata_user,
        group=params.user_group,
        create_parents=True)
    Directory(params.data_dir,
        mode=0644,
        cd_access='a',
        owner=params.metadata_user,
        group=params.user_group,
        create_parents=True)
    Directory(params.expanded_war_dir,
        mode=0644,
        cd_access='a',
        owner=params.metadata_user,
        group=params.user_group,
        create_parents=True)
    File(format("{expanded_war_dir}/atlas.war"),
        content=StaticFile(format('{metadata_home}/server/webapp/atlas.war')))
    File(format("{conf_dir}/atlas-log4j.xml"),
        mode=0644,
        owner=params.metadata_user,
        group=params.user_group,
        content=InlineTemplate(params.metadata_log4j_content))
    File(format("{conf_dir}/atlas-env.sh"),
        owner=params.metadata_user,
        group=params.user_group,
        mode=0755,
        content=InlineTemplate(params.metadata_env_content))

    if params.metadata_solrconfig_content:
      File(format("{conf_dir}/solr/solrconfig.xml"),
          mode=0644,
          owner=params.metadata_user,
          group=params.user_group,
          content=InlineTemplate(params.metadata_solrconfig_content))

  # Needed by both Server and Client
  PropertiesFile(format('{conf_dir}/{conf_file}'),
      properties=params.application_properties,
      mode=0644,
      owner=params.metadata_user,
      group=params.user_group)

  if params.security_enabled:
    TemplateConfig(format(params.atlas_jaas_file),
        owner=params.metadata_user)

  if type == 'server' and params.search_backend_solr and params.has_infra_solr:
    solr_cloud_util.setup_solr_client(params.config)
    check_znode()
    jaasFile = params.atlas_jaas_file if params.security_enabled else None
    upload_conf_set('atlas_configs', jaasFile)
    create_collection('vertex_index', 'atlas_configs', jaasFile)
    create_collection('edge_index', 'atlas_configs', jaasFile)
    create_collection('fulltext_index', 'atlas_configs', jaasFile)

  File(params.atlas_hbase_setup,
      group=params.user_group,
      owner=params.hbase_user,
      content=Template("atlas_hbase_setup.rb.j2"))

  if check_stack_feature(StackFeature.ATLAS_UPGRADE_SUPPORT, get_stack_feature_version(params.config)) and \
      params.security_enabled and not params.host_with_kafka:
    File(params.atlas_kafka_setup,
        group=params.user_group,
        owner=params.kafka_user,
        content=Template("atlas_kafka_acl.sh.j2"))
    File(format("{kafka_conf_dir}/kafka-env.sh"),
        owner=params.kafka_user,
        content=InlineTemplate(params.kafka_env_sh_template))
    File(format("{kafka_conf_dir}/kafka_jaas.conf"),
        group=params.user_group,
        owner=params.kafka_user,
        content=Template("kafka_jaas.conf.j2"))
def metadata(type='server'):
  import params

  # Needed by both Server and Client
  Directory(params.conf_dir,
      mode=0755,
      cd_access='a',
      owner=params.metadata_user,
      group=params.user_group)

  if type == "server":
    Directory([params.pid_dir],
        mode=0755,
        cd_access='a',
        owner=params.metadata_user,
        group=params.user_group)
    Directory(format('{conf_dir}/solr'),
        mode=0755,
        cd_access='a',
        owner=params.metadata_user,
        group=params.user_group)
    Execute(("chown", "-R", params.metadata_user + ":" + params.user_group, format('{conf_dir}/solr')),
        sudo=True)
    Directory(params.log_dir,
        mode=0755,
        cd_access='a',
        owner=params.metadata_user,
        group=params.user_group)
    Directory(params.data_dir,
        mode=0644,
        cd_access='a',
        owner=params.metadata_user,
        group=params.user_group)
    Directory(params.expanded_war_dir,
        mode=0644,
        cd_access='a',
        owner=params.metadata_user,
        group=params.user_group)
    File(format("{expanded_war_dir}/atlas.war"),
        content=StaticFile(format('{metadata_home}/server/webapp/atlas.war')))
    File(format("{conf_dir}/atlas-log4j.xml"),
        mode=0644,
        owner=params.metadata_user,
        group=params.user_group,
        content=InlineTemplate(params.metadata_log4j_content))
    File(format("{conf_dir}/atlas-env.sh"),
        owner=params.metadata_user,
        group=params.user_group,
        mode=0755,
        content=InlineTemplate(params.metadata_env_content))

    files_to_chown = [format("{conf_dir}/policy-store.txt"), format("{conf_dir}/users-credentials.properties")]
    for file in files_to_chown:
      if os.path.exists(file):
        Execute(('chown', format('{metadata_user}:{user_group}'), file),
            sudo=True)
        Execute(('chmod', '644', file),
            sudo=True)

    if params.metadata_solrconfig_content:
      File(format("{conf_dir}/solr/solrconfig.xml"),
          mode=0644,
          owner=params.metadata_user,
          group=params.user_group,
          content=InlineTemplate(params.metadata_solrconfig_content))

  # Needed by both Server and Client
  PropertiesFile(format('{conf_dir}/{conf_file}'),
      properties=params.application_properties,
      mode=0644,
      owner=params.metadata_user,
      group=params.user_group)

  if params.security_enabled:
    TemplateConfig(format(params.atlas_jaas_file),
        owner=params.metadata_user)

  if type == 'server' and params.search_backend_solr:
    create_collection('vertex_index')
    create_collection('edge_index')
    create_collection('fulltext_index')

  File(params.atlas_hbase_setup,
      group=params.user_group,
      owner=params.hbase_user,
      content=Template("atlas_hbase_setup.rb.j2"))
def setup_conf_dir(name=None): # 'master' or 'tserver' or 'monitor' or 'gc' or 'tracer' or 'client'
  import params

  # check if confdir is a link
  if not os.path.exists(params.conf_dir) or not os.path.islink(params.conf_dir):
    raise Fail("confdir {} must be a symlink".format(params.conf_dir))

  if name == 'client':
    dest_conf_dir = params.conf_dir

    # create a site file for client processes
    configs = {}
    configs.update(params.config['configurations']['accumulo-site'])
    if "instance.secret" in configs:
      configs.pop("instance.secret")
    if "trace.token.property.password" in configs:
      configs.pop("trace.token.property.password")
    XmlConfig("accumulo-site.xml",
        conf_dir=dest_conf_dir,
        configurations=configs,
        configuration_attributes=params.config['configurationAttributes']['accumulo-site'],
        owner=params.accumulo_user,
        group=params.user_group,
        mode=0644)

    # create env file
    File(format("{dest_conf_dir}/accumulo-env.sh"),
        mode=0644,
        group=params.user_group,
        owner=params.accumulo_user,
        content=InlineTemplate(params.env_sh_template))
  else:
    dest_conf_dir = params.server_conf_dir

    # create server conf directory
    Directory(params.server_conf_dir,
        mode=0700,
        owner=params.accumulo_user,
        group=params.user_group,
        create_parents=True)

    # create a site file for server processes
    configs = {}
    configs.update(params.config['configurations']['accumulo-site'])
    configs["instance.secret"] = str(params.config['configurations']['accumulo-env']['instance_secret'])
    configs["trace.token.property.password"] = str(params.trace_password)
    XmlConfig("accumulo-site.xml",
        conf_dir=dest_conf_dir,
        configurations=configs,
        configuration_attributes=params.config['configurationAttributes']['accumulo-site'],
        owner=params.accumulo_user,
        group=params.user_group,
        mode=0600)

    # create pid dir
    Directory(params.pid_dir,
        owner=params.accumulo_user,
        group=params.user_group,
        create_parents=True,
        cd_access="a",
        mode=0755)

    # create log dir
    Directory(params.log_dir,
        owner=params.accumulo_user,
        group=params.user_group,
        create_parents=True,
        cd_access="a",
        mode=0755)

    # create env file
    File(format("{dest_conf_dir}/accumulo-env.sh"),
        mode=0644,
        group=params.user_group,
        owner=params.accumulo_user,
        content=InlineTemplate(params.server_env_sh_template))

    if params.security_enabled:
      accumulo_TemplateConfig("accumulo_jaas.conf", dest_conf_dir)

  # create client.conf file
  configs = {}
  if 'client' in params.config['configurations']:
    configs.update(params.config['configurations']['client'])
  configs["instance.name"] = params.instance_name
  configs["instance.zookeeper.host"] = params.config['configurations']['accumulo-site']['instance.zookeeper.host']
  copy_site_property(configs, 'instance.rpc.sasl.enabled')
  copy_site_property(configs, 'rpc.sasl.qop')
  copy_site_property(configs, 'rpc.useJsse')
  copy_site_property(configs, 'instance.rpc.ssl.clientAuth')
  copy_site_property(configs, 'instance.rpc.ssl.enabled')
  copy_site_property(configs, 'instance.zookeeper.timeout')
  copy_site_property(configs, 'trace.span.receivers')
  copy_site_property(configs, 'trace.zookeeper.path')
  for key, value in params.config['configurations']['accumulo-site'].iteritems():
    if key.startswith("trace.span.receiver."):
      configs[key] = value
  PropertiesFile(format("{dest_conf_dir}/client.conf"),
      properties=configs,
      owner=params.accumulo_user,
      group=params.user_group)

  # create log4j.properties files
  if (params.log4j_props != None):
    File(format("{dest_conf_dir}/log4j.properties"),
        mode=0644,
        group=params.user_group,
        owner=params.accumulo_user,
        content=params.log4j_props)
  else:
    File(format("{dest_conf_dir}/log4j.properties"),
        mode=0644,
        group=params.user_group,
        owner=params.hbase_user)

  # create logging configuration files
  accumulo_TemplateConfig("auditLog.xml", dest_conf_dir)
  accumulo_TemplateConfig("generic_logger.xml", dest_conf_dir)
  accumulo_TemplateConfig("monitor_logger.xml", dest_conf_dir)
  accumulo_StaticFile("accumulo-metrics.xml", dest_conf_dir)

  # create host files
  accumulo_TemplateConfig("tracers", dest_conf_dir)
  accumulo_TemplateConfig("gc", dest_conf_dir)
  accumulo_TemplateConfig("monitor", dest_conf_dir)
  accumulo_TemplateConfig("slaves", dest_conf_dir)
  accumulo_TemplateConfig("masters", dest_conf_dir)

  # metrics configuration
  if params.has_metric_collector:
    accumulo_TemplateConfig("hadoop-metrics2-accumulo.properties", dest_conf_dir)

  # other server setup
  if name == 'master':
    params.HdfsResource(format("/user/{params.accumulo_user}"),
        type="directory",
        action="create_on_execute",
        owner=params.accumulo_user,
        mode=0700)
    params.HdfsResource(format("{params.parent_dir}"),
        type="directory",
        action="create_on_execute",
        owner=params.accumulo_user,
        mode=0700)
    params.HdfsResource(None, action="execute")
    if params.security_enabled and params.has_secure_user_auth:
      Execute(format("{params.kinit_cmd} "
                     "{params.daemon_script} init "
                     "--user {params.accumulo_principal_name} "
                     "--instance-name {params.instance_name} "
                     "--clear-instance-name "
                     ">{params.log_dir}/accumulo-init.out "
                     "2>{params.log_dir}/accumulo-init.err"),
          not_if=as_user(format("{params.kinit_cmd} "
                                "{params.hadoop_bin_dir}/hadoop --config "
                                "{params.hadoop_conf_dir} fs -stat "
                                "{params.instance_volumes}"),
                         params.accumulo_user),
          logoutput=True,
          user=params.accumulo_user)
    else:
      passfile = format("{params.exec_tmp_dir}/pass")
      try:
        File(passfile,
            mode=0600,
            group=params.user_group,
            owner=params.accumulo_user,
            content=InlineTemplate('{{root_password}}\n'
                                   '{{root_password}}\n\n'))
        Execute(format("cat {passfile} | {params.daemon_script} init "
                       "--instance-name {params.instance_name} "
                       "--clear-instance-name "
                       ">{params.log_dir}/accumulo-init.out "
                       "2>{params.log_dir}/accumulo-init.err"),
            not_if=as_user(format("{params.kinit_cmd} "
                                  "{params.hadoop_bin_dir}/hadoop --config "
                                  "{params.hadoop_conf_dir} fs -stat "
                                  "{params.instance_volumes}"),
                           params.accumulo_user),
            logoutput=True,
            user=params.accumulo_user)
      finally:
        File(passfile, action="delete")

  if name == 'tracer':
    if params.security_enabled and params.has_secure_user_auth:
      Execute(format("{params.kinit_cmd} "
                     "{params.daemon_script} init --reset-security "
                     "--user {params.accumulo_principal_name} "
                     "--password NA "
                     ">{params.log_dir}/accumulo-reset.out "
                     "2>{params.log_dir}/accumulo-reset.err"),
          not_if=as_user(format("{params.kinit_cmd} "
                                "{params.daemon_script} shell -e "
                                "\"userpermissions -u "
                                "{params.accumulo_principal_name}\" | "
                                "grep System.CREATE_TABLE"),
                         params.accumulo_user),
          user=params.accumulo_user)
      create_user(params.smokeuser_principal, params.smoke_test_password)
    else:
      # do not try to reset security in nonsecure mode, for now
      # Execute( format("{params.daemon_script} init --reset-security "
      #                 "--user root "
      #                 ">{params.log_dir}/accumulo-reset.out "
      #                 "2>{params.log_dir}/accumulo-reset.err"),
      #          not_if=as_user(format("cat {rpassfile} | "
      #                                "{params.daemon_script} shell -e "
      #                                "\"userpermissions -u root\" | "
      #                                "grep System.CREATE_TABLE"),
      #                         params.accumulo_user),
      #          user=params.accumulo_user)
      create_user(params.smoke_test_user, params.smoke_test_password)
    create_user(params.trace_user, params.trace_password)
    rpassfile = format("{params.exec_tmp_dir}/pass0")
    cmdfile = format("{params.exec_tmp_dir}/resetcmds")
    try:
      File(cmdfile,
          mode=0600,
          group=params.user_group,
          owner=params.accumulo_user,
          content=InlineTemplate('grant -t trace -u {{trace_user}} Table.ALTER_TABLE\n'
                                 'grant -t trace -u {{trace_user}} Table.READ\n'
                                 'grant -t trace -u {{trace_user}} Table.WRITE\n\n'))
      if params.security_enabled and params.has_secure_user_auth:
        Execute(format("{params.kinit_cmd} {params.daemon_script} shell -f "
                       "{cmdfile}"),
            only_if=as_user(format("{params.kinit_cmd} "
                                   "{params.daemon_script} shell "
                                   "-e \"table trace\""),
                            params.accumulo_user),
            not_if=as_user(format("{params.kinit_cmd} "
                                  "{params.daemon_script} shell "
                                  "-e \"userpermissions -u "
                                  "{params.trace_user}\" | "
                                  "grep Table.READ | grep trace"),
                           params.accumulo_user),
            user=params.accumulo_user)
      else:
        File(rpassfile,
            mode=0600,
            group=params.user_group,
            owner=params.accumulo_user,
            content=InlineTemplate('{{root_password}}\n\n'))
        Execute(format("cat {rpassfile} | {params.daemon_script} shell -f "
                       "{cmdfile} -u root"),
            only_if=as_user(format("cat {rpassfile} | "
                                   "{params.daemon_script} shell -u root "
                                   "-e \"table trace\""),
                            params.accumulo_user),
            not_if=as_user(format("cat {rpassfile} | "
                                  "{params.daemon_script} shell -u root "
                                  "-e \"userpermissions -u "
                                  "{params.trace_user}\" | "
                                  "grep Table.READ | grep trace"),
                           params.accumulo_user),
            user=params.accumulo_user)
    finally:
      try_remove(rpassfile)
      try_remove(cmdfile)
def setup_spark(env, type, upgrade_type = None, action = None): import params Directory([params.spark_pid_dir, params.spark_log_dir], owner=params.spark_user, group=params.user_group, mode=0775, create_parents = True, cd_access = 'a', ) if type == 'server' and action == 'config': params.HdfsResource(params.spark_hdfs_user_dir, type="directory", action="create_on_execute", owner=params.spark_user, mode=0775 ) params.HdfsResource(None, action="execute") PropertiesFile(format("{spark_conf}/spark-defaults.conf"), properties = params.config['configurations']['spark2-defaults'], key_value_delimiter = " ", owner=params.spark_user, group=params.spark_group, mode=0644 ) # create spark-env.sh in etc/conf dir File(os.path.join(params.spark_conf, 'spark-env.sh'), owner=params.spark_user, group=params.spark_group, content=InlineTemplate(params.spark_env_sh), mode=0644, ) #create log4j.properties in etc/conf dir File(os.path.join(params.spark_conf, 'log4j.properties'), owner=params.spark_user, group=params.spark_group, content=params.spark_log4j_properties, mode=0644, ) #create metrics.properties in etc/conf dir File(os.path.join(params.spark_conf, 'metrics.properties'), owner=params.spark_user, group=params.spark_group, content=InlineTemplate(params.spark_metrics_properties), mode=0644 ) if params.is_hive_installed: XmlConfig("hive-site.xml", conf_dir=params.spark_conf, configurations=params.spark_hive_properties, owner=params.spark_user, group=params.spark_group, mode=0644) if params.has_spark_thriftserver: PropertiesFile(params.spark_thrift_server_conf_file, properties = params.config['configurations']['spark2-thrift-sparkconf'], owner = params.hive_user, group = params.user_group, key_value_delimiter = " ", mode=0644 ) effective_version = params.version if upgrade_type is not None else params.stack_version_formatted if effective_version: effective_version = format_stack_version(effective_version) if params.spark_thrift_fairscheduler_content and effective_version and check_stack_feature(StackFeature.SPARK_16PLUS, effective_version): # create spark-thrift-fairscheduler.xml File(os.path.join(params.spark_conf,"spark-thrift-fairscheduler.xml"), owner=params.spark_user, group=params.spark_group, mode=0755, content=InlineTemplate(params.spark_thrift_fairscheduler_content) )
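# spark-defaults.conf uses whitespace-separated "key value" pairs rather than "key=value",
# which is why the PropertiesFile above sets key_value_delimiter=" ". A rough, hypothetical
# stand-in for what that rendering produces (not the actual PropertiesFile resource):
def render_spark_defaults(props):
    # e.g. {"spark.master": "yarn"} -> "spark.master yarn\n"
    return "".join("%s %s\n" % (k, v) for k, v in sorted(props.items()))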
def setup_tagsync(upgrade_type=None): import params ranger_tagsync_home = params.ranger_tagsync_home ranger_home = params.ranger_home ranger_tagsync_conf = params.ranger_tagsync_conf tagsync_log4j_file = format('{ranger_tagsync_conf}/log4j.xml') tagsync_services_file = format( '{ranger_tagsync_home}/ranger-tagsync-services.sh') Directory(format("{ranger_tagsync_conf}"), owner=params.unix_user, group=params.unix_group, create_parents=True) Directory( params.ranger_pid_dir, mode=0750, create_parents=True, owner=params.unix_user, group=params.unix_group, cd_access="a", ) Directory(params.tagsync_log_dir, create_parents=True, owner=params.unix_user, group=params.unix_group, cd_access="a", mode=0755) File(format('{ranger_tagsync_conf}/ranger-tagsync-env-logdir.sh'), content=format("export RANGER_TAGSYNC_LOG_DIR={tagsync_log_dir}"), owner=params.unix_user, group=params.unix_group, mode=0755) XmlConfig( "ranger-tagsync-site.xml", conf_dir=ranger_tagsync_conf, configurations=params.config['configurations']['ranger-tagsync-site'], configuration_attributes=params.config['configuration_attributes'] ['ranger-tagsync-site'], owner=params.unix_user, group=params.unix_group, mode=0644) PropertiesFile(format('{ranger_tagsync_conf}/application.properties'), properties=params.tagsync_application_properties, mode=0755, owner=params.unix_user, group=params.unix_group) if upgrade_type is not None: src_file = format( '{ranger_tagsync_home}/ews/webapp/WEB-INF/classes/conf.dist/log4j.xml' ) dst_file = format('{tagsync_log4j_file}') Execute(('cp', '-f', src_file, dst_file), sudo=True) if os.path.isfile(tagsync_log4j_file): File(tagsync_log4j_file, owner=params.unix_user, group=params.unix_group) else: Logger.warning( 'Required file {0} does not exist, copying the file to {1} path'. format(tagsync_log4j_file, ranger_tagsync_conf)) src_file = format( '{ranger_tagsync_home}/ews/webapp/WEB-INF/classes/conf.dist/log4j.xml' ) dst_file = format('{tagsync_log4j_file}') Execute(('cp', '-f', src_file, dst_file), sudo=True) File(tagsync_log4j_file, owner=params.unix_user, group=params.unix_group) cred_lib = os.path.join(ranger_tagsync_home, "lib", "*") if not is_empty(params.tagsync_jceks_path) and not is_empty( params.ranger_tagsync_tagadmin_password ) and params.tagsync_enabled: ranger_credential_helper(cred_lib, 'tagadmin.user.password', params.ranger_tagsync_tagadmin_password, params.tagsync_jceks_path) File(params.tagsync_jceks_path, owner=params.unix_user, group=params.unix_group, mode=0640) File( tagsync_services_file, mode=0755, ) Execute(('ln', '-sf', format('{tagsync_services_file}'), '/usr/bin/ranger-tagsync'), not_if=format("ls /usr/bin/ranger-tagsync"), only_if=format("ls {tagsync_services_file}"), sudo=True)
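# The final Execute above links /usr/bin/ranger-tagsync to the services script, but only when
# the script exists (only_if) and the link is not already present (not_if). A minimal standalone
# sketch of the same idempotent behaviour, outside the Ambari resource framework:
import os

def link_tagsync(tagsync_services_file, link_path='/usr/bin/ranger-tagsync'):
    if not os.path.exists(tagsync_services_file):  # only_if: source script must exist
        return
    if os.path.lexists(link_path):                 # not_if: link already in place
        return
    os.symlink(tagsync_services_file, link_path)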
def kafka(upgrade_type=None): import params ensure_base_directories() kafka_server_config = mutable_config_dict( params.config['configurations']['kafka-broker']) # This still has an issue of hostnames being alphabetically out-of-order for broker.id in HDP-2.2. # Starting in HDP 2.3, Kafka handles the generation of broker.id so Ambari doesn't have to. effective_version = params.hdp_stack_version if upgrade_type is None else format_hdp_stack_version( params.version) Logger.info(format("Effective stack version: {effective_version}")) if effective_version is not None and effective_version != "" and compare_versions( effective_version, '2.2.0.0') >= 0 and compare_versions( effective_version, '2.3.0.0') < 0: if len(params.kafka_hosts ) > 0 and params.hostname in params.kafka_hosts: brokerid = str(sorted(params.kafka_hosts).index(params.hostname)) kafka_server_config['broker.id'] = brokerid Logger.info(format("Calculating broker.id as {brokerid}")) # listeners and advertised.listeners are only added in 2.3.0.0 onwards. if effective_version is not None and effective_version != "" and compare_versions( effective_version, '2.3.0.0') >= 0: listeners = kafka_server_config['listeners'].replace( "localhost", params.hostname) Logger.info(format("Kafka listeners: {listeners}")) if params.security_enabled and params.kafka_kerberos_enabled: Logger.info("Kafka kerberos security is enabled.") if "SASL" not in listeners: listeners = listeners.replace("PLAINTEXT", "PLAINTEXTSASL") kafka_server_config['listeners'] = listeners kafka_server_config['advertised.listeners'] = listeners Logger.info(format("Kafka advertised listeners: {listeners}")) else: kafka_server_config['listeners'] = listeners if 'advertised.listeners' in kafka_server_config: advertised_listeners = kafka_server_config[ 'advertised.listeners'].replace("localhost", params.hostname) kafka_server_config[ 'advertised.listeners'] = advertised_listeners Logger.info( format( "Kafka advertised listeners: {advertised_listeners}")) else: kafka_server_config['host.name'] = params.hostname if params.has_metric_collector: kafka_server_config[ 'kafka.timeline.metrics.host'] = params.metric_collector_host kafka_server_config[ 'kafka.timeline.metrics.port'] = params.metric_collector_port kafka_server_config[ 'kafka.timeline.metrics.protocol'] = params.metric_collector_protocol kafka_server_config[ 'kafka.timeline.metrics.truststore.path'] = params.metric_truststore_path kafka_server_config[ 'kafka.timeline.metrics.truststore.type'] = params.metric_truststore_type kafka_server_config[ 'kafka.timeline.metrics.truststore.password'] = params.metric_truststore_password kafka_data_dir = kafka_server_config['log.dirs'] kafka_data_dirs = filter(None, kafka_data_dir.split(",")) Directory( kafka_data_dirs, mode=0755, cd_access='a', owner=params.kafka_user, group=params.user_group, create_parents=True, recursive_ownership=True, ) PropertiesFile( "server.properties", dir=params.conf_dir, properties=kafka_server_config, owner=params.kafka_user, group=params.user_group, ) File(format("{conf_dir}/kafka-env.sh"), owner=params.kafka_user, content=InlineTemplate(params.kafka_env_sh_template)) if (params.log4j_props != None): File(format("{conf_dir}/log4j.properties"), mode=0644, group=params.user_group, owner=params.kafka_user, content=params.log4j_props) if params.security_enabled and params.kafka_kerberos_enabled: TemplateConfig(format("{conf_dir}/kafka_jaas.conf"), owner=params.kafka_user) TemplateConfig(format("{conf_dir}/kafka_client_jaas.conf"), owner=params.kafka_user) # On 
some operating systems this directory may not exist, so create it before placing files in it. Directory(params.limits_conf_dir, create_parents=True, owner='root', group='root') File(os.path.join(params.limits_conf_dir, 'kafka.conf'), owner='root', group='root', mode=0644, content=Template("kafka.conf.j2")) setup_symlink(params.kafka_managed_pid_dir, params.kafka_pid_dir) setup_symlink(params.kafka_managed_log_dir, params.kafka_log_dir)
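# Quick illustration of the broker.id derivation above for stacks >= 2.2.0.0 and < 2.3.0.0:
# the id is this host's index in the alphabetically sorted broker list, so it is deterministic
# across restarts. Hostnames here are made up.
kafka_hosts_example = ['c6403.ambari.apache.org', 'c6401.ambari.apache.org', 'c6402.ambari.apache.org']
hostname_example = 'c6402.ambari.apache.org'
broker_id_example = str(sorted(kafka_hosts_example).index(hostname_example))  # -> '1'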
def setup_logfeeder(): import params Directory([ params.logfeeder_log_dir, params.logfeeder_pid_dir, params.logfeeder_checkpoint_folder ], mode=0755, cd_access='a', create_parents=True) Directory([params.logfeeder_dir, params.logsearch_logfeeder_conf], mode=0755, cd_access='a', create_parents=True, recursive_ownership=True) File(params.logfeeder_log, mode=0644, content='') params.logfeeder_env_config = update_credential_provider_path( params.logfeeder_env_config, 'logfeeder-env', params.logfeeder_env_jceks_file, params.logsearch_user, params.user_group) params.logfeeder_properties[ HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME] = 'jceks://file' + params.logfeeder_env_jceks_file PropertiesFile(format("{logsearch_logfeeder_conf}/logfeeder.properties"), properties=params.logfeeder_properties) File(format("{logsearch_logfeeder_conf}/logfeeder-env.sh"), content=InlineTemplate(params.logfeeder_env_content), mode=0755) File(format("{logsearch_logfeeder_conf}/log4j.xml"), content=InlineTemplate(params.logfeeder_log4j_content)) File(format("{logsearch_logfeeder_conf}/grok-patterns"), content=InlineTemplate(params.logfeeder_grok_patterns), encoding="utf-8") File(format("{logsearch_logfeeder_conf}/input.config-ambari.json"), content=InlineTemplate(params.logfeeder_ambari_config_content), encoding="utf-8") File(format("{logsearch_logfeeder_conf}/output.config.json"), content=InlineTemplate(params.logfeeder_output_config_content), encoding="utf-8") for file_name in params.logfeeder_default_config_file_names: File(format("{logsearch_logfeeder_conf}/" + file_name), content=Template(file_name + ".j2")) File(format( "{logsearch_logfeeder_conf}/input.config-logfeeder-custom.json"), action='delete') for service, pattern_content in params.logfeeder_metadata.iteritems(): File(format("{logsearch_logfeeder_conf}/input.config-" + service.replace('-logsearch-conf', '') + ".json"), content=InlineTemplate(pattern_content, extra_imports=[default])) if params.logfeeder_system_log_enabled: File(format( "{logsearch_logfeeder_conf}/input.config-system_messages.json"), content=params.logfeeder_system_messages_content) File(format("{logsearch_logfeeder_conf}/input.config-secure_log.json"), content=params.logfeeder_secure_log_content) if params.logsearch_solr_kerberos_enabled: File(format("{logfeeder_jaas_file}"), content=Template("logfeeder_jaas.conf.j2"))
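# Small illustration of how the per-service input configs above get their names: the
# '-logsearch-conf' suffix of each metadata key is stripped before building the file name.
# The service keys below are examples only.
for service_example in ['hdfs-logsearch-conf', 'kafka-logsearch-conf']:
    input_config_name = 'input.config-' + service_example.replace('-logsearch-conf', '') + '.json'
    # -> input.config-hdfs.json, input.config-kafka.json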
def setup_logsearch(): import params Directory([params.logsearch_log_dir, params.logsearch_pid_dir], mode=0755, cd_access='a', owner=params.logsearch_user, group=params.user_group, create_parents=True ) Directory([params.logsearch_dir, params.logsearch_server_conf, params.logsearch_config_set_dir], mode=0755, cd_access='a', owner=params.logsearch_user, group=params.user_group, create_parents=True, recursive_ownership=True ) Directory(params.logsearch_server_keys_folder, cd_access='a', mode=0755, owner=params.logsearch_user, group=params.user_group) File(params.logsearch_log, mode=0644, owner=params.logsearch_user, group=params.user_group, content='' ) params.logsearch_env_config = update_credential_provider_path(params.logsearch_env_config, 'logsearch-env', params.logsearch_env_jceks_file, params.logsearch_user, params.user_group ) params.logsearch_properties[HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME] = 'jceks://file' + params.logsearch_env_jceks_file PropertiesFile(format("{logsearch_server_conf}/logsearch.properties"), properties=params.logsearch_properties ) File(format("{logsearch_server_conf}/HadoopServiceConfig.json"), content=Template("HadoopServiceConfig.json.j2"), owner=params.logsearch_user, group=params.user_group ) File(format("{logsearch_server_conf}/log4j.xml"), content=InlineTemplate(params.logsearch_app_log4j_content), owner=params.logsearch_user, group=params.user_group ) File(format("{logsearch_server_conf}/logsearch-env.sh"), content=InlineTemplate(params.logsearch_env_content), mode=0755, owner=params.logsearch_user, group=params.user_group ) File(format("{logsearch_server_conf}/logsearch-admin.json"), content=InlineTemplate(params.logsearch_admin_content), owner=params.logsearch_user, group=params.user_group ) File(format("{logsearch_config_set_dir}/hadoop_logs/conf/solrconfig.xml"), content=InlineTemplate(params.logsearch_service_logs_solrconfig_content), owner=params.logsearch_user, group=params.user_group ) File(format("{logsearch_config_set_dir}/audit_logs/conf/solrconfig.xml"), content=InlineTemplate(params.logsearch_audit_logs_solrconfig_content), owner=params.logsearch_user, group=params.user_group ) if params.security_enabled: File(format("{logsearch_jaas_file}"), content=Template("logsearch_jaas.conf.j2"), owner=params.logsearch_user ) Execute(("chmod", "-R", "ugo+r", format("{logsearch_server_conf}/solr_configsets")), sudo=True ) check_znode() if params.security_enabled and not params.logsearch_use_external_solr: solr_cloud_util.add_solr_roles(params.config, roles = [params.infra_solr_role_logsearch, params.infra_solr_role_ranger_admin, params.infra_solr_role_dev], new_service_principals = [params.logsearch_kerberos_principal]) solr_cloud_util.add_solr_roles(params.config, roles = [params.infra_solr_role_logfeeder, params.infra_solr_role_dev], new_service_principals = [params.logfeeder_kerberos_principal])
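# Illustration of the Hadoop credential provider path set above: logsearch.properties ends up
# pointing at a local JCEKS keystore through the jceks://file scheme. The path below is a
# made-up example.
logsearch_env_jceks_example = '/etc/ambari-logsearch-portal/conf/logsearch-env.jceks'
credential_provider_path_example = 'jceks://file' + logsearch_env_jceks_example
# -> 'jceks://file/etc/ambari-logsearch-portal/conf/logsearch-env.jceks'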
def druid(upgrade_type=None, nodeType=None): import params ensure_base_directories() # Environment Variables File(format("{params.druid_conf_dir}/druid-env.sh"), owner=params.druid_user, content=InlineTemplate(params.druid_env_sh_template), mode=0700) # common config druid_common_config = mutable_config_dict( params.config['configurations']['druid-common']) # User cannot override below configs druid_common_config['druid.host'] = params.hostname druid_common_config[ 'druid.extensions.directory'] = params.druid_extensions_dir druid_common_config[ 'druid.extensions.hadoopDependenciesDir'] = params.druid_hadoop_dependencies_dir druid_common_config[ 'druid.selectors.indexing.serviceName'] = params.config[ 'configurations']['druid-overlord']['druid.service'] druid_common_config['druid.selectors.coordinator.serviceName'] = \ params.config['configurations']['druid-coordinator']['druid.service'] druid_common_config['druid.extensions.loadList'] = json.dumps( eval(params.druid_extensions_load_list) + eval(params.druid_security_extensions_load_list)) # Delete the connector user and password when Derby is the metadata storage type, otherwise Derby will fail. if 'derby' == druid_common_config['druid.metadata.storage.type']: del druid_common_config['druid.metadata.storage.connector.user'] del druid_common_config['druid.metadata.storage.connector.password'] druid_env_config = mutable_config_dict( params.config['configurations']['druid-env']) PropertiesFile("common.runtime.properties", dir=params.druid_common_conf_dir, properties=druid_common_config, owner=params.druid_user, group=params.user_group, mode=0600) Logger.info("Created common.runtime.properties") File(format("{params.druid_common_conf_dir}/druid-log4j.xml"), mode=0644, owner=params.druid_user, group=params.user_group, content=InlineTemplate(params.log4j_props)) Logger.info("Created log4j file") File("/etc/logrotate.d/druid", mode=0644, owner='root', group='root', content=InlineTemplate(params.logrotate_props)) Logger.info("Created log rotate file") # node specific configs for node_type in [ 'coordinator', 'overlord', 'historical', 'broker', 'middleManager', 'router' ]: node_config_dir = format('{params.druid_conf_dir}/{node_type}') node_type_lowercase = node_type.lower() # Write runtime.properties file node_config = mutable_config_dict(params.config['configurations'][ format('druid-{node_type_lowercase}')]) PropertiesFile("runtime.properties", dir=node_config_dir, properties=node_config, owner=params.druid_user, group=params.user_group, mode=0600) Logger.info( format("Created druid-{node_type_lowercase} runtime.properties")) # Write jvm configs File( format('{node_config_dir}/jvm.config'), owner=params.druid_user, group=params.user_group, content=InlineTemplate( "-server \n-Xms{{node_heap_memory}}m \n-Xmx{{node_heap_memory}}m \n-XX:MaxDirectMemorySize={{node_direct_memory}}m \n-Dlog4j.configurationFile={{log4j_config_file}} \n-Dlog4j.debug \n{{node_jvm_opts}}", node_heap_memory=druid_env_config[format( 'druid.{node_type_lowercase}.jvm.heap.memory')], log4j_config_file=format( "{params.druid_common_conf_dir}/druid-log4j.xml"), node_direct_memory=druid_env_config[format( 'druid.{node_type_lowercase}.jvm.direct.memory')], node_jvm_opts=druid_env_config[format( 'druid.{node_type_lowercase}.jvm.opts')])) Logger.info(format("Created druid-{node_type_lowercase} jvm.config")) # Handle Hadoop LZO jars if enabled and the node type is Hadoop-related, e.g. Overlords and MiddleManagers if ['middleManager', 'overlord' ].__contains__(node_type_lowercase) and params.lzo_enabled: try: Logger.info( format( "Copying hadoop lzo jars from {hadoop_lib_home} to {druid_hadoop_dependencies_dir}/hadoop-client/*/" )) Execute( format( '{sudo} cp {hadoop_lib_home}/hadoop-lzo*.jar {druid_hadoop_dependencies_dir}/hadoop-client/*/' )) except Fail as ex: Logger.info( format( "No Hadoop LZO found at {hadoop_lib_home}/hadoop-lzo*.jar" )) # All Druid nodes have a dependency on hdfs_client ensure_hadoop_directories() download_database_connector_if_needed() # Pull all required dependencies pulldeps()
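# A rough sketch of the jvm.config content produced by the InlineTemplate above for a single
# node type, with made-up memory values, options, and log4j path; the real values come from
# druid-env and the rendering is done by InlineTemplate.
heap_mb_example = 1024
direct_mb_example = 2048
log4j_file_example = '/usr/hdp/current/druid-broker/conf/druid-log4j.xml'
jvm_opts_example = '-Duser.timezone=UTC -Dfile.encoding=UTF-8'
jvm_config_example = ("-server \n-Xms%dm \n-Xmx%dm \n-XX:MaxDirectMemorySize=%dm "
                      "\n-Dlog4j.configurationFile=%s \n-Dlog4j.debug \n%s") % (
    heap_mb_example, heap_mb_example, direct_mb_example, log4j_file_example, jvm_opts_example)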