def action_create(self):
    """Render a Jinja2 template to the resource's target file.

    The template name is derived from the target file's basename,
    optionally suffixed with the resource's template_tag.
    """
    tag = self.resource.template_tag
    target_path = self.resource.name
    base_name = os.path.basename(target_path)

    # "<name>.j2" by default, "<name>-<tag>.j2" when a tag is set.
    if tag:
        template_name = format("{base_name}-{tag}.j2")
    else:
        template_name = format("{base_name}.j2")

    File(target_path,
         owner=self.resource.owner,
         group=self.resource.group,
         mode=self.resource.mode,
         content=Template(template_name,
                          extra_imports=self.resource.extra_imports))
def action_create(self):
    """Write a .repo file rendered from the repository template.

    When append_to_file is set and the file already exists, the rendered
    content is appended; otherwise the file is (re)created.
    """
    with Environment.get_instance_copy() as env:
        repo_file_name = self.resource.repo_file_name
        repo_dir = get_repo_dir()

        rendered = Template(self.resource.repo_template,
                            repo_id=self.resource.repo_id,
                            repo_file_name=self.resource.repo_file_name,
                            base_url=self.resource.base_url,
                            mirror_list=self.resource.mirror_list)

        repo_file_path = format("{repo_dir}/{repo_file_name}.repo")

        if self.resource.append_to_file and os.path.isfile(repo_file_path):
            with open(repo_file_path, 'a') as fp:
                fp.write('\n' + rendered.get_content())
        else:
            File(repo_file_path, content=rendered)
def cassandra():
    """Create Cassandra runtime directories and render cassandra.yaml."""
    import params

    # Ensure log/pid/conf directories exist and are owned by the service
    # user. `create_parents` replaces the deprecated `recursive` spelling
    # used elsewhere in this codebase.
    Directory([params.log_dir, params.pid_dir, params.conf_dir],
              owner=params.cassandra_user,
              group=params.user_group,
              create_parents=True)

    configurations = params.config['configurations']['cassandra-site']

    File(format("{conf_dir}/cassandra.yaml"),
         content=Template("cassandra.master.yaml.j2",
                          configurations=configurations),
         owner=params.cassandra_user,
         group=params.user_group)
def decommission(self, env):
    """Refresh the set of decommissioned YARN NodeManagers (Windows).

    Writes the exclude-hosts file from its template and, unless the
    cluster only wants the file updated, runs `yarn rmadmin -refreshNodes`.
    """
    import params
    env.set_params(params)

    yarn_user = params.yarn_user
    yarn_refresh_cmd = format("cmd /c yarn rmadmin -refreshNodes")

    File(params.exclude_file_path,
         content=Template("exclude_hosts_list.j2"),
         owner=yarn_user,
         mode="f")  # NOTE(review): looks like a Windows ACL flag, not a POSIX mode

    # Fixed: compare the boolean flag with `not`, not `== False`.
    if not params.update_exclude_file_only:
        Execute(yarn_refresh_cmd, user=yarn_user)
def slider():
    """Configure the Slider client: conf dir, slider-client.xml, env
    scripts, storm-slider config (when installed), log4j, and — on stacks
    that copy tarballs to HDFS — the slider tarball ownership."""
    import params

    Directory(params.slider_conf_dir, create_parents=True)

    # slider-client may be absent from the desired configs; fall back to {}.
    if 'configurations' in params.config and 'slider-client' in params.config['configurations']:
        slider_client_config = params.config['configurations']['slider-client']
    else:
        slider_client_config = {}

    XmlConfig("slider-client.xml",
              conf_dir=params.slider_conf_dir,
              configurations=slider_client_config,
              mode=0o644)

    File(format("{slider_conf_dir}/slider-env.sh"),
         mode=0o755,
         content=InlineTemplate(params.slider_env_sh_template))

    # A broken current/storm_slider_client symlink (os.path.exists() is
    # False for a dangling link) means the storm slider client is not
    # installed; skip its config in that case.
    storm_slider_client_dir = os.path.join(params.storm_slider_conf_dir, "..")
    if os.path.exists(storm_slider_client_dir) or not os.path.islink(storm_slider_client_dir):
        Directory(params.storm_slider_conf_dir, create_parents=True)
        File(format("{storm_slider_conf_dir}/storm-slider-env.sh"),
             mode=0o755,
             content=Template('storm-slider-env.sh.j2'))

    # Fixed: identity test instead of `!= None`.
    if params.log4j_props is not None:
        File(format("{params.slider_conf_dir}/log4j.properties"),
             mode=0o644,
             content=params.log4j_props)
    elif os.path.exists(format("{params.slider_conf_dir}/log4j.properties")):
        File(format("{params.slider_conf_dir}/log4j.properties"),
             mode=0o644)

    if params.stack_version_formatted and check_stack_feature(
            StackFeature.COPY_TARBALL_TO_HDFS, params.stack_version_formatted):
        File(params.slider_tar_gz,
             owner=params.hdfs_user,
             group=params.user_group)
def configure(self, env, upgrade_type=None, config_dir=None):
    """Write enrichment.properties, ensure ZooKeeper holds the Metron
    configuration, then (re)apply the Storm security setup."""
    from params import params
    env.set_params(params)

    Logger.info("Running enrichment configure")
    File(format("{metron_config_path}/enrichment.properties"),
         content=Template("enrichment.properties.j2"),
         owner=params.metron_user,
         group=params.metron_group)

    # Seed ZooKeeper with the Metron config exactly once, then refresh.
    if not metron_service.is_zk_configured(params):
        metron_service.init_zk_config(params)
        metron_service.set_zk_configured(params)
    metron_service.refresh_configs(params)

    Logger.info("Calling security setup")
    storm_security_setup(params)
def decommission():
    """Refresh decommissioned HDFS DataNodes (Windows).

    Regenerates the exclude-hosts file, then tells the NameNode(s) to
    re-read it via `hadoop dfsadmin -refreshNodes`.
    """
    import params

    hdfs_user = params.hdfs_user
    conf_dir = params.hadoop_conf_dir

    File(params.exclude_file_path,
         content=Template("exclude_hosts_list.j2"),
         owner=hdfs_user)

    if params.dfs_ha_enabled:
        # Due to a bug in HDFS, refreshNodes will not run on both NameNodes,
        # so the command must be scoped to a particular NameNode.
        nn_refresh_cmd = format(
            'cmd /c hadoop dfsadmin -fs hdfs://{namenode_rpc} -refreshNodes')
    else:
        nn_refresh_cmd = format('cmd /c hadoop dfsadmin -refreshNodes')

    Execute(nn_refresh_cmd, user=hdfs_user)
def configure(self, env, upgrade_type=None, config_dir=None):
    """Render the Management UI config file and create its run directory."""
    # Fixed the 'managment' typo in the log message; parenthesized form
    # prints identically under Python 2 and is valid under Python 3.
    print('configure management_ui')
    from params import params
    env.set_params(params)

    File(format("{metron_config_path}/management_ui.yml"),
         mode=0o755,
         content=Template("management_ui.yml.j2"),
         owner=params.metron_user,
         group=params.metron_group)

    Directory('/var/run/metron',
              create_parents=False,
              mode=0o755,
              owner=params.metron_user,
              group=params.metron_group)
def configure(self, env, upgrade_type=None, config_dir=None):
    """Write /etc/default/metron and ensure Kafka/HBase prerequisites
    (topics, tables, ACLs) exist for the Metron REST application."""
    from params import params
    env.set_params(params)

    File(format("/etc/default/metron"), content=Template("metron.j2"))
    metron_service.refresh_configs(params)

    commands = RestCommands(params)

    # Idempotent bootstrap: only create what is not already configured.
    if not commands.is_kafka_configured():
        commands.init_kafka_topics()
    if not commands.is_hbase_configured():
        commands.create_hbase_tables()
    if params.security_enabled and not commands.is_hbase_acl_configured():
        commands.set_hbase_acls()
    if params.security_enabled and not commands.is_kafka_acl_configured():
        commands.init_kafka_acls()
        commands.set_kafka_acl_configured()
def load_global_config(params):
    """Create the local Metron ZooKeeper config directory, render
    global.json into it, and push the initial configuration."""
    Logger.info('Create Metron Local Config Directory')
    Logger.info("Configure Metron global.json")

    Directory([params.metron_zookeeper_config_path],
              mode=0o755,
              owner=params.metron_user,
              group=params.metron_group)

    File(ambari_format("{metron_zookeeper_config_path}/global.json"),
         content=Template("global.json.j2"),
         owner=params.metron_user,
         group=params.metron_group)

    init_config()
def create_elastic_pam_limits(params):
    """
    Creates the PAM limits for Elasticsearch.
    """
    Logger.info("Creating Elasticsearch PAM limits.")

    # On some distributions the limits.d directory is absent; create it.
    Logger.info("Ensure PAM limits directory exists: {0}".format(params.limits_conf_dir))
    Directory(params.limits_conf_dir,
              create_parents=True,
              owner='root',
              group='root')

    Logger.info("Creating Elasticsearch PAM limits; file={0}".format(params.limits_conf_file))
    File(params.limits_conf_file,
         content=Template('elasticsearch_limits.conf.j2'),
         owner="root",
         group="root")
def test_template_loader(self, exists_mock, getmtime_mock, open_mock):
    """
    Testing template loader on existent file
    """
    exists_mock.return_value = True
    getmtime_mock.return_value = 10

    mock_file = MagicMock(name='file_mock')
    mock_file.__enter__.return_value = mock_file
    mock_file.read.return_value = 'template content'
    open_mock.return_value = mock_file

    with Environment("/base") as env:
        template = Template("test.j2")

    # The loader must resolve the name under <base>/templates and stat it.
    self.assertEqual(open_mock.call_count, 1)
    open_mock.assert_called_with('/base/templates/test.j2', 'rb')
    self.assertEqual(getmtime_mock.call_count, 1)
    getmtime_mock.assert_called_with('/base/templates/test.j2')
def action_create(self):
    """Install or refresh an apt repository definition.

    Renders the repo template into a temp file (optionally appended to the
    existing file content), and only when the result differs from the
    installed .list file: rewrites it, runs the package-index update, and
    imports any public keys the update reports as missing.
    """
    with Environment.get_instance_copy() as env:
        with tempfile.NamedTemporaryFile() as tmpf:
            list_name = format("{repo_file_name}.list",
                               repo_file_name=self.resource.repo_file_name)
            repo_file_path = format("{repo_dir}/{repo_file_name}",
                                    repo_dir=self.repo_dir,
                                    repo_file_name=list_name)

            template_path = os.path.join(
                os.path.dirname(os.path.realpath(__file__)), '..',
                REPO_TEMPLATE_FOLDER, self.resource.repo_template)

            new_content = Template(
                template_path,
                package_type=self.package_type,
                base_url=self.resource.base_url,
                components=' '.join(self.resource.components)).get_content()

            old_content = ''
            if self.resource.append_to_file and os.path.isfile(repo_file_path):
                with open(repo_file_path) as existing:
                    old_content = existing.read() + '\n'

            # Stage the desired content so it can be diffed against the
            # currently installed file.
            File(tmpf.name, content=old_content + new_content)

            if not os.path.isfile(repo_file_path) or not filecmp.cmp(tmpf.name, repo_file_path):
                File(repo_file_path, content=StaticFile(tmpf.name))

                update_cmd_formatted = [format(x) for x in self.update_cmd]
                # This is time-expensive.
                retcode, out = checked_call(update_cmd_formatted, sudo=True)

                # Import public keys for any newly added repositories.
                missing_pkeys = set(re.findall(self.missing_pkey_regex, out))
                for pkey in missing_pkeys:
                    Execute(
                        format(self.add_pkey_cmd),
                        # in case we are on a host w/o internet (using
                        # localrepo), we should ignore hanging
                        timeout=15,
                        ignore_failures=True)
def setup_solr():
    """Lay down Log Search Solr directories and config files, then make
    sure the Solr znode exists in ZooKeeper."""
    import params

    Directory([params.solr_dir,
               params.logsearch_solr_log_dir,
               params.logsearch_solr_piddir,
               params.logsearch_solr_conf,
               params.logsearch_solr_datadir,
               params.logsearch_solr_data_resources_dir],
              mode=0o755,
              cd_access='a',
              owner=params.logsearch_solr_user,
              group=params.logsearch_solr_group,
              create_parents=True)

    # Touch the log file so its ownership is right before Solr starts.
    File(params.logsearch_solr_log,
         mode=0o644,
         owner=params.logsearch_solr_user,
         group=params.logsearch_solr_group,
         content='')

    File(format("{logsearch_solr_conf}/logsearch-solr-env.sh"),
         content=InlineTemplate(params.solr_env_content),
         mode=0o755,
         owner=params.logsearch_solr_user)

    File(format("{logsearch_solr_datadir}/solr.xml"),
         content=InlineTemplate(params.solr_xml_content),
         owner=params.logsearch_solr_user)

    File(format("{logsearch_solr_conf}/log4j.properties"),
         content=InlineTemplate(params.solr_log4j_content),
         owner=params.logsearch_solr_user)

    File(format("{logsearch_solr_datadir}/zoo.cfg"),
         content=Template("zoo.cfg.j2"),
         owner=params.logsearch_solr_user)

    # Create the Solr znode only when it does not already exist.
    zk_cli_prefix = format(
        'export JAVA_HOME={java64_home}; {cloud_scripts}/zkcli.sh -zkhost {zookeeper_hosts}')
    Execute(format('{zk_cli_prefix} -cmd makepath {logsearch_solr_znode}'),
            not_if=format("{zk_cli_prefix} -cmd get {logsearch_solr_znode}"),
            ignore_failures=True,
            user=params.logsearch_solr_user)
def configure(self, env):
    """Render /etc/krb5.conf from the inline template and the KDC's
    kdc.conf from its Jinja2 template."""
    import params
    import status_params
    env.set_params(params)

    File(format("/etc/krb5.conf"),
         content=InlineTemplate(status_params.krb5_template_config),
         owner='root',
         group='root',
         mode=0o644)

    File(format("/var/kerberos/krb5kdc/kdc.conf"),
         content=Template('kdc.conf.j2'),
         owner='root',
         group='root',
         mode=0o644)
def setup_solr_kerberos_auth():
    """Write the Solr JAAS config and upload security.json to ZooKeeper,
    unless a security.json is already present."""
    import params

    File(format("{solr_kerberos_jaas_config}"),
         content=Template("solr_server_jaas.conf.j2"),
         owner=params.solr_config_user)

    if _has_security_json():
        Logger.info("Solr Security Json was found, it will not be overridden")
        return

    upload_cmd = format('{zk_client_prefix} -cmd put {solr_cloud_zk_directory}{security_json} ')
    upload_cmd += format('\'{solr_security_json}\'')
    Execute(upload_cmd,
            environment={'JAVA_HOME': params.java64_home},
            ignore_failures=True,
            user=params.solr_config_user)
def setup_solr_kerberos_auth():
    """Write the Solr JAAS config and, in cloud mode, register the
    KerberosPlugin in ZooKeeper's security.json."""
    import params

    File(format("{solr_kerberos_jaas_config}"),
         content=Template("solr_server_jaas.conf.j2"),
         owner=params.solr_config_user)

    # Nothing to push to ZooKeeper when not running in SolrCloud mode.
    if not params.solr_cloud_mode:
        return

    # TODO LWSHADOOP-637 add json in the config file and only upload it when kerberos is enable
    upload_cmd = format('{zk_client_prefix} -cmd put {solr_cloud_zk_directory}{security_json} ')
    upload_cmd += '\'{"authentication":{"class": "org.apache.solr.security.KerberosPlugin"}}\''
    Execute(upload_cmd,
            environment={'JAVA_HOME': params.java64_home},
            ignore_failures=True,
            user=params.solr_config_user)
def setup_metastore():
    """Configure the Hive Metastore: hivemetastore-site.xml (when
    supported), metrics properties, the start script, and the HDFS
    replication directories."""
    import params

    if params.hive_metastore_site_supported:
        hivemetastore_site_config = get_config("hivemetastore-site")
        if hivemetastore_site_config:
            XmlConfig("hivemetastore-site.xml",
                      conf_dir=params.hive_server_conf_dir,
                      configurations=params.config['configurations']['hivemetastore-site'],
                      configuration_attributes=params.config['configuration_attributes']['hivemetastore-site'],
                      owner=params.hive_user,
                      group=params.user_group,
                      mode=0o600)

        File(os.path.join(params.hive_server_conf_dir,
                          "hadoop-metrics2-hivemetastore.properties"),
             owner=params.hive_user,
             group=params.user_group,
             content=Template("hadoop-metrics2-hivemetastore.properties.j2"),
             mode=0o600)

    File(params.start_metastore_path,
         mode=0o755,
         content=StaticFile('startMetastore.sh'))

    # Replication dirs: the CM root is world-writable with the sticky bit.
    if params.hive_repl_cmrootdir:
        params.HdfsResource(params.hive_repl_cmrootdir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.hive_user,
                            group=params.user_group,
                            mode=0o1777)
    if params.hive_repl_rootdir:
        params.HdfsResource(params.hive_repl_rootdir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.hive_user,
                            group=params.user_group,
                            mode=0o700)
    if params.hive_repl_cmrootdir or params.hive_repl_rootdir:
        params.HdfsResource(None, action="execute")
def server_files():
    """Install Ganglia server files: the rrd.py helper, the rrdcached base
    directory, and (on SUSE/Ubuntu) the Apache ganglia config."""
    import params

    rrd_py_path = params.rrd_py_path
    Directory(rrd_py_path, create_parents=True)
    TemplateConfig(path.join(rrd_py_path, "rrd.py"),
                   owner="root",
                   group="root",
                   mode=0o755)

    rrd_file_owner = params.gmetad_user
    Directory(params.rrdcached_base_dir,
              owner=rrd_file_owner,
              group=rrd_file_owner,
              mode=0o755,
              create_parents=True)

    if OSCheck.is_suse_family() or OSCheck.is_ubuntu_family():
        File(params.ganglia_apache_config_file,
             content=Template("ganglia.conf.j2"),
             mode=0o644)
def init_parsers(self):
    """Copy the local grok patterns into HDFS and write the initial
    global.json parser configuration."""
    Logger.info(
        "Copying grok patterns from local directory '{}' to HDFS '{}'".format(
            self.__params.local_grok_patterns_dir, self.__params.metron_apps_dir))

    self.__params.HdfsResource(self.__params.metron_apps_dir,
                               type="directory",
                               action="create_on_execute",
                               owner=self.__params.metron_user,
                               mode=0o775,
                               source=self.__params.local_grok_patterns_dir)

    Logger.info("Creating global.json file")
    File(self.__params.metron_zookeeper_config_path + '/global.json',
         content=Template("metron-global.json"),
         owner=self.__params.metron_user,
         mode=0o775)

    Logger.info("Done initializing parser configuration")
def elastic():
    """Configure an Elasticsearch master node: directories, elastic-env.sh,
    elasticsearch.yml, and /etc/sysconfig/elasticsearch."""
    # Parenthesized prints behave identically under Python 2 for a single
    # argument and remain valid under Python 3.
    print("INSIDE THE %s" % __file__)
    import params

    # path_data is a comma-separated, possibly quoted list of data dirs.
    # Quotes are stripped up front, so no per-element re-strip is needed
    # (the original did the same replace twice).
    params.path_data = params.path_data.replace('"', '')
    data_path = params.path_data.replace(' ', '').split(',')

    directories = [params.log_dir, params.pid_dir, params.conf_dir] + data_path
    Directory(directories,
              create_parents=True,
              mode=0o755,
              owner=params.elastic_user,
              group=params.elastic_user)

    print("Master env: " + "{}/elastic-env.sh".format(params.conf_dir))
    File("{}/elastic-env.sh".format(params.conf_dir),
         owner=params.elastic_user,
         content=InlineTemplate(params.elastic_env_sh_template))

    configurations = params.config['configurations']['elastic-site']
    print("Master yml: " + "{}/elasticsearch.yml".format(params.conf_dir))
    File("{}/elasticsearch.yml".format(params.conf_dir),
         content=Template("elasticsearch.master.yaml.j2",
                          configurations=configurations),
         owner=params.elastic_user,
         group=params.elastic_user)

    print("Master sysconfig: /etc/sysconfig/elasticsearch")
    File(format("/etc/sysconfig/elasticsearch"),
         owner="root",
         group="root",
         content=InlineTemplate(params.sysconfig_template))
def setup_config():
    """Write core-site.xml (when a Hadoop conf dir is present on this
    host) and the Log Feeder configuration file."""
    import params

    stackversion = params.stack_version_unformatted
    Logger.info("FS Type: {0}".format(params.dfs_type))

    hadoop_conf_dir_present = (
        hasattr(params, "hadoop_conf_dir")
        and params.hadoop_conf_dir is not None
        and os.path.exists(params.hadoop_conf_dir))
    if not hadoop_conf_dir_present:
        Logger.warning(
            "Parameter hadoop_conf_dir is missing or directory does not exist. This is expected if this host does not have any Hadoop components."
        )

    if hadoop_conf_dir_present and (params.has_namenode
                                    or stackversion.find('Gluster') >= 0
                                    or params.dfs_type == 'HCFS'):
        # create core-site only if the hadoop config diretory exists
        XmlConfig(
            "core-site.xml",
            conf_dir=params.hadoop_conf_dir,
            configurations=params.config['configurations']['core-site'],
            configuration_attributes=params.config['configurationAttributes']['core-site'],
            owner=params.hdfs_user,
            group=params.user_group,
            only_if=format("ls {hadoop_conf_dir}"))

    Directory(params.logsearch_logfeeder_conf,
              mode=0o755,
              cd_access='a',
              create_parents=True)

    if params.logsearch_config_file_exists:
        File(format("{logsearch_logfeeder_conf}/" + params.logsearch_config_file_name),
             content=Template(params.logsearch_config_file_path,
                              extra_imports=[default]))
    else:
        Logger.warning('No logsearch configuration exists at ' +
                       params.logsearch_config_file_path)
def __generate_config_files(self):
    """
    Generates pxf-env.sh file from jinja template and sets the classpath for HDP
    """
    import params
    import shutil

    hdp_stack = "HDP"

    # Create pxf-env.sh from its Jinja template.
    File("{0}/pxf-env.sh".format(params.pxf_conf_dir),
         content=Template("pxf-env.j2"))

    # The classpath defaults to PHD; on HDP, swap in the HDP classpath file.
    if params.stack_name == hdp_stack:
        shutil.copy2("{0}/pxf-privatehdp.classpath".format(params.pxf_conf_dir),
                     "{0}/pxf-private.classpath".format(params.pxf_conf_dir))

    File('{0}/pxf-public.classpath'.format(params.pxf_conf_dir),
         content=params.config['configurations']['pxf-public-classpath']['content'].lstrip())

    File('{0}/pxf-profiles.xml'.format(params.pxf_conf_dir),
         content=params.config['configurations']['pxf-profiles']['content'].lstrip())

    # With security on, inject the host-substituted Kerberos principal.
    if params.security_enabled:
        pxf_site_dict = dict(params.config['configurations']['pxf-site'])
        pxf_site_dict['pxf.service.kerberos.principal'] = "{0}/_HOST@{1}".format(
            params.pxf_user, params.realm_name)
        pxf_site = ConfigDictionary(pxf_site_dict)
    else:
        pxf_site = params.config['configurations']['pxf-site']

    XmlConfig("pxf-site.xml",
              conf_dir=params.pxf_conf_dir,
              configurations=pxf_site,
              configuration_attributes=params.config['configuration_attributes']['pxf-site'])
def test_template_loader_arguments(self, exists_mock, getmtime_mock, open_mock):
    """
    Testing template loader additional arguments in template and absolute file-path
    """
    exists_mock.return_value = True
    getmtime_mock.return_value = 10

    mock_file = MagicMock(name='file_mock')
    mock_file.__enter__.return_value = mock_file
    mock_file.read.return_value = '{{test_arg1}} template content'
    open_mock.return_value = mock_file

    with Environment("/base") as env:
        template = Template("/absolute/path/test.j2", [], test_arg1="test")
        content = template.get_content()

    # Absolute paths bypass the templates dir; the extra kwarg is rendered.
    self.assertEqual(open_mock.call_count, 1)
    self.assertEqual(u'test template content', content)
    open_mock.assert_called_with('/absolute/path/test.j2', 'rb')
    self.assertEqual(getmtime_mock.call_count, 1)
    getmtime_mock.assert_called_with('/absolute/path/test.j2')
def write_krb5_conf():
    """Render krb5.conf, preferring a cluster-supplied inline template
    over the stock Jinja2 template."""
    import params

    Directory(params.krb5_conf_dir,
              owner='root',
              create_parents=True,
              group='root',
              mode=0o755)

    # Fall back to the packaged template when none (or blank) is configured.
    if (params.krb5_conf_template is None) or not params.krb5_conf_template.strip():
        content = Template('krb5_conf.j2')
    else:
        content = InlineTemplate(params.krb5_conf_template)

    File(params.krb5_conf_path,
         content=content,
         owner='root',
         group='root',
         mode=0o644)
def nfsgateway(action=None, format=False):
    """Manage the HDFS NFS gateway for the given action.

    NOTE(review): the `format` parameter shadows the resource_management
    `format` helper; it is unused here but kept for interface compatibility.
    """
    import params

    # rpcbind must be up before the gateway can start.
    if action == "start":
        prepare_rpcbind()

    if action == "configure":
        Directory(params.nfs_file_dump_dir,
                  owner=params.hdfs_user,
                  group=params.user_group)
        generate_logfeeder_input_config(
            'hdfs', Template("input.config-hdfs.json.j2", extra_imports=[default]))
    elif action in ("start", "stop"):
        service(action=action,
                name="nfs3",
                user=params.root_user,
                create_pid_dir=True,
                create_log_dir=True)
def create_topology_mapping():
    """Write the rack topology mappings data file next to the topology
    script, creating its parent directory on first use."""
    import params

    path = params.net_topology_mapping_data_file_path
    parent_dir = os.path.dirname(path)

    # only create the parent directory and set its permission if it does not exist
    if not os.path.exists(parent_dir):
        Directory(parent_dir,
                  create_parents=True,
                  owner=params.hdfs_user,
                  group=params.user_group)

    # placing the mappings file in the same folder where the topology script is located
    File(path,
         content=Template("topology_mappings.data.j2"),
         owner=params.hdfs_user,
         group=params.user_group,
         # if there is no hadoop components, don't create the script
         only_if=format("test -d {net_topology_script_dir}"))
def slider():
    """Configure the Slider client: conf dir, slider-client.xml, env
    scripts, log4j, and the slider tarball ownership."""
    import params

    Directory(params.slider_conf_dir, create_parents=True)

    # slider-client may be absent from the desired configs; fall back to {}.
    if 'configurations' in params.config and 'slider-client' in params.config['configurations']:
        slider_client_config = params.config['configurations']['slider-client']
    else:
        slider_client_config = {}

    XmlConfig("slider-client.xml",
              conf_dir=params.slider_conf_dir,
              configurations=slider_client_config,
              mode=0o644)

    File(format("{slider_conf_dir}/slider-env.sh"),
         mode=0o755,
         content=InlineTemplate(params.slider_env_sh_template))

    File(format("{slider_conf_dir}/storm-slider-env.sh"),
         mode=0o755,
         content=Template('storm-slider-env.sh.j2'))

    # Fixed: identity test instead of `!= None`.
    if params.log4j_props is not None:
        File(format("{params.slider_conf_dir}/log4j.properties"),
             mode=0o644,
             content=params.log4j_props)
    elif os.path.exists(format("{params.slider_conf_dir}/log4j.properties")):
        File(format("{params.slider_conf_dir}/log4j.properties"),
             mode=0o644)

    File(params.slider_tar_gz,
         owner=params.hdfs_user,
         group=params.user_group)
def hdfs(component=None):
    """Windows HDFS setup: NameNode directories and exclude file, the
    service run-as user, and the hadoop-policy / hdfs-site XML configs."""
    import params

    if component == "namenode":
        directories = params.dfs_name_dir.split(",")
        Directory(directories,
                  owner=params.hdfs_user,
                  mode="(OI)(CI)F",  # NOTE(review): looks like a Windows inheritable-ACL spec — confirm
                  create_parents=True)
        File(params.exclude_file_path,
             content=Template("exclude_hosts_list.j2"),
             owner=params.hdfs_user,
             mode="f")

    # Fixed: dict.has_key() is deprecated (removed in Python 3); use `in`.
    if component in params.service_map:
        service_name = params.service_map[component]
        ServiceConfig(service_name,
                      action="change_user",
                      username=params.hdfs_user,
                      password=Script.get_password(params.hdfs_user))

    if "hadoop-policy" in params.config['configurations']:
        XmlConfig("hadoop-policy.xml",
                  conf_dir=params.hadoop_conf_dir,
                  configurations=params.config['configurations']['hadoop-policy'],
                  owner=params.hdfs_user,
                  mode="f",
                  configuration_attributes=params.config['configuration_attributes']['hadoop-policy'])

    XmlConfig("hdfs-site.xml",
              conf_dir=params.hadoop_conf_dir,
              configurations=params.config['configurations']['hdfs-site'],
              owner=params.hdfs_user,
              mode="f",
              configuration_attributes=params.config['configuration_attributes']['hdfs-site'])
def start(self, env, upgrade_type=None):
    """Configure Infra Solr, ensure its znode exists, then start the
    server in cloud mode, teeing output to the Solr log."""
    import params
    env.set_params(params)
    self.configure(env)

    generate_logfeeder_input_config(
        'ambari-infra',
        Template("input.config-ambari-infra.json.j2", extra_imports=[default]))

    setup_solr_znode_env()

    if params.security_enabled:
        start_cmd = format(
            '{solr_bindir}/solr start -cloud -noprompt -s {infra_solr_datadir} -Dsolr.kerberos.name.rules=\'{infra_solr_kerberos_name_rules}\' 2>&1')
    else:
        start_cmd = format(
            '{solr_bindir}/solr start -cloud -noprompt -s {infra_solr_datadir} 2>&1')

    # Preserve solr's exit status through the tee pipe.
    piped_start_cmd = format('{start_cmd} | tee {infra_solr_log}') + '; (exit "${PIPESTATUS[0]}")'
    Execute(piped_start_cmd,
            environment={'SOLR_INCLUDE': format('{infra_solr_conf}/infra-solr-env.sh')},
            user=params.infra_solr_user,
            logoutput=True)