def test_attribute_backup(self, isdir_mock, exists_mock, open_mock, ensure_mock, backup_file_mock):
  """
  Tests 'backup' attribute: backup=False must not back up the replaced
  file, backup=True must back it up exactly once.
  """
  # one (file-isdir, parent-isdir) pair per File resource below
  isdir_mock.side_effect = [False, True, False, True]
  open_mock.return_value = MagicMock()
  exists_mock.return_value = True  # target exists, so a backup is possible

  # backup=False: no backup even though the file exists
  with Environment('/') as env:
    File('/directory/file',
         action='create',
         mode=0777,
         backup=False,
         content='new-content')
  # NOTE(review): unlike sibling tests, no env.run() is called here —
  # confirm resources execute on Environment exit in this version.
  self.assertEqual(backup_file_mock.call_count, 0)

  # backup=True: exactly one backup of the replaced file
  with Environment('/') as env:
    File('/directory/file',
         action='create',
         mode=0777,
         backup=True,
         content='new-content')
  self.assertEqual(backup_file_mock.call_count, 1)
  backup_file_mock.assert_called_with('/directory/file')
def pig(): import params # ensure that matching LZO libraries are installed for Pig lzo_utils.install_lzo_if_needed() Directory(params.pig_conf_dir, create_parents=True, owner=params.hdfs_user, group=params.user_group) File(format("{pig_conf_dir}/pig-env.sh"), owner=params.hdfs_user, mode=0755, content=InlineTemplate(params.pig_env_sh_template)) # pig_properties is always set to a default even if it's not in the payload File(format("{params.pig_conf_dir}/pig.properties"), mode=0644, group=params.user_group, owner=params.hdfs_user, content=params.pig_properties) if (params.log4j_props is not None): File(format("{params.pig_conf_dir}/log4j.properties"), mode=0644, group=params.user_group, owner=params.hdfs_user, content=params.log4j_props) elif (os.path.exists(format("{params.pig_conf_dir}/log4j.properties"))): File(format("{params.pig_conf_dir}/log4j.properties"), mode=0644, group=params.user_group, owner=params.hdfs_user)
def action_create(self):
  """
  Writes every pending repository file, but only when its content differs
  from what is already on disk.

  Fix: mirror the sibling provider and roll back the newly written repo
  file if self.update() fails — otherwise Ambari would consider the update
  successful and skip repository operations on retry.
  """
  with tempfile.NamedTemporaryFile() as tmpf:
    with tempfile.NamedTemporaryFile() as old_repo_tmpf:
      for repo_file_path, repo_file_content in RepositoryProvider.repo_files_content.iteritems():
        repo_file_content = repo_file_content.strip()

        # stage desired content in a temp file for comparison
        File(tmpf.name, content=repo_file_content)

        if os.path.isfile(repo_file_path):
          # a copy of old repo file, which will be readable by current user
          File(old_repo_tmpf.name,
               content=StaticFile(repo_file_path),
          )

        if not os.path.isfile(repo_file_path) or not filecmp.cmp(tmpf.name, old_repo_tmpf.name):
          Logger.info(format("Rewriting {repo_file_path} since it has changed."))
          File(repo_file_path, content=StaticFile(tmpf.name))

          try:
            self.update(repo_file_path)
          except:
            # remove created file or else ambari will consider that update
            # was successful and skip repository operations
            File(repo_file_path, action="delete")
            raise

  RepositoryProvider.repo_files_content.clear()
def action_create(self):
  # Renders an apt .list repository file, rewrites it only when its content
  # changed, refreshes the apt metadata and imports any missing public keys.
  with Environment.get_instance_copy() as env:
    with tempfile.NamedTemporaryFile() as tmpf:
      repo_file_name = format("{repo_file_name}.list",
                              repo_file_name=self.resource.repo_file_name)
      repo_file_path = format("{repo_dir}/{repo_file_name}", repo_dir=self.repo_dir)

      new_content = InlineTemplate(self.resource.repo_template,
                                   package_type=self.package_type,
                                   base_url=self.resource.base_url,
                                   components=' '.join(self.resource.components)).get_content()
      old_content = ''
      if self.resource.append_to_file and os.path.isfile(repo_file_path):
        old_content = sudo.read_file(repo_file_path) + '\n'

      # stage the desired content in a temp file; only rewrite the real repo
      # file (and pay the cost of the apt update below) when it differs
      File(tmpf.name, content=old_content+new_content)

      if not os.path.isfile(repo_file_path) or not filecmp.cmp(tmpf.name, repo_file_path):
        File(repo_file_path,
             content=StaticFile(tmpf.name)
        )

        update_cmd_formatted = [format(x) for x in self.update_cmd]
        # this is time expensive
        retcode, out = checked_call(update_cmd_formatted, sudo=True)

        # add public keys for new repos
        missing_pkeys = set(re.findall(self.missing_pkey_regex, out))
        for pkey in missing_pkeys:
          Execute(format(self.add_pkey_cmd),
                  timeout=15,  # in case we are on the host w/o internet (using localrepo), we should ignore hanging
                  ignore_failures=True
          )
def mahout(): import params # ensure that matching LZO libraries are installed for Mahout lzo_utils.install_lzo_if_needed() Directory(params.mahout_conf_dir, create_parents=True, owner=params.mahout_user, group=params.user_group) XmlConfig( "yarn-site.xml", conf_dir=params.hadoop_conf_dir, configurations=params.config['configurations']['yarn-site'], configuration_attributes=params.config['configuration_attributes'] ['yarn-site'], owner=params.yarn_user, group=params.user_group, mode=0644) if not is_empty(params.log4j_props): File(format("{params.mahout_conf_dir}/log4j.properties"), mode=0644, group=params.user_group, owner=params.mahout_user, content=params.log4j_props) elif (os.path.exists(format("{params.mahout_conf_dir}/log4j.properties"))): File(format("{params.mahout_conf_dir}/log4j.properties"), mode=0644, group=params.user_group, owner=params.mahout_user)
def download_database_library_if_needed():
  """
  Downloads the library to use when connecting to the Oozie database, if
  necessary. The library will be downloaded to 'params.target' unless
  otherwise specified.
  :param target_directory: the location where the database library will be downloaded to.
  :return:
  """
  import params

  # only the MySQL and Oracle JDBC drivers need to be downloaded
  drivers_requiring_download = (
      "com.mysql.jdbc.Driver",
      "oracle.jdbc.driver.OracleDriver",
  )
  if params.jdbc_driver_name not in drivers_requiring_download:
    return

  File(params.downloaded_custom_connector,
       content=DownloadSource(params.driver_curl_source),
  )

  Execute(('cp', '--remove-destination', params.downloaded_custom_connector, params.target),
          #creates=params.target, TODO: uncomment after ranger_hive_plugin will not provide jdbc
          path=["/bin", "/usr/bin/"],
          sudo=True)

  File(params.target,
       owner=params.oozie_user,
       group=params.user_group)
def action_create(self):
  # Writes each pending repository file, but only when its content actually
  # differs from what is already on disk; rolls back on update failure.
  with tempfile.NamedTemporaryFile() as tmpf:
    with tempfile.NamedTemporaryFile() as old_repo_tmpf:
      for repo_file_path, repo_file_content in RepositoryProvider.repo_files_content.iteritems():
        repo_file_content = repo_file_content.strip()

        # stage the new content in a temp file for comparison
        File(tmpf.name,
             content=repo_file_content
        )

        if os.path.isfile(repo_file_path):
          # a copy of old repo file, which will be readable by current user
          File(old_repo_tmpf.name,
               content=StaticFile(repo_file_path),
          )

        if not os.path.isfile(repo_file_path) or not filecmp.cmp(tmpf.name, old_repo_tmpf.name):
          Logger.info(format("Rewriting {repo_file_path} since it has changed."))
          File(repo_file_path,
               content = StaticFile(tmpf.name)
          )

          try:
            self.update(repo_file_path)
          except:
            # remove created file or else ambari will consider that update was successful and skip repository operations
            File(repo_file_path,
                 action = "delete",
            )
            raise

  RepositoryProvider.repo_files_content.clear()
def stop(self, env, upgrade_type=None): import params import status_params nifi_toolkit_util_common.copy_toolkit_scripts( params.toolkit_files_dir, params.toolkit_tmp_dir, params.nifi_user, params.nifi_group, upgrade_type, service=nifi_toolkit_util_common.NIFI) run_ca_script = os.path.join(params.toolkit_tmp_dir, 'run_ca.sh') ca_server_script = nifi_toolkit_util_common.get_toolkit_script( 'tls-toolkit.sh', params.toolkit_tmp_dir, params.stack_version_buildnum) File(ca_server_script, mode=0755) File(run_ca_script, mode=0755) if path_isfile(status_params.nifi_ca_pid_file): Execute( (run_ca_script, "stop", params.jdk64_home, ca_server_script, params.nifi_config_dir + '/nifi-certificate-authority.json', params.nifi_ca_log_file_stdout, params.nifi_ca_log_file_stderr, status_params.nifi_ca_pid_file, params.toolkit_ca_check_url), user=params.nifi_user, logoutput=True) try: self.status(env) except ComponentIsNotRunning: unlink(status_params.nifi_ca_pid_file)
def setup_configs(): """ Creates configs for services HDFS mapred """ import params if params.has_namenode or params.dfs_type == 'HCFS': if os.path.exists(params.hadoop_conf_dir): File( params.task_log4j_properties_location, content=StaticFile("task-log4j.properties"), mode=0755) if os.path.exists( os.path.join(params.hadoop_conf_dir, 'configuration.xsl')): File( os.path.join(params.hadoop_conf_dir, 'configuration.xsl'), owner=params.hdfs_user, group=params.user_group) if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')): File( os.path.join(params.hadoop_conf_dir, 'masters'), owner=params.hdfs_user, group=params.user_group) generate_include_file()
def start(self, env, upgrade_type=None): import params import status_params install_nifi() install_nifi_toolkit() nifi_toolkit_util_common.copy_toolkit_scripts( params.toolkit_files_dir, params.toolkit_tmp_dir, params.nifi_user, params.nifi_group, upgrade_type, service=nifi_toolkit_util_common.NIFI) self.configure(env) ca_server_script = nifi_toolkit_util_common.get_toolkit_script( 'tls-toolkit.sh', params.toolkit_tmp_dir, params.stack_version_buildnum) run_ca_script = os.path.join(params.toolkit_tmp_dir, 'run_ca.sh') Directory([params.nifi_config_dir], owner=params.nifi_user, group=params.nifi_group, create_parents=True, recursive_ownership=True) File(ca_server_script, mode=0755) File(run_ca_script, mode=0755) Execute( (run_ca_script, "start", params.jdk64_home, ca_server_script, params.nifi_config_dir + '/nifi-certificate-authority.json', params.nifi_ca_log_file_stdout, params.nifi_ca_log_file_stderr, status_params.nifi_ca_pid_file, params.toolkit_ca_check_url), user=params.nifi_user, logoutput=True) if not os.path.isfile(status_params.nifi_ca_pid_file): raise Exception('Expected pid file to exist')
def oozie_smoke_shell_file(file_name, prepare_hdfs_file_name):
  """
  Runs the Oozie smoke test: stages the check scripts locally, re-seeds the
  examples/input-data dirs in HDFS, then executes the shell check.

  :param file_name: name of the smoke-test shell script resource
  :param prepare_hdfs_file_name: name of the script preparing HDFS dirs
  """
  import params

  # stage both scripts into tmp_dir as executables
  File(format("{tmp_dir}/{file_name}"),
       content=StaticFile(file_name),
       mode=0755)
  File(format("{tmp_dir}/{prepare_hdfs_file_name}"),
       content=StaticFile(prepare_hdfs_file_name),
       mode=0755)

  os_family = System.get_instance().os_family
  # first directory matching the examples glob is used
  oozie_examples_dir = glob.glob(params.oozie_examples_regex)[0]

  Execute(format("{tmp_dir}/{prepare_hdfs_file_name} {conf_dir} {oozie_examples_dir} {hadoop_conf_dir} "),
          tries=3,
          try_sleep=5,
          logoutput=True)

  # recreate /user/<smokeuser>/examples from the local examples dir
  examples_dir = format('/user/{smokeuser}/examples')
  params.HdfsResource(examples_dir,
                      action="delete_on_execute",
                      type="directory")
  params.HdfsResource(examples_dir,
                      action="create_on_execute",
                      type="directory",
                      source=format("{oozie_examples_dir}/examples"),
                      owner=params.smokeuser,
                      group=params.user_group)

  # recreate /user/<smokeuser>/input-data likewise
  input_data_dir = format('/user/{smokeuser}/input-data')
  params.HdfsResource(input_data_dir,
                      action="delete_on_execute",
                      type="directory")
  params.HdfsResource(input_data_dir,
                      action="create_on_execute",
                      type="directory",
                      source=format("{oozie_examples_dir}/examples/input-data"),
                      owner=params.smokeuser,
                      group=params.user_group)
  params.HdfsResource(None, action="execute")

  # secure clusters additionally pass keytab/principal so the script can kinit
  if params.security_enabled:
    sh_cmd = format("{tmp_dir}/{file_name} {os_family} {oozie_lib_dir} {conf_dir} {oozie_bin_dir} {oozie_examples_dir} {hadoop_conf_dir} {hadoop_bin_dir} {smokeuser} {security_enabled} {smokeuser_keytab} {kinit_path_local} {smokeuser_principal}")
  else:
    sh_cmd = format("{tmp_dir}/{file_name} {os_family} {oozie_lib_dir} {conf_dir} {oozie_bin_dir} {oozie_examples_dir} {hadoop_conf_dir} {hadoop_bin_dir} {smokeuser} {security_enabled}")

  Execute(sh_cmd,
          path=params.execute_path,
          tries=3,
          try_sleep=5,
          logoutput=True)
def test_ensure_metadata(self, isdir_mock, exists_mock, create_file_mock, stat_mock,
                         chmod_mock, chown_mock, getgrnam_mock, getpwnam_mock):
  """
  Tests if _ensure_metadata changes owner, usergroup and permissions of
  file to proper values
  """
  isdir_mock.side_effect = [False, True, False, True]
  exists_mock.return_value = False

  # fake os.stat result: mode/uid/gid that differ from the desired values
  class stat():
    def __init__(self):
      self.st_mode = 0666
      self.st_uid = 1
      self.st_gid = 1

  stat_mock.return_value = stat()
  getpwnam_mock.return_value = MagicMock()
  getpwnam_mock.return_value.pw_uid = 0
  getgrnam_mock.return_value = MagicMock()
  getgrnam_mock.return_value.gr_gid = 0

  # desired uid/gid (0) differ from actual (1) -> both chmod and chown fire
  with Environment('/') as env:
    File('/directory/file',
         action='create',
         mode=0777,
         content='file-content',
         owner='root',
         group='hdfs'
    )

  create_file_mock.assert_called_with('/directory/file', 'file-content', encoding=None)
  self.assertEqual(create_file_mock.call_count, 1)
  stat_mock.assert_called_with('/directory/file')
  self.assertEqual(chmod_mock.call_count, 1)
  self.assertEqual(chown_mock.call_count, 1)
  getgrnam_mock.assert_called_once_with('hdfs')
  getpwnam_mock.assert_called_with('root')

  chmod_mock.reset_mock()
  chown_mock.reset_mock()
  getpwnam_mock.return_value = MagicMock()
  getpwnam_mock.return_value.pw_uid = 1
  getgrnam_mock.return_value = MagicMock()
  getgrnam_mock.return_value.gr_gid = 1

  # desired uid/gid now match actual (1) -> chown invoked with (None, None)
  with Environment('/') as env:
    File('/directory/file',
         action='create',
         mode=0777,
         content='file-content',
         owner='root',
         group='hdfs'
    )

  self.assertEqual(chmod_mock.call_count, 1)
  chown_mock.assert_called_with('/directory/file', None, None)
def configure(self, env, upgrade_type=None):
  """
  Lays down Superset directories and config files, then initializes the
  Superset database, admin user, and (when Druid is present) the Druid
  cluster entry in the Superset DB.
  """
  import params

  Directory([params.superset_pid_dir,
             params.superset_log_dir,
             params.superset_config_dir,
             params.superset_home_dir],
            mode=0755,
            cd_access='a',
            owner=params.superset_user,
            group=params.user_group,
            create_parents=True,
            recursive_ownership=True)

  File(format("{params.superset_config_dir}/superset-env.sh"),
       mode=0755,
       owner=params.superset_user,
       group=params.user_group,
       content=InlineTemplate(params.superset_env_sh_template))

  File(os.path.join(params.superset_bin_dir, 'superset.sh'),
       owner=params.superset_user,
       group=params.user_group,
       mode=0755,
       content=Template("superset.sh"))

  superset_config = mutable_config_dict(params.config["configurations"]["superset"])

  # explicit DB URI from params overrides whatever is in the config payload
  if params.superset_db_uri:
    superset_config["SQLALCHEMY_DATABASE_URI"] = params.superset_db_uri

  PropertiesFile("superset_config.py",
                 dir=params.superset_config_dir,
                 properties=quote_string_values(superset_config),
                 owner=params.superset_user,
                 group=params.user_group)

  # Initialize DB and create admin user.
  Execute(format("source {params.superset_config_dir}/superset-env.sh ; {params.superset_bin_dir}/superset db upgrade"),
          user=params.superset_user)
  # NOTE(review): "!p" appears to mark the password for masked logging by
  # format() (same marker used by the ranger keystore helpers) — confirm.
  Execute(format("source {params.superset_config_dir}/superset-env.sh ; {params.superset_bin_dir}/fabmanager create-admin --app superset --username '{params.superset_admin_user}' --password '{params.superset_admin_password!p}' --firstname '{params.superset_admin_firstname}' --lastname '{params.superset_admin_lastname}' --email '{params.superset_admin_email}'"),
          user=params.superset_user)
  Execute(format("source {params.superset_config_dir}/superset-env.sh ; {params.superset_bin_dir}/superset init"),
          user=params.superset_user)

  # Configure Druid Cluster in superset DB
  if len(params.druid_coordinator_hosts) > 0:
    Execute(format("source {params.superset_config_dir}/superset-env.sh ; {params.superset_bin_dir}/superset configure_druid_cluster --name druid-ambari --coordinator-host {params.druid_coordinator_host} --coordinator-port {params.druid_coordinator_port} --broker-host {params.druid_router_host} --broker-port {params.druid_router_port} --coordinator-endpoint druid/coordinator/v1/metadata --broker-endpoint druid/v2"),
            user=params.superset_user)
def setup_ranger_plugin(component_select_name, service_name, downloaded_custom_connector, driver_curl_source, driver_curl_target, java_home, repo_name, plugin_repo_dict, ranger_env_properties, plugin_properties, policy_user, policymgr_mgr_url, plugin_enabled, component_user, component_group, api_version=None, skip_if_rangeradmin_down = True, **kwargs): File(downloaded_custom_connector, content = DownloadSource(driver_curl_source), mode = 0644 ) Execute(('cp', '--remove-destination', downloaded_custom_connector, driver_curl_target), path=["/bin", "/usr/bin/"], sudo=True ) File(driver_curl_target, mode=0644) hdp_version = get_hdp_version(component_select_name) file_path = format('/usr/hdp/{hdp_version}/ranger-{service_name}-plugin/install.properties') if not os.path.isfile(file_path): raise Fail(format('Ranger {service_name} plugin install.properties file does not exist at {file_path}')) ModifyPropertiesFile(file_path, properties = plugin_properties ) custom_plugin_properties = dict() custom_plugin_properties['CUSTOM_USER'] = component_user custom_plugin_properties['CUSTOM_GROUP'] = component_group ModifyPropertiesFile(file_path,properties = custom_plugin_properties) if plugin_enabled: cmd = (format('enable-{service_name}-plugin.sh'),) if api_version == 'v2' and api_version is not None: ranger_adm_obj = RangeradminV2(url=policymgr_mgr_url, skip_if_rangeradmin_down = skip_if_rangeradmin_down) else: ranger_adm_obj = Rangeradmin(url=policymgr_mgr_url, skip_if_rangeradmin_down = skip_if_rangeradmin_down) ranger_adm_obj.create_ranger_repository(service_name, repo_name, plugin_repo_dict, ranger_env_properties['ranger_admin_username'], ranger_env_properties['ranger_admin_password'], ranger_env_properties['admin_username'], ranger_env_properties['admin_password'], policy_user) else: cmd = (format('disable-{service_name}-plugin.sh'),) cmd_env = {'JAVA_HOME': java_home, 'PWD': format('/usr/hdp/{hdp_version}/ranger-{service_name}-plugin'), 'PATH': 
format('/usr/hdp/{hdp_version}/ranger-{service_name}-plugin')} Execute(cmd, environment=cmd_env, logoutput=True, sudo=True, )
def pig(): import params File(os.path.join(params.pig_conf_dir, "pig.properties"), mode="f", owner=params.pig_user, content=params.pig_properties) if (params.log4j_props is not None): File(os.path.join(params.pig_conf_dir, "log4j.properties"), mode='f', owner=params.pig_user, content=params.log4j_props)
def test_ensure_metadata(self, isdir_mock, exists_mock, open_mock, stat_mock,
                         chmod_mock, chown_mock, gid_mock, uid_mock):
  """
  Tests if _ensure_metadata changes owner, usergroup and permissions of
  file to proper values
  """
  isdir_mock.side_effect = [False, True, False, True]
  exists_mock.return_value = False

  # fake os.stat result: current mode/uid/gid differ from the desired values
  class stat():
    def __init__(self):
      self.st_mode = 0666
      self.st_uid = 1
      self.st_gid = 1

  stat_mock.return_value = stat()
  gid_mock.return_value = 0
  uid_mock.return_value = 0

  # desired uid/gid (0) differ from actual (1) -> chmod once, chown twice
  with Environment('/') as env:
    File('/directory/file',
         action='create',
         mode=0777,
         content='file-content',
         owner='root',
         group='hdfs')
    env.run()

  open_mock.assert_called_with('/directory/file', 'wb')
  self.assertEqual(open_mock.call_count, 1)
  stat_mock.assert_called_with('/directory/file')
  self.assertEqual(chmod_mock.call_count, 1)
  self.assertEqual(chown_mock.call_count, 2)
  gid_mock.assert_called_once_with('hdfs')
  uid_mock.assert_called_once_with('root')

  chmod_mock.reset_mock()
  chown_mock.reset_mock()
  gid_mock.return_value = 1
  uid_mock.return_value = 1

  # desired uid/gid now match actual values -> no chown at all
  with Environment('/') as env:
    File('/directory/file',
         action='create',
         mode=0777,
         content='file-content',
         owner='root',
         group='hdfs')
    env.run()

  self.assertEqual(chmod_mock.call_count, 1)
  self.assertEqual(chown_mock.call_count, 0)
def action_create(self):
  # Renders a yum/zypper .repo file and writes it only when needed. On SUSE,
  # replacing the content of an existing repo additionally requires flushing
  # the package manager cache first.
  with Environment.get_instance_copy() as env:
    repo_file_name = self.resource.repo_file_name
    repo_dir = get_repo_dir()
    new_content = InlineTemplate(self.resource.repo_template,
                                 repo_id=self.resource.repo_id,
                                 repo_file_name=self.resource.repo_file_name,
                                 base_url=self.resource.base_url,
                                 mirror_list=self.resource.mirror_list)
    repo_file_path = format("{repo_dir}/{repo_file_name}.repo")

    if os.path.isfile(repo_file_path):
      existing_content_str = sudo.read_file(repo_file_path)
      new_content_str = new_content.get_content()
      if existing_content_str != new_content_str and OSCheck.is_suse_family():
        # We need to reset package manager's cache when we replace base urls
        # at existing repo. That is a case at least under SLES
        Logger.info("Flushing package manager cache since repo file content is about to change")
        checked_call(self.update_cmd, sudo=True)
      if self.resource.append_to_file:
        content = existing_content_str + '\n' + new_content_str
      else:
        content = new_content_str
    else:
      # If repo file does not exist yet
      content = new_content

    File(repo_file_path, content=content)
def generate_configs(self, env):
  """
  Generates config files and stores them as an archive in tmp_dir
  based on xml_configs_list and env_configs_list from commandParams
  """
  import params
  env.set_params(params)

  xml_configs_list = params.config['commandParams']['xml_configs_list']
  env_configs_list = params.config['commandParams']['env_configs_list']

  conf_tmp_dir = tempfile.mkdtemp()
  output_filename = os.path.join(self.get_tmp_dir(),
                                 params.config['commandParams']['output_file'])

  Directory(self.get_tmp_dir(), recursive=True)

  # render each requested *.xml config into the temp dir
  # (loop variable renamed: 'dict' shadowed the builtin)
  for file_dict in xml_configs_list:
    for filename, config_name in file_dict.iteritems():
      XmlConfig(filename,
                conf_dir=conf_tmp_dir,
                configurations=params.config['configurations'][config_name],
                configuration_attributes=params.config['configuration_attributes'][config_name],
      )

  # render each *-env style file from its inline template content
  for file_dict in env_configs_list:
    for filename, config_name in file_dict.iteritems():
      File(os.path.join(conf_tmp_dir, filename),
           content=InlineTemplate(params.config['configurations'][config_name]['content']))

  # closing() already closes the archive; the explicit tar.close() was redundant
  with closing(tarfile.open(output_filename, "w:gz")) as tar:
    tar.add(conf_tmp_dir, arcname=os.path.basename("."))

  Directory(conf_tmp_dir, action="delete")
def setup_ranger_plugin_keystore(service_name, audit_db_is_enabled, stack_version, credential_file, xa_audit_db_password, ssl_truststore_password, ssl_keystore_password, component_user, component_group, java_home, cred_lib_path_override = None, cred_setup_prefix_override = None): stack_root = Script.get_stack_root() service_name = str(service_name).lower() if cred_lib_path_override is not None: cred_lib_path = cred_lib_path_override else: cred_lib_path = format('{stack_root}/ranger-{service_name}-plugin/install/lib/*') if cred_setup_prefix_override is not None: cred_setup_prefix = cred_setup_prefix_override else: cred_setup_prefix = (format('{stack_root}/ranger-{service_name}-plugin/ranger_credential_helper.py'), '-l', cred_lib_path) if audit_db_is_enabled: cred_setup = cred_setup_prefix + ('-f', credential_file, '-k', 'auditDBCred', '-v', PasswordString(xa_audit_db_password), '-c', '1') Execute(cred_setup, environment={'JAVA_HOME': java_home}, logoutput=True, sudo=True) cred_setup = cred_setup_prefix + ('-f', credential_file, '-k', 'sslKeyStore', '-v', PasswordString(ssl_keystore_password), '-c', '1') Execute(cred_setup, environment={'JAVA_HOME': java_home}, logoutput=True, sudo=True) cred_setup = cred_setup_prefix + ('-f', credential_file, '-k', 'sslTrustStore', '-v', PasswordString(ssl_truststore_password), '-c', '1') Execute(cred_setup, environment={'JAVA_HOME': java_home}, logoutput=True, sudo=True) File(credential_file, owner = component_user, group = component_group, mode = 0640 )
def service_check(self, env):
  """
  Storm service check: submits the WordCount example topology under a
  unique name, then kills it.
  """
  import params
  env.set_params(params)

  unique = get_unique_id_and_date()
  jar_path = "/tmp/wordCount.jar"

  File(jar_path,
       content=StaticFile("wordCount.jar"),
       owner=params.storm_user
  )

  cmd = ""
  if params.nimbus_seeds_supported:
    # Because this command is guaranteed to run on one of the hosts with storm client, there is no need
    # to specify "-c nimbus.seeds={nimbus_seeds}"
    cmd = format("storm jar /tmp/wordCount.jar storm.starter.WordCountTopology WordCount{unique}")
  elif params.nimbus_host is not None:
    cmd = format("storm jar /tmp/wordCount.jar storm.starter.WordCountTopology WordCount{unique} -c nimbus.host={nimbus_host}")

  Execute(cmd,
          logoutput=True,
          path=params.storm_bin_dir,
          user=params.storm_user
  )

  # clean up: remove the submitted topology again
  Execute(format("storm kill WordCount{unique}"),
          path=params.storm_bin_dir,
          user=params.storm_user
  )
def setup_ranger_kafka():
  """
  Wires up the Kafka Ranger plugin: prepares the HDFS audit directories
  (when HDFS audit is enabled), runs the common xml-based plugin setup, and
  installs the ranger-env helper script for the broker.
  """
  import params

  if params.has_ranger_admin:
    from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_ranger_plugin

    if params.retryAble:
      Logger.info("Kafka: Setup ranger: command retry enables thus retrying if ranger admin is down !")
    else:
      Logger.info("Kafka: Setup ranger: command retry not enabled thus skipping if ranger admin is down !")

    # HDFS audit destinations must pre-exist with the right ownership/modes
    if params.xml_configurations_supported and params.enable_ranger_kafka and params.xa_audit_hdfs_is_enabled:
      if params.has_namenode:
        params.HdfsResource("/ranger/audit",
                            type="directory",
                            action="create_on_execute",
                            owner=params.hdfs_user,
                            group=params.hdfs_user,
                            mode=0755,
                            recursive_chmod=True
        )
        params.HdfsResource("/ranger/audit/kafka",
                            type="directory",
                            action="create_on_execute",
                            owner=params.kafka_user,
                            group=params.kafka_user,
                            mode=0700,
                            recursive_chmod=True
        )
        params.HdfsResource(None, action="execute")

    setup_ranger_plugin('kafka-broker', 'kafka',
                        params.downloaded_custom_connector, params.driver_curl_source,
                        params.driver_curl_target, params.java64_home,
                        params.repo_name, params.kafka_ranger_plugin_repo,
                        params.ranger_env, params.ranger_plugin_properties,
                        params.policy_user, params.policymgr_mgr_url,
                        params.enable_ranger_kafka,
                        conf_dict=params.conf_dir,
                        component_user=params.kafka_user,
                        component_group=params.user_group,
                        cache_service_list=['kafka'],
                        plugin_audit_properties=params.ranger_kafka_audit,
                        plugin_audit_attributes=params.ranger_kafka_audit_attrs,
                        plugin_security_properties=params.ranger_kafka_security,
                        plugin_security_attributes=params.ranger_kafka_security_attrs,
                        plugin_policymgr_ssl_properties=params.ranger_kafka_policymgr_ssl,
                        plugin_policymgr_ssl_attributes=params.ranger_kafka_policymgr_ssl_attrs,
                        component_list=['kafka-broker'],
                        audit_db_is_enabled=params.xa_audit_db_is_enabled,
                        credential_file=params.credential_file,
                        xa_audit_db_password=params.xa_audit_db_password,
                        ssl_truststore_password=params.ssl_truststore_password,
                        ssl_keystore_password=params.ssl_keystore_password,
                        api_version = 'v2',
                        skip_if_rangeradmin_down= not params.retryAble)

    if params.enable_ranger_kafka:
      # copy the helper script once; not_if guards repeated runs
      Execute(('cp', '--remove-destination', params.setup_ranger_env_sh_source, params.setup_ranger_env_sh_target),
              not_if=format("test -f {setup_ranger_env_sh_target}"),
              sudo=True
      )
      File(params.setup_ranger_env_sh_target,
           owner = params.kafka_user,
           group = params.user_group,
           mode = 0755
      )
  else:
    Logger.info('Ranger admin not installed')
def action_remove(self):
  # Deletes this repository's <repo_file_name>.repo file.
  with Environment.get_instance_copy() as env:
    File(format("{repo_dir}/{repo_file_name}.repo",
                repo_dir=get_repo_dir(),
                repo_file_name=self.resource.repo_file_name),
         action="delete")
def test_action_create_replace(self, isdir_mock, exists_mock, open_mock, ensure_mock): """ Tests if 'create' action rewrite existent file with new data """ isdir_mock.side_effect = [False, True] old_file, new_file = MagicMock(), MagicMock() open_mock.side_effect = [old_file, new_file] old_file.read.return_value = 'old-content' exists_mock.return_value = True with Environment('/') as env: File('/directory/file', action='create', mode=0777, backup=False, content='new-content') env.run() old_file.read.assert_called() new_file.__enter__().write.assert_called_with('new-content') ensure_mock.assert_called() self.assertEqual(open_mock.call_count, 2) open_mock.assert_any_call('/directory/file', 'rb') open_mock.assert_any_call('/directory/file', 'wb')
def setup_ranger_plugin_keystore(service_name, audit_db_is_enabled, hdp_version, credential_file,
                                 xa_audit_db_password, ssl_truststore_password, ssl_keystore_password,
                                 component_user, component_group, java_home):
  """
  Stores Ranger plugin credentials (audit DB password when enabled, SSL
  keystore/truststore passwords) in the plugin credential file via
  ranger_credential_helper.py, then fixes the file's ownership.
  """
  cred_lib_path = format('/usr/hdp/{hdp_version}/ranger-{service_name}-plugin/install/lib/*')
  cred_setup_prefix = format('python /usr/hdp/{hdp_version}/ranger-{service_name}-plugin/ranger_credential_helper.py -l "{cred_lib_path}"')

  if audit_db_is_enabled:
    # NOTE(review): "!p" appears to mark the value as a password so format()
    # masks it in command logging — confirm against format() docs.
    cred_setup = format('{cred_setup_prefix} -f {credential_file} -k "auditDBCred" -v {xa_audit_db_password!p} -c 1')
    Execute(cred_setup, environment={'JAVA_HOME': java_home}, logoutput=True)

  cred_setup = format('{cred_setup_prefix} -f {credential_file} -k "sslKeyStore" -v {ssl_keystore_password!p} -c 1')
  Execute(cred_setup, environment={'JAVA_HOME': java_home}, logoutput=True)

  cred_setup = format('{cred_setup_prefix} -f {credential_file} -k "sslTrustStore" -v {ssl_truststore_password!p} -c 1')
  Execute(cred_setup, environment={'JAVA_HOME': java_home}, logoutput=True)

  File(credential_file,
       owner=component_user,
       group=component_group)
def create_topology_script(): import params File(params.net_topology_script_file_path, content=StaticFile('topology_script.py'), mode=0755, only_if=format("test -d {net_topology_script_dir}"))
def action_create(self):
  # Renders a java-style .properties file from self.resource.properties and
  # writes it to <dir>/<filename> (or bare <filename> when dir is unset).
  filename = self.resource.filename
  dir = self.resource.dir
  if dir == None:
    filepath = filename
  else:
    filepath = os.path.join(dir, filename)

  # Each property value is itself rendered as an InlineTemplate, so values
  # may contain {{...}} expressions of their own.
  config_content = InlineTemplate('''# Generated by Apache Ambari. {{time.asctime(time.localtime())}}
    {% for key, value in properties_dict|dictsort %}
{{key}}{{key_value_delimiter}}{{ resource_management.core.source.InlineTemplate(str(value)).get_content().strip() }}{% endfor %}
    ''',
    extra_imports=[time, resource_management, resource_management.core, resource_management.core.source],
    properties_dict=self.resource.properties,
    key_value_delimiter=self.resource.key_value_delimiter)

  Logger.info(format("Generating properties file: {filepath}"))

  with Environment.get_instance_copy() as env:
    File(format("{filepath}"),
         content=config_content,
         owner=self.resource.owner,
         group=self.resource.group,
         mode=self.resource.mode)
def action_create(self):
  # Renders a Hadoop-style *.xml configuration file into conf_dir from
  # self.resource.configurations, with optional per-property attributes
  # (e.g. "final") emitted next to each matching <property>.
  filename = self.resource.filename
  xml_config_provider_config_dir = self.resource.conf_dir

  # |e - for html-like escaping of <,>,',"
  config_content = InlineTemplate('''
<configuration>
  {% for key, value in configurations_dict|dictsort %}
  <property>
    <name>{{ key|e }}</name>
    <value>{{ resource_management.core.source.InlineTemplate(str(value)).get_content() |e }}</value>
    {%- if not configuration_attrs is none -%}
    {%- for attrib_name, attrib_occurances in configuration_attrs.items() -%}
    {%- for property_name, attrib_value in attrib_occurances.items() -%}
    {% if property_name == key and attrib_name %}
    <{{attrib_name|e}}>{{attrib_value|e}}</{{attrib_name|e}}>
    {%- endif -%}
    {%- endfor -%}
    {%- endfor -%}
    {%- endif %}
  </property>
  {% endfor %}
</configuration>''',
    extra_imports=[time, resource_management, resource_management.core, resource_management.core.source],
    configurations_dict=self.resource.configurations,
    configuration_attrs=self.resource.configuration_attributes)

  xml_config_dest_file_path = os.path.join(xml_config_provider_config_dir, filename)
  Logger.info("Generating config: {0}".format(xml_config_dest_file_path))

  File(xml_config_dest_file_path,
       content = config_content,
       owner = self.resource.owner,
       group = self.resource.group,
       mode = self.resource.mode,
       encoding = self.resource.encoding
  )
def action_remove(self):
  # Deletes the <repo_file_name>.repo file from the OS-family-specific dir.
  with Environment.get_instance_copy() as env:
    File(format("{repo_dir}/{repo_file_name}.repo",
                repo_dir=repos_dirs[env.system.os_family],
                repo_file_name=self.resource.repo_file_name),
         action="delete")
def setup_ranger_nifi(upgrade_type=None):
  """
  Wires up the NiFi Ranger plugin via the common xml-based setup, then
  tightens permissions on the generated ranger xml files.
  """
  import params, os

  if params.has_ranger_admin and params.enable_ranger_nifi:
    # during an upgrade the target stack version is passed through
    stack_version = None
    if upgrade_type is not None:
      stack_version = params.version

    if params.retryAble:
      Logger.info("nifi: Setup ranger: command retry enables thus retrying if ranger admin is down !")
    else:
      Logger.info("nifi: Setup ranger: command retry not enabled thus skipping if ranger admin is down !")

    # use the v2 Ranger API when the stack supports ranger kerberos
    api_version=None
    if params.stack_supports_ranger_kerberos:
      api_version='v2'

    from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_ranger_plugin

    setup_ranger_plugin('nifi', 'nifi', params.previous_jdbc_jar,
                        params.downloaded_custom_connector, params.driver_curl_source,
                        params.driver_curl_target, params.java_home,
                        params.repo_name, params.nifi_ranger_plugin_repo,
                        params.ranger_env, params.ranger_plugin_properties,
                        params.policy_user, params.policymgr_mgr_url,
                        params.enable_ranger_nifi,
                        conf_dict=params.nifi_config_dir,
                        component_user=params.nifi_user,
                        component_group=params.nifi_group,
                        cache_service_list=['nifi'],
                        plugin_audit_properties=params.config['configurations']['ranger-nifi-audit'],
                        plugin_audit_attributes=params.config['configuration_attributes']['ranger-nifi-audit'],
                        plugin_security_properties=params.config['configurations']['ranger-nifi-security'],
                        plugin_security_attributes=params.config['configuration_attributes']['ranger-nifi-security'],
                        plugin_policymgr_ssl_properties=params.config['configurations']['ranger-nifi-policymgr-ssl'],
                        plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-nifi-policymgr-ssl'],
                        component_list=[],
                        audit_db_is_enabled=params.xa_audit_db_is_enabled,
                        credential_file=params.credential_file,
                        xa_audit_db_password=params.xa_audit_db_password,
                        ssl_truststore_password=params.ssl_truststore_password,
                        ssl_keystore_password=params.ssl_keystore_password,
                        stack_version_override = stack_version,
                        skip_if_rangeradmin_down= not params.retryAble,
                        api_version=api_version,
                        is_security_enabled = params.security_enabled,
                        is_stack_supports_ranger_kerberos = params.stack_supports_ranger_kerberos,
                        component_user_principal=params.ranger_nifi_principal if params.security_enabled else None,
                        component_user_keytab=params.ranger_nifi_keytab if params.security_enabled else None)

    #change permissions of ranger xml that were written to 0400
    File(os.path.join(params.nifi_config_dir, 'ranger-nifi-audit.xml'),
         owner=params.nifi_user,
         group=params.nifi_group,
         mode=0400)
    File(os.path.join(params.nifi_config_dir, 'ranger-nifi-security.xml'),
         owner=params.nifi_user,
         group=params.nifi_group,
         mode=0400)
    File(os.path.join(params.nifi_config_dir, 'ranger-policymgr-ssl.xml'),
         owner=params.nifi_user,
         group=params.nifi_group,
         mode=0400)
  else:
    Logger.info('Ranger admin not installed')
def create_topology_mapping():
  """
  Writes the rack topology mapping data file from its template, but only on
  hosts where the topology script directory exists (only_if guard).
  """
  import params

  mapping_file = params.net_topology_mapping_data_file_path
  File(mapping_file,
       content=Template("topology_mappings.data.j2"),
       owner=params.hdfs_user,
       group=params.user_group,
       only_if=format("test -d {net_topology_script_dir}"))