Example #1
def download_database_library_if_needed(target_directory=None):
    """
  Downloads the library to use when connecting to the Oozie database, if
  necessary. The library will be downloaded to 'params.target' unless
  otherwise specified.
  :param target_directory: the location where the database library will be
  downloaded to.
  :return:
  """
    import params
    jdbc_drivers = [
        "com.mysql.jdbc.Driver",
        "com.microsoft.sqlserver.jdbc.SQLServerDriver",
        "oracle.jdbc.driver.OracleDriver", "sap.jdbc4.sqlanywhere.IDriver"
    ]

    # check to see if the JDBC driver name is in the list of ones that need to
    # be downloaded
    if params.jdbc_driver_name not in jdbc_drivers:
        return

    # if the target directory is not specified
    if target_directory is None:
        target_jar_with_directory = params.target
    else:
        # create the full path using the supplied target directory and the JDBC JAR
        target_jar_with_directory = target_directory + os.path.sep + params.jdbc_driver_jar

    if not os.path.exists(target_jar_with_directory):
        File(params.downloaded_custom_connector,
             content=DownloadSource(params.driver_curl_source))

        if params.sqla_db_used:
            untar_sqla_type2_driver = ('tar', '-xvf',
                                       params.downloaded_custom_connector,
                                       '-C', params.tmp_dir)

            Execute(untar_sqla_type2_driver, sudo=True)

            Execute(
                format(
                    "yes | {sudo} cp {jars_path_in_archive} {oozie_libext_dir}"
                ))

            Directory(params.jdbc_libs_dir, recursive=True)

            Execute(
                format(
                    "yes | {sudo} cp {libs_path_in_archive} {jdbc_libs_dir}"))

            Execute(
                format(
                    "{sudo} chown -R {oozie_user}:{user_group} {oozie_libext_dir}/*"
                ))

        else:
            Execute(('cp', '--remove-destination',
                     params.downloaded_custom_connector,
                     target_jar_with_directory),
                    path=["/bin", "/usr/bin/"],
                    sudo=True)

        File(target_jar_with_directory,
             owner=params.oozie_user,
             group=params.user_group)
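
These examples are Apache Ambari resource-management scripts (Python 2); they assume module-level imports such as File, Execute, Directory, XmlConfig, format and DownloadSource from Ambari's resource_management package, plus a cluster-specific params module. As a minimal standalone sketch of the same download-only-if-missing pattern, without Ambari's DSL (the URL and paths are hypothetical placeholders):

import os
import shutil
try:                                     # Python 3
    from urllib.request import urlopen
except ImportError:                      # Python 2
    from urllib2 import urlopen

KNOWN_JDBC_DRIVERS = (
    "com.mysql.jdbc.Driver",
    "com.microsoft.sqlserver.jdbc.SQLServerDriver",
    "oracle.jdbc.driver.OracleDriver",
    "sap.jdbc4.sqlanywhere.IDriver",
)

def download_driver_if_needed(driver_name, source_url, target_path):
    # only a known set of drivers is downloaded; anything else is left alone
    if driver_name not in KNOWN_JDBC_DRIVERS:
        return
    # skip the download when the jar is already in place (idempotency)
    if os.path.exists(target_path):
        return
    tmp_path = target_path + ".part"
    response = urlopen(source_url)       # source_url is a hypothetical URL
    try:
        with open(tmp_path, "wb") as out:
            shutil.copyfileobj(response, out)
    finally:
        response.close()
    os.rename(tmp_path, target_path)     # move into place once complete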
Example #2
def hive_interactive(name=None):
    import params

    MB_TO_BYTES = 1048576

    # if warehouse directory is in DFS
    if not params.whs_dir_protocol or params.whs_dir_protocol == urlparse(
            params.default_fs).scheme:
        # Create Hive Metastore Warehouse Dir
        params.HdfsResource(params.hive_metastore_warehouse_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.hive_user,
                            group=params.user_group,
                            mode=0700)
        # create directories for llap package
        pkg_dir = '/user/' + params.hive_user + '/.yarn'
        for dir_path in [pkg_dir, pkg_dir + '/package', pkg_dir + '/package/LLAP']:
            # HdfsResource handles parent directory creation badly
            params.HdfsResource(dir_path,
                                type="directory",
                                action="create_on_execute",
                                owner=params.hive_user,
                                group=params.user_group,
                                mode=0755)

        if not is_empty(params.tez_hook_proto_base_directory):
            params.HdfsResource(params.tez_hook_proto_base_directory,
                                type="directory",
                                action="create_on_execute",
                                owner=params.hive_user,
                                mode=01755)

        if not is_empty(params.hive_hook_proto_base_directory):
            params.HdfsResource(params.hive_hook_proto_base_directory,
                                type="directory",
                                action="create_on_execute",
                                owner=params.hive_user,
                                mode=01777)

            dag_meta = params.tez_hook_proto_base_directory + "dag_meta"
            params.HdfsResource(dag_meta,
                                type="directory",
                                action="create_on_execute",
                                owner=params.hive_user,
                                mode=01777)

            dag_data = params.tez_hook_proto_base_directory + "dag_data"
            params.HdfsResource(dag_data,
                                type="directory",
                                action="create_on_execute",
                                owner=params.hive_user,
                                mode=01777)

            app_data = params.tez_hook_proto_base_directory + "app_data"
            params.HdfsResource(app_data,
                                type="directory",
                                action="create_on_execute",
                                owner=params.hive_user,
                                mode=01777)

    else:
        Logger.info(
            format(
                "Not creating warehouse directory '{hive_metastore_warehouse_dir}', as the location is not in DFS."
            ))

    # Create Hive User Dir
    params.HdfsResource(params.hive_hdfs_user_dir,
                        type="directory",
                        action="create_on_execute",
                        owner=params.hive_user,
                        mode=params.hive_hdfs_user_mode)

    params.HdfsResource(None, action="execute")

    # list of properties that should be excluded from the config
    # this approach is a compromise against adding a dedicated config
    # type for hive_server_interactive or needed config groups on a
    # per component basis
    exclude_list = ['hive.enforce.bucketing', 'hive.enforce.sorting']

    # List of configs to be excluded from hive2 client, but present in Hive2 server.
    exclude_list_for_hive2_client = [
        'javax.jdo.option.ConnectionPassword',
        'hadoop.security.credential.provider.path'
    ]

    Logger.info("Directories to fill with configs: %s" %
                str(params.hive_conf_dirs_list))
    for conf_dir in params.hive_conf_dirs_list:
        fill_conf_dir(conf_dir)
    '''
  As hive2/hive-site.xml only contains the new + the changed props compared to hive/hive-site.xml,
  we need to merge hive/hive-site.xml and hive2/hive-site.xml and store it in hive2/hive-site.xml.
  '''
    merged_hive_interactive_site = {}
    merged_hive_interactive_site.update(params.hive_site_config)
    merged_hive_interactive_site.update(
        params.config['configurations']['hive-interactive-site'])
    for item in exclude_list:
        if item in merged_hive_interactive_site.keys():
            del merged_hive_interactive_site[item]

    merged_hive_interactive_site[
        'hive.llap.daemon.vcpus.per.instance'] = format(
            merged_hive_interactive_site['hive.llap.daemon.vcpus.per.instance']
        )
    merged_hive_interactive_site[
        'hive.server2.active.passive.ha.enable'] = str(
            params.hive_server_interactive_ha).lower()
    '''
    The 'hive.llap.io.memory.size' value calculated in stack_advisor is currently in MB. We need to
    convert it to bytes before writing it to the config file.
    '''
    if 'hive.llap.io.memory.size' in merged_hive_interactive_site.keys():
        hive_llap_io_mem_size_in_mb = merged_hive_interactive_site.get(
            "hive.llap.io.memory.size")
        hive_llap_io_mem_size_in_bytes = long(
            hive_llap_io_mem_size_in_mb) * MB_TO_BYTES
        merged_hive_interactive_site[
            'hive.llap.io.memory.size'] = hive_llap_io_mem_size_in_bytes
        Logger.info(
            "Converted 'hive.llap.io.memory.size' value from '{0} MB' to '{1} Bytes' before writing "
            "it to config file.".format(hive_llap_io_mem_size_in_mb,
                                        hive_llap_io_mem_size_in_bytes))
    '''
    Hive2 doesn't support Atlas; we need to remove the hook 'org.apache.atlas.hive.hook.HiveHook',
    which would have come in via the 'hive.exec.post.hooks' config during the site merge, if Atlas is installed.
    '''
    # Generate atlas-application.properties.xml file
    if params.enable_atlas_hook and params.stack_supports_atlas_hook_for_hive_interactive:
        Logger.info("Setup for Atlas Hive2 Hook started.")

        atlas_hook_filepath = os.path.join(
            params.hive_server_interactive_conf_dir,
            params.atlas_hook_filename)
        setup_atlas_hook(SERVICE.HIVE,
                         params.hive_atlas_application_properties,
                         atlas_hook_filepath, params.hive_user,
                         params.user_group)

        Logger.info("Setup for Atlas Hive2 Hook done.")
    else:
        # Required for HDP 2.5 stacks
        Logger.info(
            "Skipping setup for Atlas Hook, as it is disabled/ not supported.")
        remove_atlas_hook_if_exists(merged_hive_interactive_site)
    '''
  As tez_hive2/tez-site.xml only contains the new + the changed props compared to tez/tez-site.xml,
  we need to merge tez/tez-site.xml and tez_hive2/tez-site.xml and store it in tez_hive2/tez-site.xml.
  '''
    merged_tez_interactive_site = {}
    if 'tez-site' in params.config['configurations']:
        merged_tez_interactive_site.update(
            params.config['configurations']['tez-site'])
        Logger.info(
            "Retrieved 'tez/tez-site' for merging with 'tez_hive2/tez-interactive-site'."
        )
    else:
        Logger.error(
            "Tez's 'tez-site' couldn't be retrieved from passed-in configurations."
        )

    merged_tez_interactive_site.update(
        params.config['configurations']['tez-interactive-site'])

    XmlConfig("tez-site.xml",
              conf_dir=params.tez_interactive_conf_dir,
              configurations=merged_tez_interactive_site,
              configuration_attributes=params.config['configurationAttributes']
              ['tez-interactive-site'],
              owner=params.tez_interactive_user,
              group=params.user_group,
              mode=0664)
    '''
  Merge properties from hiveserver2-interactive-site into hiveserver2-site
  '''
    merged_hiveserver2_interactive_site = {}
    if 'hiveserver2-site' in params.config['configurations']:
        merged_hiveserver2_interactive_site.update(
            params.config['configurations']['hiveserver2-site'])
        Logger.info(
            "Retrieved 'hiveserver2-site' for merging with 'hiveserver2-interactive-site'."
        )
    else:
        Logger.error(
            "'hiveserver2-site' couldn't be retrieved from passed-in configurations."
        )
    merged_hiveserver2_interactive_site.update(
        params.config['configurations']['hiveserver2-interactive-site'])

    # Create config files under hive_server_interactive_conf_dir:
    #   hive-site.xml
    #   hive-env.sh
    #   llap-daemon-log4j2.properties
    #   llap-cli-log4j2.properties
    #   hive-log4j2.properties
    #   hive-exec-log4j2.properties
    #   beeline-log4j2.properties

    hive_server_interactive_conf_dir = params.hive_server_interactive_conf_dir

    mode_identified = 0600
    merged_hive_interactive_site = update_credential_provider_path(
        merged_hive_interactive_site, 'hive-site',
        os.path.join(conf_dir, 'hive-site.jceks'), params.hive_user,
        params.user_group)
    XmlConfig("hive-site.xml",
              conf_dir=hive_server_interactive_conf_dir,
              configurations=merged_hive_interactive_site,
              configuration_attributes=params.config['configurationAttributes']
              ['hive-interactive-site'],
              owner=params.hive_user,
              group=params.user_group,
              mode=0644)
    XmlConfig("hiveserver2-site.xml",
              conf_dir=hive_server_interactive_conf_dir,
              configurations=merged_hiveserver2_interactive_site,
              configuration_attributes=params.config['configurationAttributes']
              ['hiveserver2-interactive-site'],
              owner=params.hive_user,
              group=params.user_group,
              mode=mode_identified)

    File(format("{hive_server_interactive_conf_dir}/hive-env.sh"),
         owner=params.hive_user,
         group=params.user_group,
         mode=0755,
         content=InlineTemplate(params.hive_interactive_env_sh_template))

    llap_daemon_log4j_filename = 'llap-daemon-log4j2.properties'
    File(format(
        "{hive_server_interactive_conf_dir}/{llap_daemon_log4j_filename}"),
         mode=mode_identified,
         group=params.user_group,
         owner=params.hive_user,
         content=InlineTemplate(params.llap_daemon_log4j))

    llap_cli_log4j2_filename = 'llap-cli-log4j2.properties'
    File(format(
        "{hive_server_interactive_conf_dir}/{llap_cli_log4j2_filename}"),
         mode=mode_identified,
         group=params.user_group,
         owner=params.hive_user,
         content=InlineTemplate(params.llap_cli_log4j2))

    hive_log4j2_filename = 'hive-log4j2.properties'
    File(format("{hive_server_interactive_conf_dir}/{hive_log4j2_filename}"),
         mode=mode_identified,
         group=params.user_group,
         owner=params.hive_user,
         content=InlineTemplate(params.hive_log4j2))

    hive_exec_log4j2_filename = 'hive-exec-log4j2.properties'
    File(format(
        "{hive_server_interactive_conf_dir}/{hive_exec_log4j2_filename}"),
         mode=mode_identified,
         group=params.user_group,
         owner=params.hive_user,
         content=InlineTemplate(params.hive_exec_log4j2))

    beeline_log4j2_filename = 'beeline-log4j2.properties'
    File(
        format("{hive_server_interactive_conf_dir}/{beeline_log4j2_filename}"),
        mode=mode_identified,
        group=params.user_group,
        owner=params.hive_user,
        content=InlineTemplate(params.beeline_log4j2))

    XmlConfig("beeline-site.xml",
              conf_dir=conf_dir,
              configurations=params.beeline_site_config,
              owner=params.hive_user,
              group=params.user_group,
              mode=mode_identified)

    File(os.path.join(hive_server_interactive_conf_dir,
                      "hadoop-metrics2-hiveserver2.properties"),
         owner=params.hive_user,
         group=params.user_group,
         mode=mode_identified,
         content=Template("hadoop-metrics2-hiveserver2.properties.j2"))

    File(format(
        "{hive_server_interactive_conf_dir}/hadoop-metrics2-llapdaemon.properties"
    ),
         owner=params.hive_user,
         group=params.user_group,
         mode=mode_identified,
         content=Template("hadoop-metrics2-llapdaemon.j2"))

    File(format(
        "{hive_server_interactive_conf_dir}/hadoop-metrics2-llaptaskscheduler.properties"
    ),
         owner=params.hive_user,
         group=params.user_group,
         mode=mode_identified,
         content=Template("hadoop-metrics2-llaptaskscheduler.j2"))

    # On some OSes this folder may not exist, so create it before pushing files there
    Directory(params.limits_conf_dir,
              create_parents=True,
              owner='root',
              group='root')

    File(os.path.join(params.limits_conf_dir, 'hive.conf'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("hive.conf.j2"))

    if not os.path.exists(params.target_hive_interactive):
        jdbc_connector(params.target_hive_interactive,
                       params.hive_intaractive_previous_jdbc_jar)

    File(format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
         content=DownloadSource(
             format("{jdk_location}/{check_db_connection_jar_name}")),
         mode=0644)
    File(params.start_hiveserver2_interactive_path,
         mode=0755,
         content=Template(format('{start_hiveserver2_interactive_script}')))

    Directory(params.hive_pid_dir,
              create_parents=True,
              cd_access='a',
              owner=params.hive_user,
              group=params.user_group,
              mode=0755)
    Directory(params.hive_log_dir,
              create_parents=True,
              cd_access='a',
              owner=params.hive_user,
              group=params.user_group,
              mode=0755)
    Directory(params.hive_interactive_var_lib,
              create_parents=True,
              cd_access='a',
              owner=params.hive_user,
              group=params.user_group,
              mode=0755)
    generate_logfeeder_input_config(
        'hive', Template("input.config-hive.json.j2", extra_imports=[default]))
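
The merge-and-exclude logic above reduces to plain dict operations. A standalone sketch with illustrative keys and values, including the MB-to-bytes conversion applied before the config is written:

MB_TO_BYTES = 1048576

def merge_site_configs(base_site, override_site, exclude_list):
    # the override file only carries new + changed props, so overlay it on the base
    merged = dict(base_site)
    merged.update(override_site)
    # drop properties the interactive server must not carry
    for key in exclude_list:
        merged.pop(key, None)
    return merged

hive_site = {'hive.enforce.bucketing': 'true',            # illustrative values
             'hive.llap.io.memory.size': '4096'}
hive_interactive_site = {'hive.llap.daemon.vcpus.per.instance': '4'}

merged = merge_site_configs(hive_site, hive_interactive_site,
                            ['hive.enforce.bucketing', 'hive.enforce.sorting'])

# stack_advisor calculates 'hive.llap.io.memory.size' in MB; convert to bytes
if 'hive.llap.io.memory.size' in merged:
    merged['hive.llap.io.memory.size'] = int(merged['hive.llap.io.memory.size']) * MB_TO_BYTES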
Example #3
def setup_ranger_plugin(component_select_name,
                        service_name,
                        component_downloaded_custom_connector,
                        component_driver_curl_source,
                        component_driver_curl_target,
                        java_home,
                        repo_name,
                        plugin_repo_dict,
                        ranger_env_properties,
                        plugin_properties,
                        policy_user,
                        policymgr_mgr_url,
                        plugin_enabled,
                        conf_dict,
                        component_user,
                        component_group,
                        cache_service_list,
                        plugin_audit_properties,
                        plugin_audit_attributes,
                        plugin_security_properties,
                        plugin_security_attributes,
                        plugin_policymgr_ssl_properties,
                        plugin_policymgr_ssl_attributes,
                        component_list,
                        audit_db_is_enabled,
                        credential_file,
                        xa_audit_db_password,
                        ssl_truststore_password,
                        ssl_keystore_password,
                        api_version=None,
                        hdp_version_override=None):

    if audit_db_is_enabled:
        File(component_downloaded_custom_connector,
             content=DownloadSource(component_driver_curl_source),
             mode=0644)

        Execute(('cp', '--remove-destination',
                 component_downloaded_custom_connector,
                 component_driver_curl_target),
                path=["/bin", "/usr/bin/"],
                sudo=True)

        File(component_driver_curl_target, mode=0644)

    hdp_version = get_hdp_version(component_select_name)
    if hdp_version_override is not None:
        hdp_version = hdp_version_override

    component_conf_dir = conf_dict

    if plugin_enabled:

        if api_version == 'v2':
            ranger_adm_obj = RangeradminV2(url=policymgr_mgr_url)
        else:
            ranger_adm_obj = Rangeradmin(url=policymgr_mgr_url)

        ranger_adm_obj.create_ranger_repository(
            service_name, repo_name, plugin_repo_dict,
            ranger_env_properties['ranger_admin_username'],
            ranger_env_properties['ranger_admin_password'],
            ranger_env_properties['admin_username'],
            ranger_env_properties['admin_password'], policy_user)

        current_datetime = datetime.now()

        File(
            format('{component_conf_dir}/ranger-security.xml'),
            owner=component_user,
            group=component_group,
            mode=0644,
            content=InlineTemplate(
                format(
                    '<ranger>\n<enabled>{current_datetime}</enabled>\n</ranger>'
                )))

        Directory([
            os.path.join('/etc', 'ranger', repo_name),
            os.path.join('/etc', 'ranger', repo_name, 'policycache')
        ],
                  owner=component_user,
                  group=component_group,
                  mode=0775,
                  recursive=True,
                  cd_access='a')

        for cache_service in cache_service_list:
            File(os.path.join('/etc', 'ranger', repo_name, 'policycache',
                              format('{cache_service}_{repo_name}.json')),
                 owner=component_user,
                 group=component_group,
                 mode=0644)

        XmlConfig(format('ranger-{service_name}-audit.xml'),
                  conf_dir=component_conf_dir,
                  configurations=plugin_audit_properties,
                  configuration_attributes=plugin_audit_attributes,
                  owner=component_user,
                  group=component_group,
                  mode=0744)

        XmlConfig(format('ranger-{service_name}-security.xml'),
                  conf_dir=component_conf_dir,
                  configurations=plugin_security_properties,
                  configuration_attributes=plugin_security_attributes,
                  owner=component_user,
                  group=component_group,
                  mode=0744)

        if str(service_name).lower() == 'yarn':
            XmlConfig("ranger-policymgr-ssl-yarn.xml",
                      conf_dir=component_conf_dir,
                      configurations=plugin_policymgr_ssl_properties,
                      configuration_attributes=plugin_policymgr_ssl_attributes,
                      owner=component_user,
                      group=component_group,
                      mode=0744)
        else:
            XmlConfig("ranger-policymgr-ssl.xml",
                      conf_dir=component_conf_dir,
                      configurations=plugin_policymgr_ssl_properties,
                      configuration_attributes=plugin_policymgr_ssl_attributes,
                      owner=component_user,
                      group=component_group,
                      mode=0744)

        #This should be done by rpm
        #setup_ranger_plugin_jar_symblink(hdp_version, service_name, component_list)

        setup_ranger_plugin_keystore(service_name, audit_db_is_enabled,
                                     hdp_version, credential_file,
                                     xa_audit_db_password,
                                     ssl_truststore_password,
                                     ssl_keystore_password, component_user,
                                     component_group, java_home)

    else:
        File(format('{component_conf_dir}/ranger-security.xml'),
             action="delete")
Example #4
def jdbc_connector(target, hive_previous_jdbc_jar):
  """
  Shared by Hive Batch, Hive Metastore, and Hive Interactive
  :param target: Target path of the JDBC jar, which could be for any of the components above.
  """
  import params

  if not params.jdbc_jar_name:
    return

  if params.hive_jdbc_driver in params.hive_jdbc_drivers_list and params.hive_use_existing_db:
    environment = {
      "no_proxy": format("{ambari_server_hostname}")
    }

    if hive_previous_jdbc_jar and os.path.isfile(hive_previous_jdbc_jar):
      File(hive_previous_jdbc_jar, action='delete')

    # TODO: remove once ranger_hive_plugin no longer provides the jdbc driver
    if params.prepackaged_jdbc_name != params.jdbc_jar_name:
      Execute(('rm', '-f', params.prepackaged_ojdbc_symlink),
              path=["/bin", "/usr/bin/"],
              sudo = True)
    
    File(params.downloaded_custom_connector,
         content = DownloadSource(params.driver_curl_source))

    # it may be more correct to switch on the db type here
    if params.sqla_db_used:
      untar_sqla_type2_driver = ('tar', '-xvf', params.downloaded_custom_connector, '-C', params.tmp_dir)

      Execute(untar_sqla_type2_driver, sudo = True)

      Execute(format("yes | {sudo} cp {jars_path_in_archive} {hive_lib}"))

      Directory(params.jdbc_libs_dir,
                create_parents = True)

      Execute(format("yes | {sudo} cp {libs_path_in_archive} {jdbc_libs_dir}"))

      Execute(format("{sudo} chown -R {hive_user}:{user_group} {hive_lib}/*"))

    else:
      Execute(('cp', '--remove-destination', params.downloaded_custom_connector, target),
            #creates=target, TODO: uncomment once ranger_hive_plugin no longer provides the jdbc driver
            path=["/bin", "/usr/bin/"],
            sudo = True)

  else:
    #for default hive db (Mysql)
    File(params.downloaded_custom_connector, content = DownloadSource(params.driver_curl_source))
    Execute(('cp', '--remove-destination', params.downloaded_custom_connector, target),
            #creates=target, TODO: uncomment once ranger_hive_plugin no longer provides the jdbc driver
            path=["/bin", "/usr/bin/"],
            sudo=True
    )

  File(target,
       mode = 0644,
  )
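
The delete-then-copy flow above can be sketched as plain Python (the real code shells out to cp --remove-destination under sudo; the paths here are hypothetical):

import os
import shutil

def install_driver_jar(downloaded_jar, target_jar, previous_jar=None):
    # drop a stale jar left over from an earlier stack version
    if previous_jar and os.path.isfile(previous_jar):
        os.remove(previous_jar)
    # overwrite whatever sits at the destination, like cp --remove-destination
    shutil.copyfile(downloaded_jar, target_jar)
    os.chmod(target_jar, 0o644)   # matches the final File(target, mode=0644)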
Example #5
def hive_interactive(name=None):
  import params
  MB_TO_BYTES = 1048576

  # Create Hive User Dir
  params.HdfsResource(params.hive_hdfs_user_dir,
                      type="directory",
                      action="create_on_execute",
                      owner=params.hive_user,
                      mode=params.hive_hdfs_user_mode
                      )

  # list of properties that should be excluded from the config
  # this approach is a compromise against adding a dedicated config
  # type for hive_server_interactive or needed config groups on a
  # per component basis
  exclude_list = ['hive.enforce.bucketing',
                  'hive.enforce.sorting']

  # List of configs to be excluded from hive2 client, but present in Hive2 server.
  exclude_list_for_hive2_client = ['javax.jdo.option.ConnectionPassword']

  # Copy Tarballs in HDFS.
  if params.stack_version_formatted_major and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.stack_version_formatted_major):
    resource_created = copy_to_hdfs("tez_hive2",
                 params.user_group,
                 params.hdfs_user,
                 file_mode=params.tarballs_mode,
                 skip=params.sysprep_skip_copy_tarballs_hdfs)

    if resource_created:
      params.HdfsResource(None, action="execute")

  Directory(params.hive_interactive_etc_dir_prefix,
            mode=0755
            )

  Logger.info("Directories to fill with configs: %s" % str(params.hive_conf_dirs_list))
  for conf_dir in params.hive_conf_dirs_list:
    fill_conf_dir(conf_dir)

  '''
  As hive2/hive-site.xml only contains the new + the changed props compared to hive/hive-site.xml,
  we need to merge hive/hive-site.xml and hive2/hive-site.xml and store it in hive2/hive-site.xml.
  '''
  merged_hive_interactive_site = {}
  merged_hive_interactive_site.update(params.config['configurations']['hive-site'])
  merged_hive_interactive_site.update(params.config['configurations']['hive-interactive-site'])
  for item in exclude_list:
    if item in merged_hive_interactive_site.keys():
      del merged_hive_interactive_site[item]

  '''
  The 'hive.llap.io.memory.size' value calculated in stack_advisor is currently in MB. We need to
  convert it to bytes before writing it to the config file.
  '''
  if 'hive.llap.io.memory.size' in merged_hive_interactive_site.keys():
    hive_llap_io_mem_size_in_mb = merged_hive_interactive_site.get("hive.llap.io.memory.size")
    hive_llap_io_mem_size_in_bytes = long(hive_llap_io_mem_size_in_mb) * MB_TO_BYTES
    merged_hive_interactive_site['hive.llap.io.memory.size'] = hive_llap_io_mem_size_in_bytes
    Logger.info("Converted 'hive.llap.io.memory.size' value from '{0} MB' to '{1} Bytes' before writing "
                "it to config file.".format(hive_llap_io_mem_size_in_mb, hive_llap_io_mem_size_in_bytes))

  '''
  Hive2 doesn't support Atlas; we need to remove the hook 'org.apache.atlas.hive.hook.HiveHook',
  which would have come in via the 'hive.exec.post.hooks' config during the site merge, if Atlas is installed.
  '''
  remove_atlas_hook_if_exists(merged_hive_interactive_site)

  '''
  As tez_hive2/tez-site.xml only contains the new + the changed props compared to tez/tez-site.xml,
  we need to merge tez/tez-site.xml and tez_hive2/tez-site.xml and store it in tez_hive2/tez-site.xml.
  '''
  merged_tez_interactive_site = {}
  if 'tez-site' in params.config['configurations']:
    merged_tez_interactive_site.update(params.config['configurations']['tez-site'])
    Logger.info("Retrieved 'tez/tez-site' for merging with 'tez_hive2/tez-interactive-site'.")
  else:
    Logger.error("Tez's 'tez-site' couldn't be retrieved from passed-in configurations.")

  merged_tez_interactive_site.update(params.config['configurations']['tez-interactive-site'])
  XmlConfig("tez-site.xml",
            conf_dir = params.tez_interactive_config_dir,
            configurations = merged_tez_interactive_site,
            configuration_attributes=params.config['configuration_attributes']['tez-interactive-site'],
            owner = params.tez_interactive_user,
            group = params.user_group,
            mode = 0664)

  '''
  Merge properties from hiveserver2-interactive-site into hiveserver2-site
  '''
  merged_hiveserver2_interactive_site = {}
  if 'hiveserver2-site' in params.config['configurations']:
    merged_hiveserver2_interactive_site.update(params.config['configurations']['hiveserver2-site'])
    Logger.info("Retrieved 'hiveserver2-site' for merging with 'hiveserver2-interactive-site'.")
  else:
    Logger.error("'hiveserver2-site' couldn't be retrieved from passed-in configurations.")
  merged_hiveserver2_interactive_site.update(params.config['configurations']['hiveserver2-interactive-site'])


  # Create config files under /etc/hive2/conf and /etc/hive2/conf/conf.server:
  #   hive-site.xml
  #   hive-env.sh
  #   llap-daemon-log4j2.properties
  #   llap-cli-log4j2.properties
  #   hive-log4j2.properties
  #   hive-exec-log4j2.properties
  #   beeline-log4j2.properties

  hive2_conf_dirs_list = params.hive_conf_dirs_list
  hive2_client_conf_path = format("{stack_root}/current/{component_directory}/conf")

  # Making copy of 'merged_hive_interactive_site' in 'merged_hive_interactive_site_copy', and deleting 'javax.jdo.option.ConnectionPassword'
  # config from there, as Hive2 client shouldn't have that config.
  merged_hive_interactive_site_copy = merged_hive_interactive_site.copy()
  for item in exclude_list_for_hive2_client:
    if item in merged_hive_interactive_site.keys():
      del merged_hive_interactive_site_copy[item]

  for conf_dir in hive2_conf_dirs_list:
      mode_identified = 0644 if conf_dir == hive2_client_conf_path else 0600
      if conf_dir == hive2_client_conf_path:
        XmlConfig("hive-site.xml",
                  conf_dir=conf_dir,
                  configurations=merged_hive_interactive_site_copy,
                  configuration_attributes=params.config['configuration_attributes']['hive-interactive-site'],
                  owner=params.hive_user,
                  group=params.user_group,
                  mode=0644)
      else:
        XmlConfig("hive-site.xml",
                  conf_dir=conf_dir,
                  configurations=merged_hive_interactive_site,
                  configuration_attributes=params.config['configuration_attributes']['hive-interactive-site'],
                  owner=params.hive_user,
                  group=params.user_group,
                  mode=0600)
      XmlConfig("hiveserver2-site.xml",
                conf_dir=conf_dir,
                configurations=merged_hiveserver2_interactive_site,
                configuration_attributes=params.config['configuration_attributes']['hiveserver2-interactive-site'],
                owner=params.hive_user,
                group=params.user_group,
                mode=mode_identified)

      hive_server_interactive_conf_dir = conf_dir

      File(format("{hive_server_interactive_conf_dir}/hive-env.sh"),
           owner=params.hive_user,
           group=params.user_group,
           mode=mode_identified,
           content=InlineTemplate(params.hive_interactive_env_sh_template))

      llap_daemon_log4j_filename = 'llap-daemon-log4j2.properties'
      File(format("{hive_server_interactive_conf_dir}/{llap_daemon_log4j_filename}"),
           mode=mode_identified,
           group=params.user_group,
           owner=params.hive_user,
           content=params.llap_daemon_log4j)

      llap_cli_log4j2_filename = 'llap-cli-log4j2.properties'
      File(format("{hive_server_interactive_conf_dir}/{llap_cli_log4j2_filename}"),
           mode=mode_identified,
           group=params.user_group,
           owner=params.hive_user,
           content=params.llap_cli_log4j2)

      hive_log4j2_filename = 'hive-log4j2.properties'
      File(format("{hive_server_interactive_conf_dir}/{hive_log4j2_filename}"),
         mode=mode_identified,
         group=params.user_group,
         owner=params.hive_user,
         content=params.hive_log4j2)

      hive_exec_log4j2_filename = 'hive-exec-log4j2.properties'
      File(format("{hive_server_interactive_conf_dir}/{hive_exec_log4j2_filename}"),
         mode=mode_identified,
         group=params.user_group,
         owner=params.hive_user,
         content=params.hive_exec_log4j2)

      beeline_log4j2_filename = 'beeline-log4j2.properties'
      File(format("{hive_server_interactive_conf_dir}/{beeline_log4j2_filename}"),
         mode=mode_identified,
         group=params.user_group,
         owner=params.hive_user,
         content=params.beeline_log4j2)

      File(os.path.join(hive_server_interactive_conf_dir, "hadoop-metrics2-hiveserver2.properties"),
           owner=params.hive_user,
           group=params.user_group,
           mode=mode_identified,
           content=Template("hadoop-metrics2-hiveserver2.properties.j2")
           )

      File(format("{hive_server_interactive_conf_dir}/hadoop-metrics2-llapdaemon.properties"),
           owner=params.hive_user,
           group=params.user_group,
           mode=mode_identified,
           content=Template("hadoop-metrics2-llapdaemon.j2"))

      File(format("{hive_server_interactive_conf_dir}/hadoop-metrics2-llaptaskscheduler.properties"),
           owner=params.hive_user,
           group=params.user_group,
           mode=mode_identified,
           content=Template("hadoop-metrics2-llaptaskscheduler.j2"))


  # On some OSes this folder may not exist, so create it before pushing files there
  Directory(params.limits_conf_dir,
            create_parents = True,
            owner='root',
            group='root')

  File(os.path.join(params.limits_conf_dir, 'hive.conf'),
       owner='root',
       group='root',
       mode=0644,
       content=Template("hive.conf.j2"))

  if not os.path.exists(params.target_hive_interactive):
    jdbc_connector(params.target_hive_interactive, params.hive_intaractive_previous_jdbc_jar)

  File(format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
       content = DownloadSource(format("{jdk_location}{check_db_connection_jar_name}")),
       mode = 0644)
  File(params.start_hiveserver2_interactive_path,
       mode=0755,
       content=Template(format('{start_hiveserver2_interactive_script}')))

  Directory(params.hive_pid_dir,
            create_parents=True,
            cd_access='a',
            owner=params.hive_user,
            group=params.user_group,
            mode=0755)
  Directory(params.hive_log_dir,
            create_parents=True,
            cd_access='a',
            owner=params.hive_user,
            group=params.user_group,
            mode=0755)
  Directory(params.hive_interactive_var_lib,
            create_parents=True,
            cd_access='a',
            owner=params.hive_user,
            group=params.user_group,
            mode=0755)
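
A compact sketch of the client-vs-server split above: the client conf dir receives a password-stripped copy of the merged site with mode 0644, while server dirs receive the full config with mode 0600. Paths and keys are illustrative:

merged_site = {'hive.llap.io.enabled': 'true',
               'javax.jdo.option.ConnectionPassword': 'secret'}   # illustrative
exclude_for_client = ['javax.jdo.option.ConnectionPassword']

client_conf_path = '/etc/hive2/conf'
conf_dirs = ['/etc/hive2/conf', '/etc/hive2/conf/conf.server']

for conf_dir in conf_dirs:
    if conf_dir == client_conf_path:
        site = dict(merged_site)
        for key in exclude_for_client:
            site.pop(key, None)           # clients must never see the password
        mode = 0o644                      # world-readable client config
    else:
        site, mode = merged_site, 0o600   # server-only, owner-readable
    # XmlConfig("hive-site.xml", conf_dir=conf_dir, configurations=site, mode=mode)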
Example #6
def oozie(is_server=False):
  import params

  if is_server:
    params.HdfsResource(params.oozie_hdfs_user_dir,
                         type="directory",
                         action="create_on_execute",
                         owner=params.oozie_user,
                         mode=params.oozie_hdfs_user_mode
    )
    params.HdfsResource(None, action="execute")
  Directory(params.conf_dir,
             create_parents = True,
             owner = params.oozie_user,
             group = params.user_group
  )
  XmlConfig("oozie-site.xml",
    conf_dir = params.conf_dir,
    configurations = params.oozie_site,
    configuration_attributes=params.config['configuration_attributes']['oozie-site'],
    owner = params.oozie_user,
    group = params.user_group,
    mode = 0664
  )
  File(format("{conf_dir}/oozie-env.sh"),
    owner=params.oozie_user,
    content=InlineTemplate(params.oozie_env_sh_template),
    group=params.user_group,
  )

  if params.log4j_props is not None:
    File(format("{params.conf_dir}/oozie-log4j.properties"),
      mode=0644,
      group=params.user_group,
      owner=params.oozie_user,
      content=params.log4j_props
    )
  elif os.path.exists(format("{params.conf_dir}/oozie-log4j.properties")):
    File(format("{params.conf_dir}/oozie-log4j.properties"),
      mode=0644,
      group=params.user_group,
      owner=params.oozie_user
    )

  File(format("{params.conf_dir}/adminusers.txt"),
    mode=0644,
    group=params.user_group,
    owner=params.oozie_user,
    content=Template('adminusers.txt.j2', oozie_admin_users=params.oozie_admin_users)
  )

  if params.jdbc_driver_name in ("com.mysql.jdbc.Driver",
                                 "com.microsoft.sqlserver.jdbc.SQLServerDriver",
                                 "org.postgresql.Driver",
                                 "oracle.jdbc.driver.OracleDriver"):
    File(format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
      content = DownloadSource(format("{jdk_location}{check_db_connection_jar_name}")),
    )

  oozie_ownership()
  
  if is_server:      
    oozie_server_specific()
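
The oozie-log4j.properties handling above follows a common Ambari pattern: write managed content when the property is set, otherwise only re-assert ownership and mode on a pre-existing file. A standalone sketch with illustrative paths and ids:

import os

def manage_log4j(path, log4j_props, uid=-1, gid=-1):
    if log4j_props is not None:
        # Ambari manages the content: (re)write the file
        with open(path, 'w') as f:
            f.write(log4j_props)
        os.chmod(path, 0o644)
        os.chown(path, uid, gid)      # -1 leaves owner/group unchanged
    elif os.path.exists(path):
        # content not managed: keep what is there, normalize perms/ownership
        os.chmod(path, 0o644)
        os.chown(path, uid, gid)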
Example #7
def oozie_server_specific():
    import params

    File(
        params.pid_file,
        action="delete",
        not_if=format(
            "ls {pid_file} >/dev/null 2>&1 && !(ps `cat {pid_file}` >/dev/null 2>&1)"
        )
    )

    oozie_server_directories = [
        format("{oozie_home}/{oozie_tmp_dir}"), params.oozie_pid_dir,
        params.oozie_log_dir, params.oozie_tmp_dir, params.oozie_data_dir,
        params.oozie_lib_dir, params.oozie_webapps_dir,
        params.oozie_webapps_conf_dir, params.oozie_server_dir
    ]
    Directory(
        oozie_server_directories,
        owner=params.oozie_user,
        group=params.user_group,
        mode=0755,
        recursive=True,
        cd_access="a",
    )

    Directory(
        params.oozie_libext_dir,
        recursive=True,
    )

    configure_cmds = []
    configure_cmds.append(
        ('tar', '-xvf', format('{oozie_home}/oozie-sharelib.tar.gz'), '-C',
         params.oozie_home))
    configure_cmds.append(('cp', params.ext_js_path, params.oozie_libext_dir))
    configure_cmds.append(('chown', format('{oozie_user}:{user_group}'),
                           format('{oozie_libext_dir}/{ext_js_file}')))
    configure_cmds.append(('chown', '-RL', format('{oozie_user}:{user_group}'),
                           params.oozie_webapps_conf_dir))

    no_op_test = format(
        "ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1"
    )
    Execute(
        configure_cmds,
        not_if=no_op_test,
        sudo=True,
    )

    if params.jdbc_driver_name in ("com.mysql.jdbc.Driver",
                                   "com.microsoft.sqlserver.jdbc.SQLServerDriver",
                                   "oracle.jdbc.driver.OracleDriver"):
        File(
            params.downloaded_custom_connector,
            content=DownloadSource(params.driver_curl_source),
        )

        Execute(
            ('cp', '--remove-destination', params.downloaded_custom_connector,
             params.target),
            #creates=params.target, TODO: uncomment once ranger_hive_plugin no longer provides the jdbc driver
            path=["/bin", "/usr/bin/"],
            sudo=True)

    # Falcon EL extension
    if params.has_falcon_host:
        Execute(
            format(
                'sudo cp {falcon_home}/oozie/ext/falcon-oozie-el-extension-*.jar {oozie_libext_dir}'
            ),
            not_if=no_op_test,
        )
        Execute(
            format(
                'sudo chown {oozie_user}:{user_group} {oozie_libext_dir}/falcon-oozie-el-extension-*.jar'
            ),
            not_if=no_op_test,
        )
    if params.lzo_enabled:
        Package(params.lzo_packages_for_current_host)
        Execute(
            format(
                'sudo cp {hadoop_lib_home}/hadoop-lzo*.jar {oozie_lib_dir}'),
            not_if=no_op_test,
        )

    Execute(format("cd {oozie_tmp_dir} && {oozie_setup_sh} prepare-war"),
            user=params.oozie_user,
            not_if=no_op_test)

    if params.hdp_stack_version != "" and compare_versions(
            params.hdp_stack_version, '2.2') >= 0:
        # Create hive-site and tez-site configs for oozie
        Directory(params.hive_conf_dir,
                  recursive=True,
                  owner=params.oozie_user,
                  group=params.user_group)
        if 'hive-site' in params.config['configurations']:
            XmlConfig(
                "hive-site.xml",
                conf_dir=params.hive_conf_dir,
                configurations=params.config['configurations']['hive-site'],
                configuration_attributes=params.
                config['configuration_attributes']['hive-site'],
                owner=params.oozie_user,
                group=params.user_group,
                mode=0644)
        if 'tez-site' in params.config['configurations']:
            XmlConfig(
                "tez-site.xml",
                conf_dir=params.hive_conf_dir,
                configurations=params.config['configurations']['tez-site'],
                configuration_attributes=params.
                config['configuration_attributes']['tez-site'],
                owner=params.oozie_user,
                group=params.user_group,
                mode=0664)
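
The no_op_test guard above treats "pid file exists and the recorded pid is alive" as "Oozie server already running", and skips the configuration commands in that case. The same check as plain Python, with a hypothetical pid file path:

import os
import subprocess

def oozie_is_running(pid_file):
    if not os.path.isfile(pid_file):
        return False
    with open(pid_file) as f:
        pid = f.read().strip()
    # `ps -p PID` exits 0 only if a process with that pid exists
    with open(os.devnull, 'wb') as devnull:
        return subprocess.call(['ps', '-p', pid], stdout=devnull) == 0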
Example #8
def setup_ranger_admin(upgrade_type=None):
    import params

    if upgrade_type is None:
        upgrade_type = Script.get_upgrade_type(
            default("/commandParams/upgrade_type", ""))

    ranger_home = params.ranger_home
    ranger_conf = params.ranger_conf

    Directory(ranger_conf,
              owner=params.unix_user,
              group=params.unix_group,
              create_parents=True)

    copy_jdbc_connector(ranger_home)

    File(
        format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
        content=DownloadSource(
            format("{jdk_location}/{check_db_connection_jar_name}")),
        mode=0644,
    )

    cp = format("{check_db_connection_jar}")
    if params.db_flavor.lower() == 'sqla':
        cp = cp + os.pathsep + format("{ranger_home}/ews/lib/sajdbc4.jar")
    else:
        cp = cp + os.pathsep + format("{driver_curl_target}")
    cp = cp + os.pathsep + format("{ranger_home}/ews/lib/*")

    db_connection_check_command = format(
        "{java_home}/bin/java -cp {cp} org.apache.ambari.server.DBConnectionVerification '{ranger_jdbc_connection_url}' {ranger_db_user} {ranger_db_password!p} {ranger_jdbc_driver}"
    )

    env_dict = {}
    if params.db_flavor.lower() == 'sqla':
        env_dict = {'LD_LIBRARY_PATH': params.ld_lib_path}

    Execute(db_connection_check_command,
            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
            tries=5,
            try_sleep=10,
            environment=env_dict)

    Execute(
        ('ln', '-sf', format('{ranger_home}/ews/webapp/WEB-INF/classes/conf'),
         format('{ranger_home}/conf')),
        not_if=format("ls {ranger_home}/conf"),
        only_if=format("ls {ranger_home}/ews/webapp/WEB-INF/classes/conf"),
        sudo=True)

    if upgrade_type is not None:
        src_file = format(
            '{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/ranger-admin-default-site.xml'
        )
        dst_file = format('{ranger_home}/conf/ranger-admin-default-site.xml')
        Execute(('cp', '-f', src_file, dst_file), sudo=True)

        src_file = format(
            '{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/security-applicationContext.xml'
        )
        dst_file = format('{ranger_home}/conf/security-applicationContext.xml')

        Execute(('cp', '-f', src_file, dst_file), sudo=True)

    Directory(
        format('{ranger_home}/'),
        owner=params.unix_user,
        group=params.unix_group,
        recursive_ownership=True,
    )

    Directory(params.ranger_pid_dir,
              mode=0755,
              owner=params.unix_user,
              group=params.user_group,
              cd_access="a",
              create_parents=True)

    File(format('{ranger_conf}/ranger-admin-env.sh'),
         content=format("export JAVA_HOME={java_home}"),
         owner=params.unix_user,
         group=params.unix_group,
         mode=0755)

    if params.stack_supports_pid:
        File(
            format('{ranger_conf}/ranger-admin-env-piddir.sh'),
            content=format(
                "export RANGER_PID_DIR_PATH={ranger_pid_dir}\nexport RANGER_USER={unix_user}"
            ),
            owner=params.unix_user,
            group=params.unix_group,
            mode=0755)

    Directory(params.admin_log_dir,
              owner=params.unix_user,
              group=params.unix_group,
              create_parents=True,
              cd_access='a',
              mode=0755)

    File(format('{ranger_conf}/ranger-admin-env-logdir.sh'),
         content=format("export RANGER_ADMIN_LOG_DIR={admin_log_dir}"),
         owner=params.unix_user,
         group=params.unix_group,
         mode=0755)

    if os.path.isfile(params.ranger_admin_default_file):
        File(params.ranger_admin_default_file,
             owner=params.unix_user,
             group=params.unix_group)
    else:
        Logger.warning(
            'Required file {0} does not exist, copying it to {1}'.
            format(params.ranger_admin_default_file, ranger_conf))
        src_file = format(
            '{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/ranger-admin-default-site.xml'
        )
        dst_file = format('{ranger_home}/conf/ranger-admin-default-site.xml')
        Execute(('cp', '-f', src_file, dst_file), sudo=True)
        File(params.ranger_admin_default_file,
             owner=params.unix_user,
             group=params.unix_group)

    if os.path.isfile(params.security_app_context_file):
        File(params.security_app_context_file,
             owner=params.unix_user,
             group=params.unix_group)
    else:
        Logger.warning(
            'Required file {0} does not exist, copying it to {1}'.
            format(params.security_app_context_file, ranger_conf))
        src_file = format(
            '{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/security-applicationContext.xml'
        )
        dst_file = format('{ranger_home}/conf/security-applicationContext.xml')
        Execute(('cp', '-f', src_file, dst_file), sudo=True)
        File(params.security_app_context_file,
             owner=params.unix_user,
             group=params.unix_group)

    if upgrade_type is not None and params.stack_supports_config_versioning:
        if os.path.islink('/usr/bin/ranger-admin'):
            Link('/usr/bin/ranger-admin', action="delete")

        Link('/usr/bin/ranger-admin',
             to=format('{ranger_home}/ews/ranger-admin-services.sh'))

    if default(
            "/configurations/ranger-admin-site/ranger.authentication.method",
            "") == 'PAM':
        d = '/etc/pam.d'
        if os.path.isdir(d):
            if os.path.isfile(os.path.join(d, 'ranger-admin')):
                Logger.info('ranger-admin PAM file already exists.')
            else:
                File(format('{d}/ranger-admin'),
                     content=Template('ranger_admin_pam.j2'),
                     owner=params.unix_user,
                     group=params.unix_group,
                     mode=0644)
            if os.path.isfile(os.path.join(d, 'ranger-remote')):
                Logger.info('ranger-remote PAM file already exists.')
            else:
                File(format('{d}/ranger-remote'),
                     content=Template('ranger_remote_pam.j2'),
                     owner=params.unix_user,
                     group=params.unix_group,
                     mode=0644)
        else:
            Logger.error(
                "Unable to use PAM authentication, /etc/pam.d/ directory does not exist."
            )

    Execute(('ln', '-sf', format('{ranger_home}/ews/ranger-admin-services.sh'),
             '/usr/bin/ranger-admin'),
            not_if=format("ls /usr/bin/ranger-admin"),
            only_if=format("ls {ranger_home}/ews/ranger-admin-services.sh"),
            sudo=True)

    # remove plain-text password from xml configs

    ranger_admin_site_copy = {}
    ranger_admin_site_copy.update(
        params.config['configurations']['ranger-admin-site'])
    for prop in params.ranger_admin_password_properties:
        if prop in ranger_admin_site_copy:
            ranger_admin_site_copy[prop] = "_"

    XmlConfig("ranger-admin-site.xml",
              conf_dir=ranger_conf,
              configurations=ranger_admin_site_copy,
              configuration_attributes=params.config['configurationAttributes']
              ['ranger-admin-site'],
              owner=params.unix_user,
              group=params.unix_group,
              mode=0644)

    Directory(
        os.path.join(ranger_conf, 'ranger_jaas'),
        mode=0700,
        owner=params.unix_user,
        group=params.unix_group,
    )

    if params.stack_supports_ranger_log4j:
        File(format('{ranger_home}/ews/webapp/WEB-INF/log4j.properties'),
             owner=params.unix_user,
             group=params.unix_group,
             content=InlineTemplate(params.admin_log4j),
             mode=0644)

    do_keystore_setup(upgrade_type=upgrade_type)

    create_core_site_xml(ranger_conf)

    if params.stack_supports_ranger_kerberos and params.security_enabled:
        if params.is_hbase_ha_enabled and params.ranger_hbase_plugin_enabled:
            XmlConfig(
                "hbase-site.xml",
                conf_dir=ranger_conf,
                configurations=params.config['configurations']['hbase-site'],
                configuration_attributes=params.
                config['configurationAttributes']['hbase-site'],
                owner=params.unix_user,
                group=params.unix_group,
                mode=0644)

        if params.is_namenode_ha_enabled and params.ranger_hdfs_plugin_enabled:
            XmlConfig(
                "hdfs-site.xml",
                conf_dir=ranger_conf,
                configurations=params.config['configurations']['hdfs-site'],
                configuration_attributes=params.
                config['configurationAttributes']['hdfs-site'],
                owner=params.unix_user,
                group=params.unix_group,
                mode=0644)
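
The "remove plain-text password" step above copies the site dict and masks sensitive values before the XML is rendered; the real password reaches Ranger through the credential store instead. A sketch with an illustrative property list:

ranger_admin_site = {'ranger.jpa.jdbc.password': 'secret',     # illustrative
                     'ranger.service.http.port': '6080'}
ranger_admin_password_properties = ['ranger.jpa.jdbc.password']

ranger_admin_site_copy = dict(ranger_admin_site)
for prop in ranger_admin_password_properties:
    if prop in ranger_admin_site_copy:
        ranger_admin_site_copy[prop] = "_"   # placeholder written to the XML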
Example #9
def download_database_connector_if_needed():
    """
  Downloads the database connector to use when connecting to the metadata storage
  """
    import params
    if params.streamline_storage_type not in ('mysql', 'oracle'):
        # In any other case than oracle or mysql, e.g. postgres, just return.
        return

    if params.jdbc_driver_jar is None:
        if "mysql" in params.streamline_storage_type:
            Logger.error(
                "Failed to find mysql-java-connector jar. Make sure you followed the steps to register mysql driver"
            )
            Logger.info("Users should register the mysql java driver jar.")
            Logger.info("yum install mysql-connector-java*")
            Logger.info(
                "sudo ambari-server setup --jdbc-db=mysql --jdbc-driver=/usr/share/java/mysql-connector-java.jar"
            )
            raise Fail('Unable to establish jdbc connection to your ' +
                       params.streamline_storage_type + ' instance.')
        if "oracle" in params.streamline_storage_type:
            Logger.error(
                "Failed to find ojdbc jar. Please download and make sure you followed the steps to register oracle jdbc driver"
            )
            Logger.info("Users should register the oracle ojdbc driver jar.")
            Logger.info(
                "Create a symlink e.g. ln -s /usr/share/java/ojdbc6.jar /usr/share/java/ojdbc.jar"
            )
            Logger.info(
                "sudo ambari-server setup --jdbc-db=oracle --jdbc-driver=/usr/share/java/ojdbc.jar"
            )
            raise Fail('Unable to establish jdbc connection to your ' +
                       params.streamline_storage_type + ' instance.')

    File(params.check_db_connection_jar,
         content=DownloadSource(
             format("{jdk_location}/{check_db_connection_jar_name}")))

    target_jar_with_directory = params.connector_download_dir + os.path.sep + params.jdbc_driver_jar
    target_jar_bootstrap_dir = params.connector_bootstrap_download_dir + os.path.sep + params.jdbc_driver_jar

    if not os.path.exists(target_jar_with_directory):
        File(params.downloaded_custom_connector,
             content=DownloadSource(params.connector_curl_source))

        Execute(
            ('cp', '--remove-destination', params.downloaded_custom_connector,
             target_jar_with_directory),
            path=["/bin", "/usr/bin/"],
            sudo=True)

        File(target_jar_with_directory, owner="root", group=params.user_group)

    if not os.path.exists(target_jar_bootstrap_dir):
        File(params.downloaded_custom_connector,
             content=DownloadSource(params.connector_curl_source))

        Execute(('cp', '--remove-destination',
                 params.downloaded_custom_connector, target_jar_bootstrap_dir),
                path=["/bin", "/usr/bin/"],
                sudo=True)

        File(target_jar_bootstrap_dir, owner="root", group=params.user_group)
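
The dual-target copy above places the same connector jar into both the runtime and bootstrap locations, each only when missing; a standalone sketch with hypothetical paths:

import os
import shutil

def install_connector(downloaded_jar, jar_name, runtime_dir, bootstrap_dir):
    for target_dir in (runtime_dir, bootstrap_dir):
        target = os.path.join(target_dir, jar_name)
        if not os.path.exists(target):          # copy only when missing
            shutil.copyfile(downloaded_jar, target)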
Example #10
def setup_ranger_plugin(component_select_name,
                        service_name,
                        downloaded_custom_connector,
                        driver_curl_source,
                        driver_curl_target,
                        java_home,
                        repo_name,
                        plugin_repo_dict,
                        ranger_env_properties,
                        plugin_properties,
                        policy_user,
                        policymgr_mgr_url,
                        plugin_enabled,
                        component_user,
                        component_group,
                        api_version=None,
                        **kwargs):
    File(downloaded_custom_connector,
         content=DownloadSource(driver_curl_source),
         mode=0644)

    Execute(('cp', '--remove-destination', downloaded_custom_connector,
             driver_curl_target),
            path=["/bin", "/usr/bin/"],
            sudo=True)

    File(driver_curl_target, mode=0644)

    hdp_version = get_hdp_version(component_select_name)
    file_path = format(
        '/usr/hdp/{hdp_version}/ranger-{service_name}-plugin/install.properties'
    )

    if not os.path.isfile(file_path):
        raise Fail(
            format(
                'Ranger {service_name} plugin install.properties file does not exist at {file_path}'
            ))

    ModifyPropertiesFile(file_path, properties=plugin_properties)

    custom_plugin_properties = dict()
    custom_plugin_properties['CUSTOM_USER'] = component_user
    custom_plugin_properties['CUSTOM_GROUP'] = component_group
    ModifyPropertiesFile(file_path, properties=custom_plugin_properties)

    if plugin_enabled:
        cmd = (format('enable-{service_name}-plugin.sh'), )
        if api_version == 'v2':
            ranger_adm_obj = RangeradminV2(url=policymgr_mgr_url)
        else:
            ranger_adm_obj = Rangeradmin(url=policymgr_mgr_url)

        ranger_adm_obj.create_ranger_repository(
            service_name, repo_name, plugin_repo_dict,
            ranger_env_properties['ranger_admin_username'],
            ranger_env_properties['ranger_admin_password'],
            ranger_env_properties['admin_username'],
            ranger_env_properties['admin_password'], policy_user)
    else:
        cmd = (format('disable-{service_name}-plugin.sh'), )

    cmd_env = {
        'JAVA_HOME': java_home,
        'PWD': format('/usr/hdp/{hdp_version}/ranger-{service_name}-plugin'),
        'PATH': format('/usr/hdp/{hdp_version}/ranger-{service_name}-plugin')
    }

    Execute(
        cmd,
        environment=cmd_env,
        logoutput=True,
        sudo=True,
    )
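
ModifyPropertiesFile, used above on install.properties, is assumed to rewrite
matching KEY=value lines in place and leave all other lines untouched. A
minimal sketch of those semantics (not the real implementation):

    def modify_properties_sketch(path, properties):
        with open(path) as f:
            lines = f.readlines()
        with open(path, 'w') as f:
            for line in lines:
                key = line.split('=', 1)[0].strip()
                if '=' in line and key in properties:
                    f.write('%s=%s\n' % (key, properties[key]))
                else:
                    f.write(line)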
Example No. 11
def hive_interactive(name=None):
    import params

    # list of properties that should be excluded from the config
    # this approach is a compromise against adding a dedicated config
    # type for hive_server_interactive or needed config groups on a
    # per component basis
    exclude_list = ['hive.enforce.bucketing', 'hive.enforce.sorting']

    # Copy Tarballs in HDFS.
    if params.stack_version_formatted_major and check_stack_feature(
            StackFeature.ROLLING_UPGRADE,
            params.stack_version_formatted_major):
        resource_created = copy_to_hdfs(
            "tez_hive2",
            params.user_group,
            params.hdfs_user,
            file_mode=params.tarballs_mode,
            host_sys_prepped=params.host_sys_prepped)
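        # copy_to_hdfs is assumed to return True when it queued a new HDFS
        # resource; the queue is flushed by the execute call below.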

        if resource_created:
            params.HdfsResource(None, action="execute")

    Directory(params.hive_interactive_etc_dir_prefix, mode=0755)

    Logger.info("Directories to fill with configs: %s" %
                str(params.hive_conf_dirs_list))
    for conf_dir in params.hive_conf_dirs_list:
        fill_conf_dir(conf_dir)
    # As hive2/hive-site.xml contains only the new and changed properties
    # relative to hive/hive-site.xml, merge the two and store the result in
    # hive2/hive-site.xml.
    merged_hive_interactive_site = {}
    merged_hive_interactive_site.update(
        params.config['configurations']['hive-site'])
    merged_hive_interactive_site.update(
        params.config['configurations']['hive-interactive-site'])
    for item in exclude_list:
        if item in merged_hive_interactive_site.keys():
            del merged_hive_interactive_site[item]
    # Hive2 does not support Atlas, so remove the hook
    # 'org.apache.atlas.hive.hook.HiveHook', which the site merge above would
    # have carried into 'hive.exec.post.hooks' if Atlas is installed.
    remove_atlas_hook_if_exists(merged_hive_interactive_site)
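    # A hedged sketch of what remove_atlas_hook_if_exists() is assumed to do
    # (the real helper is imported from the shared libraries): drop the Atlas
    # hook class from the comma-separated 'hive.exec.post.hooks' value.
    #
    #   hooks_key = 'hive.exec.post.hooks'
    #   if hooks_key in merged_hive_interactive_site:
    #       hooks = [h.strip() for h in
    #                merged_hive_interactive_site[hooks_key].split(',')]
    #       merged_hive_interactive_site[hooks_key] = ','.join(
    #           h for h in hooks
    #           if h and h != 'org.apache.atlas.hive.hook.HiveHook')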

    # Anything TODO for attributes

    # Merge tez-interactive with tez-site
    XmlConfig(
        "tez-site.xml",
        conf_dir=params.tez_interactive_config_dir,
        configurations=params.config['configurations']['tez-interactive-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['tez-interactive-site'],
        owner=params.tez_interactive_user,
        group=params.user_group,
        mode=0664)

    # Create config files under /etc/hive2/conf and /etc/hive2/conf/conf.server:
    #   hive-site.xml
    #   hive-env.sh
    #   llap-daemon-log4j2.properties
    #   llap-cli-log4j2.properties
    #   hive-log4j2.properties
    #   hive-exec-log4j2.properties
    #   beeline-log4j2.properties

    for conf_dir in params.hive_conf_dirs_list:
        XmlConfig(
            "hive-site.xml",
            conf_dir=conf_dir,
            configurations=merged_hive_interactive_site,
            configuration_attributes=params.config['configuration_attributes']
            ['hive-interactive-site'],
            owner=params.hive_user,
            group=params.user_group,
            mode=0644)

        hive_server_interactive_conf_dir = conf_dir

        File(format("{hive_server_interactive_conf_dir}/hive-env.sh"),
             owner=params.hive_user,
             group=params.user_group,
             content=InlineTemplate(params.hive_interactive_env_sh_template))

        llap_daemon_log4j_filename = 'llap-daemon-log4j2.properties'
        File(format(
            "{hive_server_interactive_conf_dir}/{llap_daemon_log4j_filename}"),
             mode=0644,
             group=params.user_group,
             owner=params.hive_user,
             content=params.llap_daemon_log4j)

        llap_cli_log4j2_filename = 'llap-cli-log4j2.properties'
        File(format(
            "{hive_server_interactive_conf_dir}/{llap_cli_log4j2_filename}"),
             mode=0644,
             group=params.user_group,
             owner=params.hive_user,
             content=params.llap_cli_log4j2)

        hive_log4j2_filename = 'hive-log4j2.properties'
        File(format(
            "{hive_server_interactive_conf_dir}/{hive_log4j2_filename}"),
             mode=0644,
             group=params.user_group,
             owner=params.hive_user,
             content=params.hive_log4j2)

        hive_exec_log4j2_filename = 'hive-exec-log4j2.properties'
        File(format(
            "{hive_server_interactive_conf_dir}/{hive_exec_log4j2_filename}"),
             mode=0644,
             group=params.user_group,
             owner=params.hive_user,
             content=params.hive_exec_log4j2)

        beeline_log4j2_filename = 'beeline-log4j2.properties'
        File(format(
            "{hive_server_interactive_conf_dir}/{beeline_log4j2_filename}"),
             mode=0644,
             group=params.user_group,
             owner=params.hive_user,
             content=params.beeline_log4j2)

        File(format(
            "{hive_server_interactive_conf_dir}/hadoop-metrics2-llapdaemon.properties"
        ),
             owner=params.hive_user,
             group=params.user_group,
             content=Template("hadoop-metrics2-llapdaemon.j2"))

        File(format(
            "{hive_server_interactive_conf_dir}/hadoop-metrics2-llaptaskscheduler.properties"
        ),
             owner=params.hive_user,
             group=params.user_group,
             content=Template("hadoop-metrics2-llaptaskscheduler.j2"))

    # On some OSes this folder may not exist, so create it before placing files there
    Directory(params.limits_conf_dir,
              create_parents=True,
              owner='root',
              group='root')

    File(os.path.join(params.limits_conf_dir, 'hive.conf'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("hive.conf.j2"))

    if not os.path.exists(params.target_hive_interactive):
        jdbc_connector(params.target_hive_interactive)

    File(format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
         content=DownloadSource(
             format("{jdk_location}{check_db_connection_jar_name}")),
         mode=0644)
    File(params.start_hiveserver2_interactive_path,
         mode=0755,
         content=Template(format('{start_hiveserver2_interactive_script}')))

    Directory(params.hive_pid_dir,
              create_parents=True,
              cd_access='a',
              owner=params.hive_user,
              group=params.user_group,
              mode=0755)
    Directory(params.hive_log_dir,
              create_parents=True,
              cd_access='a',
              owner=params.hive_user,
              group=params.user_group,
              mode=0755)
    Directory(params.hive_interactive_var_lib,
              create_parents=True,
              cd_access='a',
              owner=params.hive_user,
              group=params.user_group,
              mode=0755)
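
XmlConfig, used throughout these examples, renders a configuration dictionary
into a Hadoop-style *-site.xml file under conf_dir with the given owner,
group, and mode. For a made-up property such as
configurations={'hive.llap.io.enabled': 'true'}, the emitted file is assumed
to look like:

    <configuration>
      <property>
        <name>hive.llap.io.enabled</name>
        <value>true</value>
      </property>
    </configuration>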
Example No. 12
def setup_ranger_plugin(component_select_name,
                        service_name,
                        previous_jdbc_jar,
                        downloaded_custom_connector,
                        driver_curl_source,
                        driver_curl_target,
                        java_home,
                        repo_name,
                        plugin_repo_dict,
                        ranger_env_properties,
                        plugin_properties,
                        policy_user,
                        policymgr_mgr_url,
                        plugin_enabled,
                        component_user,
                        component_group,
                        api_version=None,
                        skip_if_rangeradmin_down=True,
                        **kwargs):
    if driver_curl_source and not driver_curl_source.endswith("/None"):
        if previous_jdbc_jar and os.path.isfile(previous_jdbc_jar):
            File(previous_jdbc_jar, action='delete')

        File(downloaded_custom_connector,
             content=DownloadSource(driver_curl_source),
             mode=0644)

        Execute(('cp', '--remove-destination', downloaded_custom_connector,
                 driver_curl_target),
                path=["/bin", "/usr/bin/"],
                sudo=True)

        File(driver_curl_target, mode=0644)

    policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
    stack_root = Script.get_stack_root().replace('/usr/hdp', '/opt')
    file_path = format(
        '{stack_root}/ranger-{service_name}-plugin/install.properties')
    install_ranger_plugin(service_name)

    if not os.path.isfile(file_path):
        raise Fail(
            format(
                'Ranger {service_name} plugin install.properties file does not exist at {file_path}'
            ))

    ModifyPropertiesFile(file_path, properties=plugin_properties)

    custom_plugin_properties = dict()
    custom_plugin_properties['CUSTOM_USER'] = component_user
    custom_plugin_properties['CUSTOM_GROUP'] = component_group
    ModifyPropertiesFile(file_path, properties=custom_plugin_properties)

    if plugin_enabled:
        cmd = (format('enable-{service_name}-plugin.sh'), )
        if api_version == 'v2':
            ranger_adm_obj = RangeradminV2(
                url=policymgr_mgr_url,
                skip_if_rangeradmin_down=skip_if_rangeradmin_down)
        else:
            ranger_adm_obj = Rangeradmin(
                url=policymgr_mgr_url,
                skip_if_rangeradmin_down=skip_if_rangeradmin_down)

        ranger_adm_obj.create_ranger_repository(
            service_name, repo_name, plugin_repo_dict,
            ranger_env_properties['ranger_admin_username'],
            ranger_env_properties['ranger_admin_password'],
            ranger_env_properties['admin_username'],
            ranger_env_properties['admin_password'], policy_user)
    else:
        cmd = (format('disable-{service_name}-plugin.sh'), )

    cmd_env = {
        'JAVA_HOME': java_home,
        'PWD': format('{stack_root}/ranger-{service_name}-plugin'),
        'PATH': format('{stack_root}/ranger-{service_name}-plugin')
    }

    Execute(
        cmd,
        environment=cmd_env,
        logoutput=True,
        sudo=True,
    )
Example No. 13
def oozie(is_server=False, upgrade_type=None):
  import params

  if is_server:
    params.HdfsResource(params.oozie_hdfs_user_dir,
                         type="directory",
                         action="create_on_execute",
                         owner=params.oozie_user,
                         mode=params.oozie_hdfs_user_mode
    )
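    # HdfsResource calls with action="create_on_execute" are queued; the call
    # below with None flushes the queued operations in one step.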
    params.HdfsResource(None, action="execute")

    generate_logfeeder_input_config('oozie', Template("input.config-oozie.json.j2", extra_imports=[default]))
  Directory(params.conf_dir,
             create_parents = True,
             owner = params.oozie_user,
             group = params.user_group
  )

  params.oozie_site = update_credential_provider_path(params.oozie_site,
                                                      'oozie-site',
                                                      os.path.join(params.conf_dir, 'oozie-site.jceks'),
                                                      params.oozie_user,
                                                      params.user_group,
                                                      use_local_jceks=True
                                                      )

  XmlConfig("oozie-site.xml",
    conf_dir = params.conf_dir,
    configurations = params.oozie_site,
    configuration_attributes=params.config['configurationAttributes']['oozie-site'],
    owner = params.oozie_user,
    group = params.user_group,
    mode = 0664
  )
  File(format("{conf_dir}/oozie-env.sh"),
    owner=params.oozie_user,
    content=InlineTemplate(params.oozie_env_sh_template),
    group=params.user_group,
  )

  # On some OSes this folder may not exist, so create it before placing files there
  Directory(params.limits_conf_dir,
            create_parents=True,
            owner='root',
            group='root'
  )

  File(os.path.join(params.limits_conf_dir, 'oozie.conf'),
       owner='root',
       group='root',
       mode=0644,
       content=Template("oozie.conf.j2")
  )

  if params.log4j_props is not None:
    File(format("{params.conf_dir}/oozie-log4j.properties"),
      mode=0644,
      group=params.user_group,
      owner=params.oozie_user,
      content=InlineTemplate(params.log4j_props)
    )
  elif os.path.exists(format("{params.conf_dir}/oozie-log4j.properties")):
    File(format("{params.conf_dir}/oozie-log4j.properties"),
      mode=0644,
      group=params.user_group,
      owner=params.oozie_user
    )

  if params.stack_version_formatted and check_stack_feature(StackFeature.OOZIE_ADMIN_USER, params.stack_version_formatted):
    File(format("{params.conf_dir}/adminusers.txt"),
      mode=0644,
      group=params.user_group,
      owner=params.oozie_user,
      content=Template('adminusers.txt.j2', oozie_admin_users=params.oozie_admin_users)
    )
  else:
    File(format("{params.conf_dir}/adminusers.txt"),
         owner=params.oozie_user,
         group=params.user_group
    )

  if params.jdbc_driver_name in ("com.mysql.jdbc.Driver",
                                 "com.microsoft.sqlserver.jdbc.SQLServerDriver",
                                 "org.postgresql.Driver",
                                 "oracle.jdbc.driver.OracleDriver"):
    File(format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
      content = DownloadSource(format("{jdk_location}/{check_db_connection_jar_name}")),
    )

  oozie_ownership()
  
  if params.lzo_enabled:
    install_lzo_if_needed()
    Execute(format('{sudo} cp {hadoop_lib_home}/hadoop-lzo*.jar {oozie_lib_dir}'))
  
  if is_server:
    oozie_server_specific(upgrade_type)
Example No. 14
def setup_ranger_admin(upgrade_type=None):
    import params

    ranger_home = params.ranger_home
    ranger_conf = params.ranger_conf

    Directory(ranger_conf,
              owner=params.unix_user,
              group=params.unix_group,
              create_parents=True)

    if upgrade_type is not None:
        copy_jdbc_connector()

    File(
        format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
        content=DownloadSource(
            format("{jdk_location}{check_db_connection_jar_name}")),
        mode=0644,
    )

    cp = format("{check_db_connection_jar}")
    if params.db_flavor.lower() == 'sqla':
        cp = cp + os.pathsep + format("{ranger_home}/ews/lib/sajdbc4.jar")
    else:
        cp = cp + os.pathsep + format("{driver_curl_target}")
    cp = cp + os.pathsep + format("{ranger_home}/ews/lib/*")

    db_connection_check_command = format(
        "{java_home}/bin/java -cp {cp} org.apache.ambari.server.DBConnectionVerification '{ranger_jdbc_connection_url}' {ranger_db_user} {ranger_db_password!p} {ranger_jdbc_driver}"
    )
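    # The "!p" conversion above is assumed to be Ambari's password-masking
    # flag: the value is interpolated for execution but hidden in logged output.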

    env_dict = {}
    if params.db_flavor.lower() == 'sqla':
        env_dict = {'LD_LIBRARY_PATH': params.ld_lib_path}

    Execute(db_connection_check_command,
            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
            tries=5,
            try_sleep=10,
            environment=env_dict)

    Execute(
        ('ln', '-sf', format('{ranger_home}/ews/webapp/WEB-INF/classes/conf'),
         format('{ranger_home}/conf')),
        not_if=format("ls {ranger_home}/conf"),
        only_if=format("ls {ranger_home}/ews/webapp/WEB-INF/classes/conf"),
        sudo=True)

    if upgrade_type is not None:
        src_file = format(
            '{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/ranger-admin-default-site.xml'
        )
        dst_file = format('{ranger_home}/conf/ranger-admin-default-site.xml')
        Execute(('cp', '-f', src_file, dst_file), sudo=True)

        src_file = format(
            '{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/security-applicationContext.xml'
        )
        dst_file = format('{ranger_home}/conf/security-applicationContext.xml')

        Execute(('cp', '-f', src_file, dst_file), sudo=True)

    Directory(
        format('{ranger_home}/'),
        owner=params.unix_user,
        group=params.unix_group,
        recursive_ownership=True,
    )

    Directory(params.admin_log_dir,
              owner=params.unix_user,
              group=params.unix_group,
              create_parents=True,
              cd_access='a',
              mode=0755)

    File(format('{ranger_conf}/ranger-admin-env-logdir.sh'),
         content=format("export RANGER_ADMIN_LOG_DIR={admin_log_dir}"),
         owner=params.unix_user,
         group=params.unix_group,
         mode=0755)

    if os.path.isfile(params.ranger_admin_default_file):
        File(params.ranger_admin_default_file,
             owner=params.unix_user,
             group=params.unix_group)
    else:
        Logger.warning(
            'Required file {0} does not exist; copying the default file to {1}'.
            format(params.ranger_admin_default_file, ranger_conf))
        src_file = format(
            '{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/ranger-admin-default-site.xml'
        )
        dst_file = format('{ranger_home}/conf/ranger-admin-default-site.xml')
        Execute(('cp', '-f', src_file, dst_file), sudo=True)
        File(params.ranger_admin_default_file,
             owner=params.unix_user,
             group=params.unix_group)

    if os.path.isfile(params.security_app_context_file):
        File(params.security_app_context_file,
             owner=params.unix_user,
             group=params.unix_group)
    else:
        Logger.warning(
            'Required file {0} does not exist; copying the default file to {1}'.
            format(params.security_app_context_file, ranger_conf))
        src_file = format(
            '{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/security-applicationContext.xml'
        )
        dst_file = format('{ranger_home}/conf/security-applicationContext.xml')
        Execute(('cp', '-f', src_file, dst_file), sudo=True)
        File(params.security_app_context_file,
             owner=params.unix_user,
             group=params.unix_group)

    if upgrade_type is not None and params.stack_supports_config_versioning:
        if os.path.islink('/usr/bin/ranger-admin'):
            Link('/usr/bin/ranger-admin', action="delete")

        Link('/usr/bin/ranger-admin',
             to=format('{ranger_home}/ews/ranger-admin-services.sh'))

    Execute(('ln', '-sf', format('{ranger_home}/ews/ranger-admin-services.sh'),
             '/usr/bin/ranger-admin'),
            not_if=format("ls /usr/bin/ranger-admin"),
            only_if=format("ls {ranger_home}/ews/ranger-admin-services.sh"),
            sudo=True)

    XmlConfig(
        "ranger-admin-site.xml",
        conf_dir=ranger_conf,
        configurations=params.config['configurations']['ranger-admin-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['ranger-admin-site'],
        owner=params.unix_user,
        group=params.unix_group,
        mode=0644)

    Directory(
        os.path.join(ranger_conf, 'ranger_jaas'),
        mode=0700,
        owner=params.unix_user,
        group=params.unix_group,
    )

    do_keystore_setup(upgrade_type=upgrade_type)
Example No. 15
def kms(upgrade_type=None):
  import params

  if params.has_ranger_admin:

    Directory(params.kms_conf_dir,
      owner = params.kms_user,
      group = params.kms_group,
      create_parents = True
    )

    copy_jdbc_connector()

    File(format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
      content = DownloadSource(format("{jdk_location}{check_db_connection_jar_name}")),
      mode = 0644,
    )

    cp = format("{check_db_connection_jar}")
    if params.db_flavor.lower() == 'sqla':
      cp = cp + os.pathsep + format("{kms_home}/ews/webapp/lib/sajdbc4.jar")
    else:
      path_to_jdbc = format("{kms_home}/ews/webapp/lib/{jdbc_jar_name}")
      if not os.path.isfile(path_to_jdbc):
        default_connector = params.default_connectors_map.get(params.db_flavor.lower())
        path_to_jdbc = format("{kms_home}/ews/webapp/lib/") + default_connector if default_connector else None
        if path_to_jdbc is None or not os.path.isfile(path_to_jdbc):
          path_to_jdbc = format("{kms_home}/ews/webapp/lib/") + "*"
          error_message = "Error! Sorry, but we can't find a jdbc driver with the default name " + str(default_connector) + \
                " in the ranger kms lib dir, so the db connection check can fail. Please run 'ambari-server setup --jdbc-db={db_name} --jdbc-driver={path_to_jdbc}' on the server host."
          Logger.error(error_message)

      cp = cp + os.pathsep + path_to_jdbc

    db_connection_check_command = format(
      "{java_home}/bin/java -cp {cp} org.apache.ambari.server.DBConnectionVerification '{ranger_kms_jdbc_connection_url}' {db_user} {db_password!p} {ranger_kms_jdbc_driver}")
    
    env_dict = {}
    if params.db_flavor.lower() == 'sqla':
      env_dict = {'LD_LIBRARY_PATH':params.ld_library_path}

    Execute(db_connection_check_command, path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin', tries=5, try_sleep=10, environment=env_dict)

    if params.xa_audit_db_is_enabled and params.driver_source is not None and not params.driver_source.endswith("/None"):
      if params.xa_previous_jdbc_jar and os.path.isfile(params.xa_previous_jdbc_jar):
        File(params.xa_previous_jdbc_jar, action='delete')

      File(params.downloaded_connector_path,
        content = DownloadSource(params.driver_source),
        mode = 0644
      )

      Execute(('cp', '--remove-destination', params.downloaded_connector_path, params.driver_target),
          path=["/bin", "/usr/bin/"],
          sudo=True)

      File(params.driver_target, mode=0644)

    Directory(os.path.join(params.kms_home, 'ews', 'webapp', 'WEB-INF', 'classes', 'lib'),
        mode=0755,
        owner=params.kms_user,
        group=params.kms_group        
      )

    Execute(('cp', format('{kms_home}/ranger-kms-initd'), '/etc/init.d/ranger-kms'),
      not_if=format('ls /etc/init.d/ranger-kms'),
      only_if=format('ls {kms_home}/ranger-kms-initd'),
      sudo=True)

    File('/etc/init.d/ranger-kms',
      mode = 0755
    )

    Directory(format('{kms_home}/'),
              owner = params.kms_user,
              group = params.kms_group,
              recursive_ownership = True,
    )

    Directory(params.ranger_kms_pid_dir,
      mode=0755,
      owner = params.kms_user,
      group = params.user_group,
      cd_access = "a",
      create_parents=True
    )

    if params.stack_supports_pid:
      File(format('{kms_conf_dir}/ranger-kms-env-piddir.sh'),
        content = format("export RANGER_KMS_PID_DIR_PATH={ranger_kms_pid_dir}\nexport KMS_USER={kms_user}"),
        owner = params.kms_user,
        group = params.kms_group,
        mode=0755
      )

    Directory(params.kms_log_dir,
      owner = params.kms_user,
      group = params.kms_group,
      cd_access = 'a',
      create_parents=True,
      mode=0755
    )

    File(format('{kms_conf_dir}/ranger-kms-env-logdir.sh'),
      content = format("export RANGER_KMS_LOG_DIR={kms_log_dir}"),
      owner = params.kms_user,
      group = params.kms_group,
      mode=0755
    )

    Execute(('ln','-sf', format('{kms_home}/ranger-kms'),'/usr/bin/ranger-kms'),
      not_if=format('ls /usr/bin/ranger-kms'),
      only_if=format('ls {kms_home}/ranger-kms'),
      sudo=True)

    File('/usr/bin/ranger-kms', mode = 0755)

    Execute(('ln','-sf', format('{kms_home}/ranger-kms'),'/usr/bin/ranger-kms-services.sh'),
      not_if=format('ls /usr/bin/ranger-kms-services.sh'),
      only_if=format('ls {kms_home}/ranger-kms'),
      sudo=True)

    File('/usr/bin/ranger-kms-services.sh', mode = 0755)

    Execute(('ln','-sf', format('{kms_home}/ranger-kms-initd'),format('{kms_home}/ranger-kms-services.sh')),
      not_if=format('ls {kms_home}/ranger-kms-services.sh'),
      only_if=format('ls {kms_home}/ranger-kms-initd'),
      sudo=True)

    File(format('{kms_home}/ranger-kms-services.sh'), mode = 0755)

    Directory(params.kms_log_dir,
      owner = params.kms_user,
      group = params.kms_group,
      mode = 0775
    )
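
    # do_keystore_setup is assumed to provision each alias/password pair into
    # the JCEKS credential store referenced by credential_provider_path.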

    do_keystore_setup(params.credential_provider_path, params.jdbc_alias, params.db_password)
    do_keystore_setup(params.credential_provider_path, params.masterkey_alias, params.kms_master_key_password)
    if params.stack_support_kms_hsm and params.enable_kms_hsm:
      do_keystore_setup(params.credential_provider_path, params.hms_partition_alias, unicode(params.hms_partition_passwd))

    XmlConfig("dbks-site.xml",
      conf_dir=params.kms_conf_dir,
      configurations=params.config['configurations']['dbks-site'],
      configuration_attributes=params.config['configuration_attributes']['dbks-site'],
      owner=params.kms_user,
      group=params.kms_group,
      mode=0644
    )

    XmlConfig("ranger-kms-site.xml",
      conf_dir=params.kms_conf_dir,
      configurations=params.config['configurations']['ranger-kms-site'],
      configuration_attributes=params.config['configuration_attributes']['ranger-kms-site'],
      owner=params.kms_user,
      group=params.kms_group,
      mode=0644
    )

    XmlConfig("kms-site.xml",
      conf_dir=params.kms_conf_dir,
      configurations=params.config['configurations']['kms-site'],
      configuration_attributes=params.config['configuration_attributes']['kms-site'],
      owner=params.kms_user,
      group=params.kms_group,
      mode=0644
    )

    File(os.path.join(params.kms_conf_dir, "kms-log4j.properties"),
      owner=params.kms_user,
      group=params.kms_group,
      content=params.kms_log4j,
      mode=0644
    )
    if params.security_enabled:
      # core-site.xml linking required by setup for HDFS encryption
      XmlConfig("core-site.xml",
        conf_dir=params.kms_conf_dir,
        configurations=params.config['configurations']['core-site'],
        configuration_attributes=params.config['configuration_attributes']['core-site'],
        owner=params.kms_user,
        group=params.kms_group,
        mode=0644
      )
Example No. 16
def copy_jdbc_connector(ranger_home):
    import params

    if params.jdbc_jar_name is None and params.driver_curl_source.endswith(
            "/None"):
        error_message = format(
            "{db_flavor} jdbc driver cannot be downloaded from {jdk_location}\nPlease run 'ambari-server setup --jdbc-db={db_flavor} --jdbc-driver={{path_to_jdbc}}' on ambari-server host."
        )
        raise Fail(error_message)

    if params.driver_curl_source and not params.driver_curl_source.endswith(
            "/None"):
        if params.previous_jdbc_jar and os.path.isfile(
                params.previous_jdbc_jar):
            File(params.previous_jdbc_jar, action='delete')

    File(params.downloaded_custom_connector,
         content=DownloadSource(params.driver_curl_source),
         mode=0644)

    driver_curl_target = format("{ranger_home}/ews/lib/{jdbc_jar_name}")

    if params.db_flavor.lower() == 'sqla':
        Execute(('tar', '-xvf', params.downloaded_custom_connector, '-C',
                 params.tmp_dir),
                sudo=True)

        Execute(('cp', '--remove-destination', params.jar_path_in_archive,
                 os.path.join(ranger_home, 'ews', 'lib')),
                path=["/bin", "/usr/bin/"],
                sudo=True)

        File(os.path.join(ranger_home, 'ews', 'lib', 'sajdbc4.jar'), mode=0644)

        Directory(params.jdbc_libs_dir, cd_access="a", create_parents=True)
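
        # "yes | cp" auto-answers any interactive overwrite prompt (cp may be
        # aliased to "cp -i" on the host); auto_escape=False preserves the pipe.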

        Execute(as_sudo([
            'yes', '|', 'cp', params.libs_path_in_archive, params.jdbc_libs_dir
        ],
                        auto_escape=False),
                path=["/bin", "/usr/bin/"])
    else:
        Execute(
            ('cp', '--remove-destination', params.downloaded_custom_connector,
             os.path.join(ranger_home, 'ews', 'lib')),
            path=["/bin", "/usr/bin/"],
            sudo=True)

        File(os.path.join(ranger_home, 'ews', 'lib', params.jdbc_jar_name),
             mode=0644)

    ModifyPropertiesFile(
        format("{ranger_home}/install.properties"),
        properties=params.config['configurations']['admin-properties'],
        owner=params.unix_user,
    )

    if params.db_flavor.lower() == 'sqla':
        ModifyPropertiesFile(
            format("{ranger_home}/install.properties"),
            properties={
                'SQL_CONNECTOR_JAR':
                format('{ranger_home}/ews/lib/sajdbc4.jar')
            },
            owner=params.unix_user,
        )
    else:
        ModifyPropertiesFile(
            format("{ranger_home}/install.properties"),
            properties={'SQL_CONNECTOR_JAR': format('{driver_curl_target}')},
            owner=params.unix_user,
        )
Example No. 17
def copy_jdbc_connector(stack_version=None):
  import params

  if params.jdbc_jar_name is None and params.driver_curl_source.endswith("/None"):
    error_message = "Error! Sorry, but we can't find the jdbc driver for the {0} database to download from {1}. \
Please run 'ambari-server setup --jdbc-db={{db_name}} --jdbc-driver={{path_to_jdbc}}' on the server host.".format(params.db_flavor, params.jdk_location)
    Logger.error(error_message)

  if params.driver_curl_source and not params.driver_curl_source.endswith("/None"):
    if params.previous_jdbc_jar and os.path.isfile(params.previous_jdbc_jar):
      File(params.previous_jdbc_jar, action='delete')

  kms_home = params.kms_home
  if stack_version is not None:
    kms_home = format("{stack_root}/{stack_version}/ranger-kms")

  driver_curl_target = format("{kms_home}/ews/webapp/lib/{jdbc_jar_name}")

  File(params.downloaded_custom_connector,
    content = DownloadSource(params.driver_curl_source),
    mode = 0644
  )

  Directory(os.path.join(kms_home, 'ews', 'lib'),
    mode=0755
  )

  if params.db_flavor.lower() == 'sqla':
    Execute(('tar', '-xvf', params.downloaded_custom_connector, '-C', params.tmp_dir), sudo = True)

    Execute(('cp', '--remove-destination', params.jar_path_in_archive, os.path.join(kms_home, 'ews', 'webapp', 'lib')),
      path=["/bin", "/usr/bin/"],
      sudo=True)

    Directory(params.jdbc_libs_dir,
      cd_access="a",
      create_parents=True)

    Execute(as_sudo(['yes', '|', 'cp', params.libs_path_in_archive, params.jdbc_libs_dir], auto_escape=False),
      path=["/bin", "/usr/bin/"])

    File(os.path.join(kms_home, 'ews', 'webapp', 'lib', 'sajdbc4.jar'), mode=0644)
  else:
    Execute(('cp', '--remove-destination', params.downloaded_custom_connector, os.path.join(kms_home, 'ews', 'webapp', 'lib')),
      path=["/bin", "/usr/bin/"],
      sudo=True)

    File(os.path.join(kms_home, 'ews', 'webapp', 'lib', params.jdbc_jar_name), mode=0644)

  ModifyPropertiesFile(format("{kms_home}/install.properties"),
    properties = params.config['configurations']['kms-properties'],
    owner = params.kms_user
  )

  if params.db_flavor.lower() == 'sqla':
    ModifyPropertiesFile(format("{kms_home}/install.properties"),
      properties = {'SQL_CONNECTOR_JAR': format('{kms_home}/ews/webapp/lib/sajdbc4.jar')},
      owner = params.kms_user,
    )
  else:
    ModifyPropertiesFile(format("{kms_home}/install.properties"),
      properties = {'SQL_CONNECTOR_JAR': format('{driver_curl_target}')},
      owner = params.kms_user,
    )
Example No. 18
def hive(name=None):
    import params

    if name == 'hiveserver2':
        # HDP 2.1.* or lower
        if params.hdp_stack_version_major != "" and compare_versions(
                params.hdp_stack_version_major, "2.2.0.0") < 0:
            params.HdfsResource(params.webhcat_apps_dir,
                                type="directory",
                                action="create_on_execute",
                                owner=params.webhcat_user,
                                mode=0755)

        # Create webhcat dirs.
        if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
            params.HdfsResource(params.hcat_hdfs_user_dir,
                                type="directory",
                                action="create_on_execute",
                                owner=params.hcat_user,
                                mode=params.hcat_hdfs_user_mode)

        params.HdfsResource(params.webhcat_hdfs_user_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.webhcat_user,
                            mode=params.webhcat_hdfs_user_mode)

        # ****** Begin Copy Tarballs ******
        # *********************************
        # HDP 2.2 or higher, copy mapreduce.tar.gz to HDFS
        if params.hdp_stack_version_major != "" and compare_versions(
                params.hdp_stack_version_major, '2.2') >= 0:
            copy_to_hdfs("mapreduce",
                         params.user_group,
                         params.hdfs_user,
                         host_sys_prepped=params.host_sys_prepped)
            copy_to_hdfs("tez",
                         params.user_group,
                         params.hdfs_user,
                         host_sys_prepped=params.host_sys_prepped)

        # Always copy pig.tar.gz and hive.tar.gz using the appropriate mode.
        # This can use a different source and dest location to account for both HDP 2.1 and 2.2
        copy_to_hdfs("pig",
                     params.user_group,
                     params.hdfs_user,
                     file_mode=params.tarballs_mode,
                     custom_source_file=params.pig_tar_source,
                     custom_dest_file=params.pig_tar_dest_file,
                     host_sys_prepped=params.host_sys_prepped)
        copy_to_hdfs("hive",
                     params.user_group,
                     params.hdfs_user,
                     file_mode=params.tarballs_mode,
                     custom_source_file=params.hive_tar_source,
                     custom_dest_file=params.hive_tar_dest_file,
                     host_sys_prepped=params.host_sys_prepped)

        wildcard_tarballs = ["sqoop", "hadoop_streaming"]
        for tarball_name in wildcard_tarballs:
            source_file_pattern = getattr(params, tarball_name + "_tar_source")
            dest_dir = getattr(params, tarball_name + "_tar_dest_dir")

            if source_file_pattern is None or dest_dir is None:
                continue

            source_files = glob.glob(
                source_file_pattern) if "*" in source_file_pattern else [
                    source_file_pattern
                ]
            for source_file in source_files:
                src_filename = os.path.basename(source_file)
                dest_file = os.path.join(dest_dir, src_filename)

                copy_to_hdfs(tarball_name,
                             params.user_group,
                             params.hdfs_user,
                             file_mode=params.tarballs_mode,
                             custom_source_file=source_file,
                             custom_dest_file=dest_file,
                             host_sys_prepped=params.host_sys_prepped)
        # ******* End Copy Tarballs *******
        # *********************************

        # Create Hive Metastore Warehouse Dir
        params.HdfsResource(params.hive_apps_whs_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.hive_user,
                            mode=0777)

        # Create Hive User Dir
        params.HdfsResource(params.hive_hdfs_user_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.hive_user,
                            mode=params.hive_hdfs_user_mode)

        if not is_empty(params.hive_exec_scratchdir) and not urlparse(
                params.hive_exec_scratchdir).path.startswith("/tmp"):
            params.HdfsResource(
                params.hive_exec_scratchdir,
                type="directory",
                action="create_on_execute",
                owner=params.hive_user,
                group=params.hdfs_user,
                mode=0777
            )  # Hive expects this dir to be writeable by everyone as it is used as a temp dir

        params.HdfsResource(None, action="execute")

    Directory(params.hive_etc_dir_prefix, mode=0755)

    # We should change configurations for client as well as for server.
    # The reason is that stale-configs are service-level, not component.
    for conf_dir in params.hive_conf_dirs_list:
        fill_conf_dir(conf_dir)

    XmlConfig(
        "hive-site.xml",
        conf_dir=params.hive_config_dir,
        configurations=params.hive_site_config,
        configuration_attributes=params.config['configuration_attributes']
        ['hive-site'],
        owner=params.hive_user,
        group=params.user_group,
        mode=0644)

    setup_atlas_hive()

    if params.hive_specific_configs_supported and name == 'hiveserver2':
        XmlConfig(
            "hiveserver2-site.xml",
            conf_dir=params.hive_server_conf_dir,
            configurations=params.config['configurations']['hiveserver2-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['hiveserver2-site'],
            owner=params.hive_user,
            group=params.user_group,
            mode=0644)

    File(format("{hive_config_dir}/hive-env.sh"),
         owner=params.hive_user,
         group=params.user_group,
         content=InlineTemplate(params.hive_env_sh_template))

    # On some OSes this folder may not exist, so create it before placing files there
    Directory(params.limits_conf_dir,
              create_parents=True,
              owner='root',
              group='root')

    File(os.path.join(params.limits_conf_dir, 'hive.conf'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("hive.conf.j2"))

    if (name == 'metastore'
            or name == 'hiveserver2') and not os.path.exists(params.target):
        jdbc_connector()

    File(
        format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
        content=DownloadSource(
            format("{jdk_location}{check_db_connection_jar_name}")),
        mode=0644,
    )

    if name == 'metastore':
        File(params.start_metastore_path,
             mode=0755,
             content=StaticFile('startMetastore.sh'))
        if params.init_metastore_schema:
            create_schema_cmd = format(
                "export HIVE_CONF_DIR={hive_server_conf_dir} ; "
                "{hive_bin}/schematool -initSchema "
                "-dbType {hive_metastore_db_type} "
                "-userName {hive_metastore_user_name} "
                "-passWord {hive_metastore_user_passwd!p}")

            check_schema_created_cmd = as_user(
                format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
                       "{hive_bin}/schematool -info "
                       "-dbType {hive_metastore_db_type} "
                       "-userName {hive_metastore_user_name} "
                       "-passWord {hive_metastore_user_passwd!p}"),
                params.hive_user)

            # HACK: in cases with quoted passwords and as_user (which does the quoting as well) !p won't work for hiding passwords.
            # Fixing it with the hack below:
            quoted_hive_metastore_user_passwd = quote_bash_args(
                quote_bash_args(params.hive_metastore_user_passwd))
            if quoted_hive_metastore_user_passwd[0] == "'" and quoted_hive_metastore_user_passwd[-1] == "'" \
                or quoted_hive_metastore_user_passwd[0] == '"' and quoted_hive_metastore_user_passwd[-1] == '"':
                quoted_hive_metastore_user_passwd = quoted_hive_metastore_user_passwd[
                    1:-1]
            Logger.sensitive_strings[repr(check_schema_created_cmd)] = repr(
                check_schema_created_cmd.replace(
                    format("-passWord {quoted_hive_metastore_user_passwd}"),
                    "-passWord " + utils.PASSWORDS_HIDE_STRING))

            Execute(create_schema_cmd,
                    not_if=check_schema_created_cmd,
                    user=params.hive_user)
    elif name == 'hiveserver2':
        File(params.start_hiveserver2_path,
             mode=0755,
             content=Template(format('{start_hiveserver2_script}')))

    if name != "client":
        crt_directory(params.hive_pid_dir)
        crt_directory(params.hive_log_dir)
        crt_directory(params.hive_var_lib)
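
crt_directory is not shown in this example; judging from the equivalent
Directory resources in the hive_interactive example above, it is assumed to
be a small wrapper along these lines (ownership and mode are assumptions):

    def crt_directory(path):
        import params
        Directory(path,
                  create_parents=True,
                  cd_access='a',
                  owner=params.hive_user,
                  group=params.user_group,
                  mode=0755)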
Example No. 19
def setup_ranger_plugin(component_select_name,
                        service_name,
                        previous_jdbc_jar,
                        component_downloaded_custom_connector,
                        component_driver_curl_source,
                        component_driver_curl_target,
                        java_home,
                        repo_name,
                        plugin_repo_dict,
                        ranger_env_properties,
                        plugin_properties,
                        policy_user,
                        policymgr_mgr_url,
                        plugin_enabled,
                        conf_dict,
                        component_user,
                        component_group,
                        cache_service_list,
                        plugin_audit_properties,
                        plugin_audit_attributes,
                        plugin_security_properties,
                        plugin_security_attributes,
                        plugin_policymgr_ssl_properties,
                        plugin_policymgr_ssl_attributes,
                        component_list,
                        audit_db_is_enabled,
                        credential_file,
                        xa_audit_db_password,
                        ssl_truststore_password,
                        ssl_keystore_password,
                        api_version=None,
                        stack_version_override=None,
                        skip_if_rangeradmin_down=True,
                        is_security_enabled=False,
                        is_stack_supports_ranger_kerberos=False,
                        component_user_principal=None,
                        component_user_keytab=None,
                        cred_lib_path_override=None,
                        cred_setup_prefix_override=None):

    if audit_db_is_enabled and component_driver_curl_source is not None and not component_driver_curl_source.endswith(
            "/None"):
        if previous_jdbc_jar and os.path.isfile(previous_jdbc_jar):
            File(previous_jdbc_jar, action='delete')

        File(component_downloaded_custom_connector,
             content=DownloadSource(component_driver_curl_source),
             mode=0644)

        Execute(('cp', '--remove-destination',
                 component_downloaded_custom_connector,
                 component_driver_curl_target),
                path=["/bin", "/usr/bin/"],
                sudo=True)

        File(component_driver_curl_target, mode=0644)

    policymgr_mgr_url = policymgr_mgr_url.rstrip('/')

    if stack_version_override is None:
        stack_version = get_stack_version(component_select_name)
    else:
        stack_version = stack_version_override

    component_conf_dir = conf_dict

    if plugin_enabled:

        service_name_exist = False
        policycache_path = os.path.join('/etc', 'ranger', repo_name,
                                        'policycache')
        try:
            for cache_service in cache_service_list:
                policycache_json_file = format(
                    '{policycache_path}/{cache_service}_{repo_name}.json')
                if os.path.isfile(policycache_json_file) and os.path.getsize(
                        policycache_json_file) > 0:
                    with open(policycache_json_file) as json_file:
                        json_data = json.load(json_file)
                        if 'serviceName' in json_data and json_data[
                                'serviceName'] == repo_name:
                            service_name_exist = True
                            Logger.info(
                                "Skipping Ranger API calls, as policy cache file exists for {0}"
                                .format(service_name))
                            Logger.warning(
                                "If service name for {0} is not created on Ranger Admin UI, then to re-create it delete policy cache file: {1}"
                                .format(service_name, policycache_json_file))
                            break
        except Exception, err:
            Logger.error(
                "Error occurred while fetching service name from policy cache file.\nError: {0}"
                .format(err))

        if not service_name_exist:
            if api_version == 'v2':
                ranger_adm_obj = RangeradminV2(
                    url=policymgr_mgr_url,
                    skip_if_rangeradmin_down=skip_if_rangeradmin_down)
                ranger_adm_obj.create_ranger_repository(
                    service_name, repo_name, plugin_repo_dict,
                    ranger_env_properties['ranger_admin_username'],
                    ranger_env_properties['ranger_admin_password'],
                    ranger_env_properties['admin_username'],
                    ranger_env_properties['admin_password'], policy_user,
                    is_security_enabled, is_stack_supports_ranger_kerberos,
                    component_user, component_user_principal,
                    component_user_keytab)
            else:
                ranger_adm_obj = Rangeradmin(
                    url=policymgr_mgr_url,
                    skip_if_rangeradmin_down=skip_if_rangeradmin_down)
                ranger_adm_obj.create_ranger_repository(
                    service_name, repo_name, plugin_repo_dict,
                    ranger_env_properties['ranger_admin_username'],
                    ranger_env_properties['ranger_admin_password'],
                    ranger_env_properties['admin_username'],
                    ranger_env_properties['admin_password'], policy_user)

        current_datetime = datetime.now()

        File(
            format('{component_conf_dir}/ranger-security.xml'),
            owner=component_user,
            group=component_group,
            mode=0644,
            content=InlineTemplate(
                format(
                    '<ranger>\n<enabled>{current_datetime}</enabled>\n</ranger>'
                )))

        Directory([
            os.path.join('/etc', 'ranger', repo_name),
            os.path.join('/etc', 'ranger', repo_name, 'policycache')
        ],
                  owner=component_user,
                  group=component_group,
                  mode=0775,
                  create_parents=True,
                  cd_access='a')

        for cache_service in cache_service_list:
            File(os.path.join('/etc', 'ranger', repo_name, 'policycache',
                              format('{cache_service}_{repo_name}.json')),
                 owner=component_user,
                 group=component_group,
                 mode=0644)

        # remove plain-text password from xml configs
        plugin_audit_password_property = 'xasecure.audit.destination.db.password'
        plugin_audit_properties_copy = {}
        plugin_audit_properties_copy.update(plugin_audit_properties)

        if plugin_audit_password_property in plugin_audit_properties_copy:
            plugin_audit_properties_copy[
                plugin_audit_password_property] = "crypted"

        XmlConfig(format('ranger-{service_name}-audit.xml'),
                  conf_dir=component_conf_dir,
                  configurations=plugin_audit_properties_copy,
                  configuration_attributes=plugin_audit_attributes,
                  owner=component_user,
                  group=component_group,
                  mode=0744)

        XmlConfig(format('ranger-{service_name}-security.xml'),
                  conf_dir=component_conf_dir,
                  configurations=plugin_security_properties,
                  configuration_attributes=plugin_security_attributes,
                  owner=component_user,
                  group=component_group,
                  mode=0744)

        # remove plain-text password from xml configs
        plugin_password_properties = [
            'xasecure.policymgr.clientssl.keystore.password',
            'xasecure.policymgr.clientssl.truststore.password'
        ]
        plugin_policymgr_ssl_properties_copy = {}
        plugin_policymgr_ssl_properties_copy.update(
            plugin_policymgr_ssl_properties)

        for prop in plugin_password_properties:
            if prop in plugin_policymgr_ssl_properties_copy:
                plugin_policymgr_ssl_properties_copy[prop] = "crypted"

        if str(service_name).lower() == 'yarn':
            XmlConfig("ranger-policymgr-ssl-yarn.xml",
                      conf_dir=component_conf_dir,
                      configurations=plugin_policymgr_ssl_properties_copy,
                      configuration_attributes=plugin_policymgr_ssl_attributes,
                      owner=component_user,
                      group=component_group,
                      mode=0744)
        else:
            XmlConfig("ranger-policymgr-ssl.xml",
                      conf_dir=component_conf_dir,
                      configurations=plugin_policymgr_ssl_properties_copy,
                      configuration_attributes=plugin_policymgr_ssl_attributes,
                      owner=component_user,
                      group=component_group,
                      mode=0744)

        # creating the symlink should be done by the rpm package
        # setup_ranger_plugin_jar_symblink(stack_version, service_name, component_list)

        setup_ranger_plugin_keystore(
            service_name, audit_db_is_enabled, stack_version, credential_file,
            xa_audit_db_password, ssl_truststore_password,
            ssl_keystore_password, component_user, component_group, java_home,
            cred_lib_path_override, cred_setup_prefix_override)
Example No. 20
def hive(name=None):
    import params
    hive_client_conf_path = format(
        "{stack_root}/current/{component_directory}/conf")
    # Permissions 644 for conf dir (client) files, and 600 for conf.server
    mode_identified = 0644 if params.hive_config_dir == hive_client_conf_path else 0600
    if name == 'hiveserver2':
        # copy tarball to HDFS feature not supported
        if not (params.stack_version_formatted_major
                and check_stack_feature(StackFeature.COPY_TARBALL_TO_HDFS,
                                        params.stack_version_formatted_major)):
            params.HdfsResource(params.webhcat_apps_dir,
                                type="directory",
                                action="create_on_execute",
                                owner=params.webhcat_user,
                                mode=0755)

        # Create webhcat dirs.
        if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
            params.HdfsResource(params.hcat_hdfs_user_dir,
                                type="directory",
                                action="create_on_execute",
                                owner=params.hcat_user,
                                mode=params.hcat_hdfs_user_mode)

        params.HdfsResource(params.webhcat_hdfs_user_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.webhcat_user,
                            mode=params.webhcat_hdfs_user_mode)

        # ****** Begin Copy Tarballs ******
        # *********************************
        # if the copy-tarball-to-HDFS feature is supported, copy
        # mapreduce.tar.gz and tez.tar.gz to HDFS
        if params.stack_version_formatted_major and check_stack_feature(
                StackFeature.COPY_TARBALL_TO_HDFS,
                params.stack_version_formatted_major):
            copy_to_hdfs("mapreduce",
                         params.user_group,
                         params.hdfs_user,
                         skip=params.sysprep_skip_copy_tarballs_hdfs)
            copy_to_hdfs("tez",
                         params.user_group,
                         params.hdfs_user,
                         skip=params.sysprep_skip_copy_tarballs_hdfs)

        # Always copy pig.tar.gz and hive.tar.gz using the appropriate mode.
        # This can use a different source and dest location to account for
        # both older and newer stack layouts.
        copy_to_hdfs("pig",
                     params.user_group,
                     params.hdfs_user,
                     file_mode=params.tarballs_mode,
                     custom_source_file=params.pig_tar_source,
                     custom_dest_file=params.pig_tar_dest_file,
                     skip=params.sysprep_skip_copy_tarballs_hdfs)
        copy_to_hdfs("hive",
                     params.user_group,
                     params.hdfs_user,
                     file_mode=params.tarballs_mode,
                     custom_source_file=params.hive_tar_source,
                     custom_dest_file=params.hive_tar_dest_file,
                     skip=params.sysprep_skip_copy_tarballs_hdfs)

        wildcard_tarballs = ["sqoop", "hadoop_streaming"]
        for tarball_name in wildcard_tarballs:
            source_file_pattern = getattr(params, tarball_name + "_tar_source")
            dest_dir = getattr(params, tarball_name + "_tar_dest_dir")

            if source_file_pattern is None or dest_dir is None:
                continue

            source_files = glob.glob(
                source_file_pattern) if "*" in source_file_pattern else [
                    source_file_pattern
                ]
            for source_file in source_files:
                src_filename = os.path.basename(source_file)
                dest_file = os.path.join(dest_dir, src_filename)

                copy_to_hdfs(tarball_name,
                             params.user_group,
                             params.hdfs_user,
                             file_mode=params.tarballs_mode,
                             custom_source_file=source_file,
                             custom_dest_file=dest_file,
                             skip=params.sysprep_skip_copy_tarballs_hdfs)
        # ******* End Copy Tarballs *******
        # *********************************

        # if warehouse directory is in DFS
        if not params.whs_dir_protocol or params.whs_dir_protocol == urlparse(
                params.default_fs).scheme:
            # Create Hive Metastore Warehouse Dir
            params.HdfsResource(params.hive_apps_whs_dir,
                                type="directory",
                                action="create_on_execute",
                                owner=params.hive_user,
                                mode=0777)
        else:
            Logger.info(
                format(
                    "Not creating warehouse directory '{hive_apps_whs_dir}', as the location is not in DFS."
                ))

        # Create Hive User Dir
        params.HdfsResource(params.hive_hdfs_user_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.hive_user,
                            mode=params.hive_hdfs_user_mode)

        if not is_empty(params.hive_exec_scratchdir) and not urlparse(
                params.hive_exec_scratchdir).path.startswith("/tmp"):
            params.HdfsResource(
                params.hive_exec_scratchdir,
                type="directory",
                action="create_on_execute",
                owner=params.hive_user,
                group=params.hdfs_user,
                mode=0777
            )  # Hive expects this dir to be writeable by everyone as it is used as a temp dir

        params.HdfsResource(None, action="execute")

    Directory(params.hive_etc_dir_prefix, mode=0755)

    # We should change configurations for client as well as for server.
    # The reason is that stale-configs are service-level, not component.
    Logger.info("Directories to fill with configs: %s" %
                str(params.hive_conf_dirs_list))
    for conf_dir in params.hive_conf_dirs_list:
        fill_conf_dir(conf_dir)

    params.hive_site_config = update_credential_provider_path(
        params.hive_site_config, 'hive-site',
        os.path.join(params.hive_conf_dir, 'hive-site.jceks'),
        params.hive_user, params.user_group)
    XmlConfig(
        "hive-site.xml",
        conf_dir=params.hive_config_dir,
        configurations=params.hive_site_config,
        configuration_attributes=params.config['configuration_attributes']
        ['hive-site'],
        owner=params.hive_user,
        group=params.user_group,
        mode=mode_identified)

    # Generate atlas-application.properties.xml file
    if params.enable_atlas_hook:
        atlas_hook_filepath = os.path.join(params.hive_config_dir,
                                           params.atlas_hook_filename)
        setup_atlas_hook(SERVICE.HIVE,
                         params.hive_atlas_application_properties,
                         atlas_hook_filepath, params.hive_user,
                         params.user_group)

    if name == 'hiveserver2':
        XmlConfig(
            "hiveserver2-site.xml",
            conf_dir=params.hive_server_conf_dir,
            configurations=params.config['configurations']['hiveserver2-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['hiveserver2-site'],
            owner=params.hive_user,
            group=params.user_group,
            mode=0600)

    if params.hive_metastore_site_supported and name == 'metastore':
        XmlConfig(
            "hivemetastore-site.xml",
            conf_dir=params.hive_server_conf_dir,
            configurations=params.config['configurations']
            ['hivemetastore-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['hivemetastore-site'],
            owner=params.hive_user,
            group=params.user_group,
            mode=0600)

    File(format("{hive_config_dir}/hive-env.sh"),
         owner=params.hive_user,
         group=params.user_group,
         mode=mode_identified,
         content=InlineTemplate(params.hive_env_sh_template))

    # On some OSes this folder may not exist, so create it before placing files in it
    Directory(params.limits_conf_dir,
              create_parents=True,
              owner='root',
              group='root')

    File(os.path.join(params.limits_conf_dir, 'hive.conf'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("hive.conf.j2"))
    if params.security_enabled:
        File(os.path.join(params.hive_config_dir, 'zkmigrator_jaas.conf'),
             owner=params.hive_user,
             group=params.user_group,
             content=Template("zkmigrator_jaas.conf.j2"))

    if name == 'metastore' or name == 'hiveserver2':
        if params.hive_jdbc_target is not None and not os.path.exists(
                params.hive_jdbc_target):
            jdbc_connector(params.hive_jdbc_target,
                           params.hive_previous_jdbc_jar)
        if params.hive2_jdbc_target is not None and not os.path.exists(
                params.hive2_jdbc_target):
            jdbc_connector(params.hive2_jdbc_target,
                           params.hive2_previous_jdbc_jar)

    File(
        format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
        content=DownloadSource(
            format("{jdk_location}{check_db_connection_jar_name}")),
        mode=0644,
    )

    if name == 'metastore':
        File(os.path.join(params.hive_server_conf_dir,
                          "hadoop-metrics2-hivemetastore.properties"),
             owner=params.hive_user,
             group=params.user_group,
             mode=0600,
             content=Template("hadoop-metrics2-hivemetastore.properties.j2"))

        File(params.start_metastore_path,
             mode=0755,
             content=StaticFile('startMetastore.sh'))
    elif name == 'hiveserver2':
        File(params.start_hiveserver2_path,
             mode=0755,
             content=Template(format('{start_hiveserver2_script}')))

        File(os.path.join(params.hive_server_conf_dir,
                          "hadoop-metrics2-hiveserver2.properties"),
             owner=params.hive_user,
             group=params.user_group,
             mode=0600,
             content=Template("hadoop-metrics2-hiveserver2.properties.j2"))

    if name != "client":
        Directory(params.hive_pid_dir,
                  create_parents=True,
                  cd_access='a',
                  owner=params.hive_user,
                  group=params.user_group,
                  mode=0755)
        Directory(params.hive_log_dir,
                  create_parents=True,
                  cd_access='a',
                  owner=params.hive_user,
                  group=params.user_group,
                  mode=0755)
        Directory(params.hive_var_lib,
                  create_parents=True,
                  cd_access='a',
                  owner=params.hive_user,
                  group=params.user_group,
                  mode=0755)
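
For reference, the wildcard-tarball loop above resolves a source pattern with glob only when the pattern actually contains a wildcard; otherwise it treats it as a literal path. A minimal standalone sketch of that resolution step, assuming plain Python outside the resource_management framework (the sqoop paths below are illustrative only):

import glob
import os

def resolve_tarball_sources(source_file_pattern):
    # Expand only real glob patterns; literal paths pass through unchanged.
    if "*" in source_file_pattern:
        return glob.glob(source_file_pattern)
    return [source_file_pattern]

# Each matching tarball keeps its basename at the destination directory:
for source_file in resolve_tarball_sources("/usr/hdp/current/sqoop-client/sqoop*.tar.gz"):
    dest_file = os.path.join("/hdp/apps/sqoop", os.path.basename(source_file))
    print(dest_file)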
Example No. 21
def hive(name=None):
    import params

    if name == 'hiveserver2':
        # BigInsights 4.0.* or lower
        if params.stack_version != "" and compare_versions(
                params.stack_version, "4.1.0.0") < 0:
            params.HdfsResource(params.webhcat_apps_dir,
                                type="directory",
                                action="create_on_execute",
                                owner=params.webhcat_user,
                                mode=0755)

        # Create webhcat dirs.
        if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
            params.HdfsResource(params.hcat_hdfs_user_dir,
                                type="directory",
                                action="create_on_execute",
                                owner=params.hcat_user,
                                mode=params.hcat_hdfs_user_mode)

        params.HdfsResource(params.webhcat_hdfs_user_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.webhcat_user,
                            mode=params.webhcat_hdfs_user_mode)

        # ****** Begin Copy Tarballs ******
        # *********************************
        if params.stack_version != "" and compare_versions(
                params.stack_version, '4.0.0.0') >= 0:
            copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user)

        # Always copy pig.tar.gz and hive.tar.gz using the appropriate mode.
        copy_to_hdfs("pig",
                     params.user_group,
                     params.hdfs_user,
                     file_mode=params.tarballs_mode,
                     custom_source_file=params.pig_tar_source,
                     custom_dest_file=params.pig_tar_dest_file)
        copy_to_hdfs("hive",
                     params.user_group,
                     params.hdfs_user,
                     file_mode=params.tarballs_mode,
                     custom_source_file=params.hive_tar_source,
                     custom_dest_file=params.hive_tar_dest_file)

        wildcard_tarballs = ["sqoop", "hadoop_streaming"]
        for tarball_name in wildcard_tarballs:
            source_file_pattern = getattr(params, tarball_name + "_tar_source")
            dest_dir = getattr(params, tarball_name + "_tar_dest_dir")

            if source_file_pattern is None or dest_dir is None:
                continue

            source_files = glob.glob(
                source_file_pattern) if "*" in source_file_pattern else [
                    source_file_pattern
                ]
            for source_file in source_files:
                src_filename = os.path.basename(source_file)
                dest_file = os.path.join(dest_dir, src_filename)

                copy_to_hdfs(tarball_name,
                             params.user_group,
                             params.hdfs_user,
                             file_mode=params.tarballs_mode,
                             custom_source_file=source_file,
                             custom_dest_file=dest_file)

        # ******* End Copy Tarballs *******
        # *********************************

        params.HdfsResource(params.hive_apps_whs_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.hive_user,
                            group=params.user_group,
                            mode=0770)
        # Create Hive User Dir
        params.HdfsResource(params.hive_hdfs_user_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.hive_user,
                            mode=params.hive_hdfs_user_mode)

        # hive.exec.scratchdir should be created by hive_user; otherwise
        # hive.start.cleanup.scratchdir won't work, because Ambari always starts services as hive_user
        if not is_empty(params.hive_exec_scratchdir):
            params.HdfsResource(
                params.hive_exec_scratchdir,
                type="directory",
                action="create_on_execute",
                owner=params.hive_user,
                group=params.hdfs_user,
                mode=0777
            )  # Hive expects this dir to be writeable by everyone as it is used as a temp dir

        params.HdfsResource(None, action="execute")

    Directory(params.hive_etc_dir_prefix, mode=0755)

    # We should change configurations for client as well as for server.
    # The reason is that stale-configs are service-level, not component.
    for conf_dir in params.hive_conf_dirs_list:
        fill_conf_dir(conf_dir)

    if name == "client":
        permissions = 0644
    else:
        permissions = 0660

    XmlConfig(
        "hive-site.xml",
        conf_dir=params.hive_config_dir,
        configurations=params.hive_site_config,
        configuration_attributes=params.config['configuration_attributes']
        ['hive-site'],
        owner=params.hive_user,
        group=params.user_group,
        mode=permissions)

    if params.hive_specific_configs_supported and name == 'hiveserver2':
        XmlConfig(
            "hiveserver2-site.xml",
            conf_dir=params.hive_server_conf_dir,
            configurations=params.config['configurations']['hiveserver2-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['hiveserver2-site'],
            owner=params.hive_user,
            group=params.user_group,
            mode=0644)

    File(format("{hive_config_dir}/hive-env.sh"),
         owner=params.hive_user,
         group=params.user_group,
         content=InlineTemplate(params.hive_env_sh_template))

    # On some OSes this folder may not exist, so create it before placing files in it
    Directory(params.limits_conf_dir,
              create_parents=True,
              owner='root',
              group='root')

    File(os.path.join(params.limits_conf_dir, 'hive.conf'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("hive.conf.j2"))

    if name == 'metastore' or name == 'hiveserver2':
        jdbc_connector()

    File(
        format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
        content=DownloadSource(
            format("{jdk_location}{check_db_connection_jar_name}")),
        mode=0644,
    )

    if name == 'metastore':
        File(params.start_metastore_path,
             mode=0755,
             content=StaticFile('startMetastore.sh'))
        if params.init_metastore_schema:
            create_schema_cmd = format(
                "export HIVE_CONF_DIR={hive_server_conf_dir} ; "
                "{hive_bin}/schematool -initSchema "
                "-dbType {hive_metastore_db_type} "
                "-userName {hive_metastore_user_name} "
                "-passWord {hive_metastore_user_passwd!p}")

            check_schema_created_cmd = as_user(
                format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
                       "{hive_bin}/schematool -info "
                       "-dbType {hive_metastore_db_type} "
                       "-userName {hive_metastore_user_name} "
                       "-passWord {hive_metastore_user_passwd!p}"),
                params.hive_user)

            Execute(create_schema_cmd,
                    not_if=check_schema_created_cmd,
                    user=params.hive_user)
    elif name == 'hiveserver2':
        File(params.start_hiveserver2_path,
             mode=0755,
             content=Template(format('{start_hiveserver2_script}')))

    if name != "client":
        crt_directory(params.hive_pid_dir)
        crt_directory(params.hive_log_dir)
        crt_directory(params.hive_var_lib)
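
Note: crt_directory is not defined in this example. A minimal sketch of what such a helper presumably does, assuming the same resource_management Directory resource and params module used throughout these examples:

def crt_directory(path):
    # Hypothetical helper (not part of the original example): create a runtime
    # directory with the ownership and permissions Hive components expect,
    # mirroring the explicit Directory calls in the neighbouring examples.
    import params
    Directory(path,
              create_parents=True,
              cd_access='a',
              owner=params.hive_user,
              group=params.user_group,
              mode=0755)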
Example No. 22
def falcon(type, action = None, upgrade_type=None):
  import params

  if action == 'config':
    Directory(params.falcon_pid_dir,
      owner = params.falcon_user,
      create_parents = True,
      mode = 0755,
      cd_access = "a",
    )

    Directory(params.falcon_log_dir,
      owner = params.falcon_user,
      create_parents = True,
      mode = 0755,
      cd_access = "a",
    )

    Directory(params.falcon_webapp_dir,
      owner = params.falcon_user,
      create_parents = True)

    Directory(params.falcon_home,
      owner = params.falcon_user,
      create_parents = True)

    Directory(params.etc_prefix_dir,
      mode = 0755,
      create_parents = True)

    Directory(params.falcon_conf_dir,
      owner = params.falcon_user,
      create_parents = True)

    File(params.falcon_conf_dir + '/falcon-env.sh',
      content = InlineTemplate(params.falcon_env_sh_template),
      owner = params.falcon_user,
      group=params.user_group,
    )
    
    PropertiesFile(params.falcon_conf_dir + '/client.properties',
      properties = params.falcon_client_properties,
      mode = 0644,
      owner = params.falcon_user)
      
    PropertiesFile(params.falcon_conf_dir + '/runtime.properties',
      properties = params.falcon_runtime_properties,
      mode = 0644,
      owner = params.falcon_user)

    PropertiesFile(params.falcon_conf_dir + '/startup.properties',
      properties = params.falcon_startup_properties,
      mode = 0644,
      owner = params.falcon_user)

    if params.falcon_graph_storage_directory:
      Directory(params.falcon_graph_storage_directory,
        owner = params.falcon_user,
        group = params.user_group,
        mode = 0775,
        create_parents = True,
        cd_access = "a")

    if params.falcon_graph_serialize_path:
      Directory(params.falcon_graph_serialize_path,
        owner = params.falcon_user,
        group = params.user_group,
        mode = 0775,
        create_parents = True,
        cd_access = "a")

    # Generate atlas-application.properties.xml file
    if has_atlas_in_cluster():
      atlas_hook_filepath = os.path.join(params.falcon_conf_dir, params.atlas_hook_filename)
      setup_atlas_hook(SERVICE.FALCON, params.falcon_atlas_application_properties, atlas_hook_filepath, params.falcon_user, params.user_group)

  if type == 'server':
    if action == 'config':
      if params.store_uri.startswith("hdfs"):
        params.HdfsResource(params.store_uri,
          type = "directory",
          action = "create_on_execute",
          owner = params.falcon_user,
          mode = 0755)
      elif params.store_uri.startswith("file"):
        # strip the "file://" scheme prefix to get the local filesystem path
        Directory(params.store_uri[7:],
          owner = params.falcon_user,
          create_parents = True)

      # TODO change to proper mode
      params.HdfsResource(params.falcon_apps_dir,
        type = "directory",
        action = "create_on_execute",
        owner = params.falcon_user,
        mode = 0777)

      # In HDP 2.4 and earlier, the data-mirroring directory was copied to HDFS.
      if params.supports_data_mirroring:
        params.HdfsResource(params.dfs_data_mirroring_dir,
          type = "directory",
          action = "create_on_execute",
          owner = params.falcon_user,
          group = params.proxyuser_group,
          recursive_chown = True,
          recursive_chmod = True,
          mode = 0770,
          source = params.local_data_mirroring_dir)

      if params.supports_falcon_extensions:

        params.HdfsResource(params.falcon_extensions_dest_dir,
                            type = "directory",
                            action = "create_on_execute",
                            owner = params.falcon_user,
                            group = params.proxyuser_group,
                            recursive_chown = True,
                            recursive_chmod = True,
                            mode = 0755,
                            source = params.falcon_extensions_source_dir)
        # Create the extensions HiveDR store
        params.HdfsResource(os.path.join(params.falcon_extensions_dest_dir, "mirroring"),
                            type = "directory",
                            action = "create_on_execute",
                            owner = params.falcon_user,
                            group = params.proxyuser_group,
                            mode = 0770)

      # At least one HDFS Dir should be created, so execute the change now.
      params.HdfsResource(None, action = "execute")

      Directory(params.falcon_local_dir,
        owner = params.falcon_user,
        create_parents = True,
        cd_access = "a")

      if params.falcon_embeddedmq_enabled:
        Directory(
          os.path.abspath(os.path.join(params.falcon_embeddedmq_data, "..")),
          owner = params.falcon_user,
          create_parents = True)

        Directory(params.falcon_embeddedmq_data,
          owner = params.falcon_user,
          create_parents = True)

    # although Falcon's falcon-config.sh will use 'which hadoop' to figure
    # this out, in an upgraded cluster, it's possible that 'which hadoop'
    # still points to older binaries; it's safer to just pass in the
    # hadoop home directory to use
    environment_dictionary = { "HADOOP_HOME" : params.hadoop_home_dir }

    pid = get_user_call_output.get_user_call_output(format("cat {server_pid_file}"), user=params.falcon_user, is_checked_call=False)[1]
    process_exists = format("ls {server_pid_file} && ps -p {pid}")

    if action == 'start':
      if not os.path.exists(params.target_jar_file):
        try:
          File(params.target_jar_file,
            content = DownloadSource(params.bdb_resource_name))
        except Exception:
          exc_msg = traceback.format_exc()
          exception_message = format("Caught exception while downloading {bdb_resource_name}:\n{exc_msg}")
          Logger.error(exception_message)

        if not os.path.isfile(params.target_jar_file):
          error_message = """
If you are using bdb as the Falcon graph db store, please run
ambari-server setup --jdbc-db=bdb --jdbc-driver=<path to je5.0.73.jar>
on the Ambari server host. Otherwise Falcon startup will fail.
Alternatively, configure Falcon to use HBase as the backend, as described
in the Falcon documentation.
"""
          Logger.error(error_message)
      try:
        Execute(format('{falcon_home}/bin/falcon-start -port {falcon_port}'),
          user = params.falcon_user,
          path = params.hadoop_bin_dir,
          environment=environment_dictionary,
          not_if = process_exists,
        )
      except:
        show_logs(params.falcon_log_dir, params.falcon_user)
        raise

    if action == 'stop':
      try:
        Execute(format('{falcon_home}/bin/falcon-stop'),
          user = params.falcon_user,
          path = params.hadoop_bin_dir,
          environment=environment_dictionary)
      except:
        show_logs(params.falcon_log_dir, params.falcon_user)
        raise
      
      File(params.server_pid_file, action = 'delete')
Example No. 23
def kms():
    import params

    if params.has_ranger_admin:

        Directory(params.kms_conf_dir,
                  owner=params.kms_user,
                  group=params.kms_group,
                  recursive=True)

        if params.xa_audit_db_is_enabled:
            File(params.downloaded_connector_path,
                 content=DownloadSource(params.driver_source),
                 mode=0644)

            Execute(('cp', '--remove-destination',
                     params.downloaded_connector_path, params.driver_target),
                    path=["/bin", "/usr/bin/"],
                    sudo=True)

            File(params.driver_target, mode=0644)

        Directory(os.path.join(params.kms_home, 'ews', 'webapp', 'WEB-INF',
                               'classes', 'lib'),
                  mode=0755,
                  owner=params.kms_user,
                  group=params.kms_group)

        Execute(('cp', format('{kms_home}/ranger-kms-initd'),
                 '/etc/init.d/ranger-kms'),
                not_if=format('ls /etc/init.d/ranger-kms'),
                only_if=format('ls {kms_home}/ranger-kms-initd'),
                sudo=True)

        File('/etc/init.d/ranger-kms', mode=0755)

        Execute(('chown', '-R', format('{kms_user}:{kms_group}'),
                 format('{kms_home}/')),
                sudo=True)

        Directory(params.kms_log_dir,
                  owner=params.kms_user,
                  group=params.kms_group)

        Execute(('ln', '-sf', format('{kms_home}/ranger-kms'),
                 '/usr/bin/ranger-kms'),
                not_if=format('ls /usr/bin/ranger-kms'),
                only_if=format('ls {kms_home}/ranger-kms'),
                sudo=True)

        File('/usr/bin/ranger-kms', mode=0755)

        Execute(('ln', '-sf', format('{kms_home}/ranger-kms'),
                 '/usr/bin/ranger-kms-services.sh'),
                not_if=format('ls /usr/bin/ranger-kms-services.sh'),
                only_if=format('ls {kms_home}/ranger-kms'),
                sudo=True)

        File('/usr/bin/ranger-kms-services.sh', mode=0755)

        Execute(('ln', '-sf', format('{kms_home}/ranger-kms-initd'),
                 format('{kms_home}/ranger-kms-services.sh')),
                not_if=format('ls {kms_home}/ranger-kms-services.sh'),
                only_if=format('ls {kms_home}/ranger-kms-initd'),
                sudo=True)

        File(format('{kms_home}/ranger-kms-services.sh'), mode=0755)

        Directory(params.kms_log_dir,
                  owner=params.kms_user,
                  group=params.kms_group,
                  mode=0775)

        do_keystore_setup(params.credential_provider_path, params.jdbc_alias,
                          params.db_password)
        do_keystore_setup(params.credential_provider_path,
                          params.masterkey_alias,
                          params.kms_master_key_password)

        XmlConfig(
            "dbks-site.xml",
            conf_dir=params.kms_conf_dir,
            configurations=params.config['configurations']['dbks-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['dbks-site'],
            owner=params.kms_user,
            group=params.kms_group,
            mode=0644)

        XmlConfig(
            "ranger-kms-site.xml",
            conf_dir=params.kms_conf_dir,
            configurations=params.config['configurations']['ranger-kms-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['ranger-kms-site'],
            owner=params.kms_user,
            group=params.kms_group,
            mode=0644)

        XmlConfig(
            "kms-site.xml",
            conf_dir=params.kms_conf_dir,
            configurations=params.config['configurations']['kms-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['kms-site'],
            owner=params.kms_user,
            group=params.kms_group,
            mode=0644)

        File(os.path.join(params.kms_conf_dir, "kms-log4j.properties"),
             owner=params.kms_user,
             group=params.kms_group,
             content=params.kms_log4j,
             mode=0644)
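
The ln -sf calls above all follow the same guarded-symlink idiom: create the link only if it does not yet exist and its target does, then mark it executable. A sketch of that idiom as a reusable helper (illustrative only; it relies on resource_management's format() picking the arguments up from the caller's locals):

def guarded_symlink(target, link):
    # Hypothetical helper, not part of the original example.
    Execute(('ln', '-sf', target, link),
            not_if=format('ls {link}'),
            only_if=format('ls {target}'),
            sudo=True)
    File(link, mode=0755)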
Example No. 24
def hive(name=None):
    import params

    if name == 'hiveserver2':
        # The copy-tarball-to-HDFS feature is not supported on this stack version
        if not (params.stack_version_formatted_major
                and check_stack_feature(StackFeature.COPY_TARBALL_TO_HDFS,
                                        params.stack_version_formatted_major)):
            params.HdfsResource(params.webhcat_apps_dir,
                                type="directory",
                                action="create_on_execute",
                                owner=params.webhcat_user,
                                mode=0755)

        # Create webhcat dirs.
        if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
            params.HdfsResource(params.hcat_hdfs_user_dir,
                                type="directory",
                                action="create_on_execute",
                                owner=params.hcat_user,
                                mode=params.hcat_hdfs_user_mode)

        params.HdfsResource(params.webhcat_hdfs_user_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.webhcat_user,
                            mode=params.webhcat_hdfs_user_mode)

        # ****** Begin Copy Tarballs ******
        # *********************************
        # If the copy-tarball-to-HDFS feature is supported, copy mapreduce.tar.gz and tez.tar.gz to HDFS
        if params.stack_version_formatted_major and check_stack_feature(
                StackFeature.COPY_TARBALL_TO_HDFS,
                params.stack_version_formatted_major):
            copy_to_hdfs("mapreduce",
                         params.user_group,
                         params.hdfs_user,
                         skip=params.sysprep_skip_copy_tarballs_hdfs)
            copy_to_hdfs("tez",
                         params.user_group,
                         params.hdfs_user,
                         skip=params.sysprep_skip_copy_tarballs_hdfs)

        # Always copy pig.tar.gz and hive.tar.gz using the appropriate mode.
        # These can use a custom source file and destination path to account for stack-specific tarball locations.
        copy_to_hdfs("pig",
                     params.user_group,
                     params.hdfs_user,
                     file_mode=params.tarballs_mode,
                     custom_source_file=params.pig_tar_source,
                     custom_dest_file=params.pig_tar_dest_file,
                     skip=params.sysprep_skip_copy_tarballs_hdfs)
        copy_to_hdfs("hive",
                     params.user_group,
                     params.hdfs_user,
                     file_mode=params.tarballs_mode,
                     custom_source_file=params.hive_tar_source,
                     custom_dest_file=params.hive_tar_dest_file,
                     skip=params.sysprep_skip_copy_tarballs_hdfs)

        wildcard_tarballs = ["sqoop", "hadoop_streaming"]
        for tarball_name in wildcard_tarballs:
            source_file_pattern = getattr(params, tarball_name + "_tar_source")
            dest_dir = getattr(params, tarball_name + "_tar_dest_dir")

            if source_file_pattern is None or dest_dir is None:
                continue

            source_files = glob.glob(
                source_file_pattern) if "*" in source_file_pattern else [
                    source_file_pattern
                ]
            for source_file in source_files:
                src_filename = os.path.basename(source_file)
                dest_file = os.path.join(dest_dir, src_filename)

                copy_to_hdfs(tarball_name,
                             params.user_group,
                             params.hdfs_user,
                             file_mode=params.tarballs_mode,
                             custom_source_file=source_file,
                             custom_dest_file=dest_file,
                             skip=params.sysprep_skip_copy_tarballs_hdfs)
        # ******* End Copy Tarballs *******
        # *********************************

        # if warehouse directory is in DFS
        if not params.whs_dir_protocol or params.whs_dir_protocol == urlparse(
                params.default_fs).scheme:
            # Create Hive Metastore Warehouse Dir
            params.HdfsResource(params.hive_apps_whs_dir,
                                type="directory",
                                action="create_on_execute",
                                owner=params.hive_user,
                                mode=0777)
        else:
            Logger.info(
                format(
                    "Not creating warehouse directory '{hive_apps_whs_dir}', as the location is not in DFS."
                ))

        # Create Hive User Dir
        params.HdfsResource(params.hive_hdfs_user_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.hive_user,
                            mode=params.hive_hdfs_user_mode)

        if not is_empty(params.hive_exec_scratchdir) and not urlparse(
                params.hive_exec_scratchdir).path.startswith("/tmp"):
            params.HdfsResource(
                params.hive_exec_scratchdir,
                type="directory",
                action="create_on_execute",
                owner=params.hive_user,
                group=params.hdfs_user,
                mode=0777
            )  # Hive expects this dir to be writeable by everyone as it is used as a temp dir

        params.HdfsResource(None, action="execute")

    Directory(params.hive_etc_dir_prefix, mode=0755)

    # We should change configurations for client as well as for server.
    # The reason is that stale-configs are service-level, not component.
    Logger.info("Directories to fill with configs: %s" %
                str(params.hive_conf_dirs_list))
    for conf_dir in params.hive_conf_dirs_list:
        fill_conf_dir(conf_dir)

    XmlConfig(
        "hive-site.xml",
        conf_dir=params.hive_config_dir,
        configurations=params.hive_site_config,
        configuration_attributes=params.config['configuration_attributes']
        ['hive-site'],
        owner=params.hive_user,
        group=params.user_group,
        mode=0644)

    # Generate atlas-application.properties.xml file
    if has_atlas_in_cluster():
        atlas_hook_filepath = os.path.join(params.hive_config_dir,
                                           params.atlas_hook_filename)
        setup_atlas_hook(SERVICE.HIVE,
                         params.hive_atlas_application_properties,
                         atlas_hook_filepath, params.hive_user,
                         params.user_group)

    if name == 'hiveserver2':
        XmlConfig(
            "hiveserver2-site.xml",
            conf_dir=params.hive_server_conf_dir,
            configurations=params.config['configurations']['hiveserver2-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['hiveserver2-site'],
            owner=params.hive_user,
            group=params.user_group,
            mode=0644)

    if params.hive_metastore_site_supported and name == 'metastore':
        XmlConfig(
            "hivemetastore-site.xml",
            conf_dir=params.hive_server_conf_dir,
            configurations=params.config['configurations']
            ['hivemetastore-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['hivemetastore-site'],
            owner=params.hive_user,
            group=params.user_group,
            mode=0644)

    File(format("{hive_config_dir}/hive-env.sh"),
         owner=params.hive_user,
         group=params.user_group,
         content=InlineTemplate(params.hive_env_sh_template))

    # On some OSes this folder may not exist, so create it before placing files in it
    Directory(params.limits_conf_dir,
              create_parents=True,
              owner='root',
              group='root')

    File(os.path.join(params.limits_conf_dir, 'hive.conf'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("hive.conf.j2"))

    if name == 'metastore' or name == 'hiveserver2':
        if params.hive_jdbc_target is not None and not os.path.exists(
                params.hive_jdbc_target):
            jdbc_connector(params.hive_jdbc_target,
                           params.hive_previous_jdbc_jar)
        if params.hive2_jdbc_target is not None and not os.path.exists(
                params.hive2_jdbc_target):
            jdbc_connector(params.hive2_jdbc_target,
                           params.hive2_previous_jdbc_jar)

    File(
        format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
        content=DownloadSource(
            format("{jdk_location}{check_db_connection_jar_name}")),
        mode=0644,
    )

    if name == 'metastore':
        File(os.path.join(params.hive_server_conf_dir,
                          "hadoop-metrics2-hivemetastore.properties"),
             owner=params.hive_user,
             group=params.user_group,
             content=Template("hadoop-metrics2-hivemetastore.properties.j2"))

        File(params.start_metastore_path,
             mode=0755,
             content=StaticFile('startMetastore.sh'))
        if params.init_metastore_schema:
            create_schema_cmd = format(
                "export HIVE_CONF_DIR={hive_server_conf_dir} ; "
                "{hive_schematool_bin}/schematool -initSchema "
                "-dbType {hive_metastore_db_type} "
                "-userName {hive_metastore_user_name} "
                "-passWord {hive_metastore_user_passwd!p} -verbose")

            check_schema_created_cmd = as_user(
                format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
                       "{hive_schematool_bin}/schematool -info "
                       "-dbType {hive_metastore_db_type} "
                       "-userName {hive_metastore_user_name} "
                       "-passWord {hive_metastore_user_passwd!p} -verbose"),
                params.hive_user)

            # HACK: in cases with quoted passwords and as_user (which does the quoting as well) !p won't work for hiding passwords.
            # Fixing it with the hack below:
            quoted_hive_metastore_user_passwd = quote_bash_args(
                quote_bash_args(params.hive_metastore_user_passwd))
            if quoted_hive_metastore_user_passwd[0] == "'" and quoted_hive_metastore_user_passwd[-1] == "'" \
                or quoted_hive_metastore_user_passwd[0] == '"' and quoted_hive_metastore_user_passwd[-1] == '"':
                quoted_hive_metastore_user_passwd = quoted_hive_metastore_user_passwd[
                    1:-1]
            Logger.sensitive_strings[repr(check_schema_created_cmd)] = repr(
                check_schema_created_cmd.replace(
                    format("-passWord {quoted_hive_metastore_user_passwd}"),
                    "-passWord " + utils.PASSWORDS_HIDE_STRING))

            Execute(create_schema_cmd,
                    not_if=check_schema_created_cmd,
                    user=params.hive_user)
    elif name == 'hiveserver2':
        File(params.start_hiveserver2_path,
             mode=0755,
             content=Template(format('{start_hiveserver2_script}')))

        File(os.path.join(params.hive_server_conf_dir,
                          "hadoop-metrics2-hiveserver2.properties"),
             owner=params.hive_user,
             group=params.user_group,
             content=Template("hadoop-metrics2-hiveserver2.properties.j2"))

    if name != "client":
        Directory(params.hive_pid_dir,
                  create_parents=True,
                  cd_access='a',
                  owner=params.hive_user,
                  group=params.user_group,
                  mode=0755)
        Directory(params.hive_log_dir,
                  create_parents=True,
                  cd_access='a',
                  owner=params.hive_user,
                  group=params.user_group,
                  mode=0755)
        Directory(params.hive_var_lib,
                  create_parents=True,
                  cd_access='a',
                  owner=params.hive_user,
                  group=params.user_group,
                  mode=0755)
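
The schematool pair in this example is an instance of a common idempotency pattern in these scripts: run an expensive initialization command only when a cheaper probe fails. A condensed sketch of the pattern with hypothetical commands (mytool is a placeholder, not a real binary):

probe_cmd = "/usr/bin/mytool --info"   # exits non-zero while not yet initialized
init_cmd = "/usr/bin/mytool --init"

Execute(init_cmd,
        not_if=probe_cmd,              # skipped once the probe succeeds
        user="hive")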
Example No. 25
def setup_kms_db():
    import params

    if params.has_ranger_admin:

        File(params.downloaded_custom_connector,
             content=DownloadSource(params.driver_curl_source),
             mode=0644)

        Directory(params.java_share_dir,
                  mode=0755,
                  recursive=True,
                  cd_access="a")

        if params.db_flavor.lower() != 'sqla':
            Execute(('cp', '--remove-destination',
                     params.downloaded_custom_connector,
                     params.driver_curl_target),
                    path=["/bin", "/usr/bin/"],
                    sudo=True)

            File(params.driver_curl_target, mode=0644)

        Directory(os.path.join(params.kms_home, 'ews', 'lib'), mode=0755)

        if params.db_flavor.lower() == 'sqla':
            Execute(('tar', '-xvf', params.downloaded_custom_connector, '-C',
                     params.tmp_dir),
                    sudo=True)

            Execute(('cp', '--remove-destination', params.jar_path_in_archive,
                     os.path.join(params.kms_home, 'ews', 'webapp', 'lib')),
                    path=["/bin", "/usr/bin/"],
                    sudo=True)

            Directory(params.jdbc_libs_dir, cd_access="a", recursive=True)

            Execute(as_sudo([
                'yes', '|', 'cp', params.libs_path_in_archive,
                params.jdbc_libs_dir
            ],
                            auto_escape=False),
                    path=["/bin", "/usr/bin/"])
        else:
            Execute(('cp', '--remove-destination',
                     params.downloaded_custom_connector,
                     os.path.join(params.kms_home, 'ews', 'webapp', 'lib')),
                    path=["/bin", "/usr/bin/"],
                    sudo=True)

        File(os.path.join(params.kms_home, 'ews', 'webapp', 'lib',
                          params.jdbc_jar_name),
             mode=0644)

        ModifyPropertiesFile(
            format("/usr/hdp/current/ranger-kms/install.properties"),
            properties=params.config['configurations']['kms-properties'],
            owner=params.kms_user)

        if params.db_flavor.lower() == 'sqla':
            ModifyPropertiesFile(
                format("{kms_home}/install.properties"),
                properties={
                    'SQL_CONNECTOR_JAR':
                    format('{kms_home}/ews/webapp/lib/{jdbc_jar_name}')
                },
                owner=params.kms_user,
            )

        env_dict = {
            'RANGER_KMS_HOME': params.kms_home,
            'JAVA_HOME': params.java_home
        }
        if params.db_flavor.lower() == 'sqla':
            env_dict = {
                'RANGER_KMS_HOME': params.kms_home,
                'JAVA_HOME': params.java_home,
                'LD_LIBRARY_PATH': params.ld_library_path
            }

        dba_setup = format('ambari-python-wrap {kms_home}/dba_script.py -q')
        db_setup = format('ambari-python-wrap {kms_home}/db_setup.py')

        Execute(dba_setup,
                environment=env_dict,
                logoutput=True,
                user=params.kms_user)
        Execute(db_setup,
                environment=env_dict,
                logoutput=True,
                user=params.kms_user)
Example No. 26
def hive(name=None):
  import params
  
  install_lzo_if_needed()

  hive_client_conf_path = format("{stack_root}/current/{component_directory}/conf")
  # Permissions 644 for conf dir (client) files, and 600 for conf.server
  mode_identified = 0644 if params.hive_config_dir == hive_client_conf_path else 0600

  Directory(params.hive_etc_dir_prefix,
            mode=0755
  )

  # We should change configurations for client as well as for server.
  # The reason is that stale-configs are service-level, not component.
  Logger.info("Directories to fill with configs: %s" % str(params.hive_conf_dirs_list))
  for conf_dir in params.hive_conf_dirs_list:
    fill_conf_dir(conf_dir)

  params.hive_site_config = update_credential_provider_path(params.hive_site_config,
                                                     'hive-site',
                                                     os.path.join(params.hive_conf_dir, 'hive-site.jceks'),
                                                     params.hive_user,
                                                     params.user_group
                                                     )
  XmlConfig("hive-site.xml",
            conf_dir=params.hive_config_dir,
            configurations=params.hive_site_config,
            configuration_attributes=params.config['configurationAttributes']['hive-site'],
            owner=params.hive_user,
            group=params.user_group,
            mode=mode_identified)

  # Generate atlas-application.properties.xml file
  if params.enable_atlas_hook:
    atlas_hook_filepath = os.path.join(params.hive_config_dir, params.atlas_hook_filename)
    setup_atlas_hook(SERVICE.HIVE, params.hive_atlas_application_properties, atlas_hook_filepath, params.hive_user, params.user_group)
  
  File(format("{hive_config_dir}/hive-env.sh"),
       owner=params.hive_user,
       group=params.user_group,
       content=InlineTemplate(params.hive_env_sh_template),
       mode=mode_identified
  )

  # On some OSes this folder may not exist, so create it before placing files in it
  Directory(params.limits_conf_dir,
            create_parents = True,
            owner='root',
            group='root'
            )

  File(os.path.join(params.limits_conf_dir, 'hive.conf'),
       owner='root',
       group='root',
       mode=0644,
       content=Template("hive.conf.j2")
       )
  if params.security_enabled:
    File(os.path.join(params.hive_config_dir, 'zkmigrator_jaas.conf'),
         owner=params.hive_user,
         group=params.user_group,
         content=Template("zkmigrator_jaas.conf.j2")
         )

  File(format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
       content = DownloadSource(format("{jdk_location}/{check_db_connection_jar_name}")),
       mode = 0644,
  )

  if name != "client":
    setup_non_client()
  if name == 'hiveserver2':
    setup_hiveserver2()
  if name == 'metastore':
    setup_metastore()
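
The setup_non_client, setup_hiveserver2 and setup_metastore helpers are not included in this example. As a rough sketch (an assumption based on Examples 20 and 24 above), setup_non_client presumably creates the runtime directories shared by the server components:

def setup_non_client():
    # Hypothetical sketch -- the real helper is not shown in this example.
    import params
    for dir_path in (params.hive_pid_dir, params.hive_log_dir, params.hive_var_lib):
        Directory(dir_path,
                  create_parents=True,
                  cd_access='a',
                  owner=params.hive_user,
                  group=params.user_group,
                  mode=0755)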
Example No. 27
def copy_jdbc_connector(stack_version=None):
    import params

    File(params.downloaded_custom_connector,
         content=DownloadSource(params.driver_curl_source),
         mode=0644)

    ranger_home = params.ranger_home
    if stack_version is not None:
        ranger_home = format("{stack_root}/{stack_version}/ranger-admin")

    if params.db_flavor.lower() == 'sqla':
        Execute(('tar', '-xvf', params.downloaded_custom_connector, '-C',
                 params.tmp_dir),
                sudo=True)

        Execute(('cp', '--remove-destination', params.jar_path_in_archive,
                 os.path.join(ranger_home, 'ews', 'lib')),
                path=["/bin", "/usr/bin/"],
                sudo=True)

        File(os.path.join(ranger_home, 'ews', 'lib', 'sajdbc4.jar'), mode=0644)

        Directory(params.jdbc_libs_dir, cd_access="a", create_parents=True)

        Execute(as_sudo([
            'yes', '|', 'cp', params.libs_path_in_archive, params.jdbc_libs_dir
        ],
                        auto_escape=False),
                path=["/bin", "/usr/bin/"])
    else:
        Execute(
            ('cp', '--remove-destination', params.downloaded_custom_connector,
             os.path.join(ranger_home, 'ews', 'lib')),
            path=["/bin", "/usr/bin/"],
            sudo=True)

        File(os.path.join(ranger_home, 'ews', 'lib', params.jdbc_jar_name),
             mode=0644)

    ModifyPropertiesFile(
        format("{ranger_home}/install.properties"),
        properties=params.config['configurations']['admin-properties'],
        owner=params.unix_user,
    )

    if params.db_flavor.lower() == 'sqla':
        ModifyPropertiesFile(
            format("{ranger_home}/install.properties"),
            properties={
                'SQL_CONNECTOR_JAR':
                format('{ranger_home}/ews/lib/sajdbc4.jar')
            },
            owner=params.unix_user,
        )
    else:
        ModifyPropertiesFile(
            format("{ranger_home}/install.properties"),
            properties={'SQL_CONNECTOR_JAR': format('{driver_curl_target}')},
            owner=params.unix_user,
        )
Example No. 28
def oozie(is_server=False):
    import params

    if is_server:
        params.HdfsResource(params.oozie_hdfs_user_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.oozie_user,
                            mode=params.oozie_hdfs_user_mode)
        params.HdfsResource(None, action="execute")
    Directory(params.conf_dir,
              recursive=True,
              owner=params.oozie_user,
              group=params.user_group)
    XmlConfig(
        "oozie-site.xml",
        conf_dir=params.conf_dir,
        configurations=params.oozie_site,
        configuration_attributes=params.config['configuration_attributes']
        ['oozie-site'],
        owner=params.oozie_user,
        group=params.user_group,
        mode=0664)
    File(
        format("{conf_dir}/oozie-env.sh"),
        owner=params.oozie_user,
        content=InlineTemplate(params.oozie_env_sh_template),
        group=params.user_group,
    )

    # On some OSes this folder may not exist, so create it before placing files in it
    Directory(params.limits_conf_dir,
              recursive=True,
              owner='root',
              group='root')

    File(os.path.join(params.limits_conf_dir, 'oozie.conf'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("oozie.conf.j2"))

    if params.log4j_props is not None:
        File(format("{params.conf_dir}/oozie-log4j.properties"),
             mode=0644,
             group=params.user_group,
             owner=params.oozie_user,
             content=params.log4j_props)
    elif os.path.exists(format("{params.conf_dir}/oozie-log4j.properties")):
        File(format("{params.conf_dir}/oozie-log4j.properties"),
             mode=0644,
             group=params.user_group,
             owner=params.oozie_user)

    if params.hdp_stack_version != "" and compare_versions(
            params.hdp_stack_version, '2.2') >= 0:
        File(format("{params.conf_dir}/adminusers.txt"),
             mode=0644,
             group=params.user_group,
             owner=params.oozie_user,
             content=Template('adminusers.txt.j2',
                              oozie_admin_users=params.oozie_admin_users))
    else:
        File(format("{params.conf_dir}/adminusers.txt"),
             owner=params.oozie_user,
             group=params.user_group)

    if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or \
       params.jdbc_driver_name == "com.microsoft.sqlserver.jdbc.SQLServerDriver" or \
       params.jdbc_driver_name == "org.postgresql.Driver" or \
       params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
        File(
            format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
            content=DownloadSource(
                format("{jdk_location}{check_db_connection_jar_name}")),
        )

    oozie_ownership()

    if is_server:
        oozie_server_specific()
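
The four-way or-chain on jdbc_driver_name above can be written more compactly as a membership test; a sketch with the same driver list and no intended behaviour change:

# Sketch only -- equivalent to the or-chain in the example above.
JDBC_DRIVERS_NEEDING_CHECK_JAR = frozenset([
    "com.mysql.jdbc.Driver",
    "com.microsoft.sqlserver.jdbc.SQLServerDriver",
    "org.postgresql.Driver",
    "oracle.jdbc.driver.OracleDriver",
])

if params.jdbc_driver_name in JDBC_DRIVERS_NEEDING_CHECK_JAR:
    File(
        format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
        content=DownloadSource(
            format("{jdk_location}{check_db_connection_jar_name}")))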
Example No. 29

def setup_ranger_plugin(component_select_name, service_name, previous_jdbc_jar,
                        component_downloaded_custom_connector, component_driver_curl_source,
                        component_driver_curl_target, java_home,
                        repo_name, plugin_repo_dict,
                        ranger_env_properties, plugin_properties,
                        policy_user, policymgr_mgr_url,
                        plugin_enabled, conf_dict, component_user, component_group,
                        cache_service_list, plugin_audit_properties, plugin_audit_attributes,
                        plugin_security_properties, plugin_security_attributes,
                        plugin_policymgr_ssl_properties, plugin_policymgr_ssl_attributes,
                        component_list, audit_db_is_enabled, credential_file,
                        xa_audit_db_password, ssl_truststore_password,
                        ssl_keystore_password, api_version=None, stack_version_override = None, skip_if_rangeradmin_down = True,
                        is_security_enabled = False, is_stack_supports_ranger_kerberos = False,
                        component_user_principal = None, component_user_keytab = None, cred_lib_path_override = None, cred_setup_prefix_override = None):

  if audit_db_is_enabled and component_driver_curl_source is not None and not component_driver_curl_source.endswith("/None"):
    if previous_jdbc_jar and os.path.isfile(previous_jdbc_jar):
      File(previous_jdbc_jar, action='delete')

    File(component_downloaded_custom_connector,
      content = DownloadSource(component_driver_curl_source),
      mode = 0644
    )

    Execute(('cp', '--remove-destination', component_downloaded_custom_connector, component_driver_curl_target),
      path=["/bin", "/usr/bin/"],
      sudo=True
    )

    File(component_driver_curl_target, mode=0644)

  if policymgr_mgr_url.endswith('/'):
    policymgr_mgr_url = policymgr_mgr_url.rstrip('/')

  if stack_version_override is None:
    stack_version = get_stack_version(component_select_name)
  else:
    stack_version = stack_version_override

  component_conf_dir = conf_dict

  if plugin_enabled:

    service_name_exist = get_policycache_service_name(service_name, repo_name, cache_service_list)

    if not service_name_exist:
      if api_version is not None and api_version == 'v2':
        ranger_adm_obj = RangeradminV2(url=policymgr_mgr_url, skip_if_rangeradmin_down=skip_if_rangeradmin_down)
        ranger_adm_obj.create_ranger_repository(service_name, repo_name, plugin_repo_dict,
                                                ranger_env_properties['ranger_admin_username'], ranger_env_properties['ranger_admin_password'],
                                                ranger_env_properties['admin_username'], ranger_env_properties['admin_password'],
                                                policy_user, is_security_enabled, is_stack_supports_ranger_kerberos, component_user,
                                                component_user_principal, component_user_keytab)
      else:
        ranger_adm_obj = Rangeradmin(url=policymgr_mgr_url, skip_if_rangeradmin_down=skip_if_rangeradmin_down)
        ranger_adm_obj.create_ranger_repository(service_name, repo_name, plugin_repo_dict,
                                              ranger_env_properties['ranger_admin_username'], ranger_env_properties['ranger_admin_password'],
                                              ranger_env_properties['admin_username'], ranger_env_properties['admin_password'],
                                              policy_user)

    current_datetime = datetime.now()

    File(format('{component_conf_dir}/ranger-security.xml'),
      owner = component_user,
      group = component_group,
      mode = 0644,
      content = InlineTemplate(format('<ranger>\n<enabled>{current_datetime}</enabled>\n</ranger>'))
    )

    Directory([os.path.join('/etc', 'ranger', repo_name), os.path.join('/etc', 'ranger', repo_name, 'policycache')],
      owner = component_user,
      group = component_group,
      mode=0775,
      create_parents = True,
      cd_access = 'a'
    )

    for cache_service in cache_service_list:
      File(os.path.join('/etc', 'ranger', repo_name, 'policycache', format('{cache_service}_{repo_name}.json')),
        owner = component_user,
        group = component_group,
        mode = 0644
      )

    # remove plain-text password from xml configs
    plugin_audit_password_property = 'xasecure.audit.destination.db.password'
    plugin_audit_properties_copy = {}
    plugin_audit_properties_copy.update(plugin_audit_properties)

    if plugin_audit_password_property in plugin_audit_properties_copy:
      plugin_audit_properties_copy[plugin_audit_password_property] = "crypted"

    XmlConfig(format('ranger-{service_name}-audit.xml'),
      conf_dir=component_conf_dir,
      configurations=plugin_audit_properties_copy,
      configuration_attributes=plugin_audit_attributes,
      owner = component_user,
      group = component_group,
      mode=0744)

    XmlConfig(format('ranger-{service_name}-security.xml'),
      conf_dir=component_conf_dir,
      configurations=plugin_security_properties,
      configuration_attributes=plugin_security_attributes,
      owner = component_user,
      group = component_group,
      mode=0744)

    # remove plain-text password from xml configs
    plugin_password_properties = ['xasecure.policymgr.clientssl.keystore.password', 'xasecure.policymgr.clientssl.truststore.password']
    plugin_policymgr_ssl_properties_copy = {}
    plugin_policymgr_ssl_properties_copy.update(plugin_policymgr_ssl_properties)

    for prop in plugin_password_properties:
      if prop in plugin_policymgr_ssl_properties_copy:
        plugin_policymgr_ssl_properties_copy[prop] = "crypted"

    if str(service_name).lower() == 'yarn' :
      XmlConfig("ranger-policymgr-ssl-yarn.xml",
        conf_dir=component_conf_dir,
        configurations=plugin_policymgr_ssl_properties_copy,
        configuration_attributes=plugin_policymgr_ssl_attributes,
        owner = component_user,
        group = component_group,
        mode=0744)
    else:
      XmlConfig("ranger-policymgr-ssl.xml",
        conf_dir=component_conf_dir,
        configurations=plugin_policymgr_ssl_properties_copy,
        configuration_attributes=plugin_policymgr_ssl_attributes,
        owner = component_user,
        group = component_group,
        mode=0744)

    setup_ranger_plugin_keystore(service_name, audit_db_is_enabled, stack_version, credential_file,
              xa_audit_db_password, ssl_truststore_password, ssl_keystore_password,
              component_user, component_group, java_home, cred_lib_path_override, cred_setup_prefix_override)

  else:
    File(format('{component_conf_dir}/ranger-security.xml'),
      action="delete"
    )