Example #1
def enable_kms_plugin():
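    # Enables the Ranger KMS plugin: waits (up to 5 retries) for the Ranger admin
    # service, writes ranger-security.xml and the policy cache layout, renders the
    # ranger-kms-* XmlConfig files, and stores the audit/SSL passwords in the
    # credential file. Module-level imports (time, datetime, os, and the
    # resource_management resources such as File, Directory, XmlConfig, Execute)
    # are assumed to exist in the surrounding Ambari script.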

    import params

    if params.has_ranger_admin:
        count = 0
        while count < 5:
            ranger_flag = check_ranger_service()
            if ranger_flag:
                break
            else:
                time.sleep(5)  # delay for 5 seconds
                count = count + 1
        else:
            Logger.error(
                "Ranger service is not reachable after {0} tries".format(
                    count))

        current_datetime = datetime.now()

        File(
            format('{kms_conf_dir}/ranger-security.xml'),
            owner=params.kms_user,
            group=params.kms_group,
            mode=0644,
            content=InlineTemplate(
                format(
                    '<ranger>\n<enabled>{current_datetime}</enabled>\n</ranger>'
                )))

        Directory([
            os.path.join('/etc', 'ranger', params.repo_name),
            os.path.join('/etc', 'ranger', params.repo_name, 'policycache')
        ],
                  owner=params.kms_user,
                  group=params.kms_group,
                  mode=0775,
                  recursive=True)

        File(os.path.join('/etc', 'ranger', params.repo_name, 'policycache',
                          format('kms_{repo_name}.json')),
             owner=params.kms_user,
             group=params.kms_group,
             mode=0644)

        XmlConfig(
            "ranger-kms-audit.xml",
            conf_dir=params.kms_conf_dir,
            configurations=params.config['configurations']['ranger-kms-audit'],
            configuration_attributes=params.config['configuration_attributes']
            ['ranger-kms-audit'],
            owner=params.kms_user,
            group=params.kms_group,
            mode=0744)

        XmlConfig(
            "ranger-kms-security.xml",
            conf_dir=params.kms_conf_dir,
            configurations=params.config['configurations']
            ['ranger-kms-security'],
            configuration_attributes=params.config['configuration_attributes']
            ['ranger-kms-security'],
            owner=params.kms_user,
            group=params.kms_group,
            mode=0744)

        XmlConfig(
            "ranger-policymgr-ssl.xml",
            conf_dir=params.kms_conf_dir,
            configurations=params.config['configurations']
            ['ranger-kms-policymgr-ssl'],
            configuration_attributes=params.config['configuration_attributes']
            ['ranger-kms-policymgr-ssl'],
            owner=params.kms_user,
            group=params.kms_group,
            mode=0744)

        if params.xa_audit_db_is_enabled:
            cred_setup = params.cred_setup_prefix + (
                '-f', params.credential_file, '-k', 'auditDBCred', '-v',
                PasswordString(params.xa_audit_db_password), '-c', '1')
            Execute(cred_setup,
                    environment={'JAVA_HOME': params.java_home},
                    logoutput=True,
                    sudo=True)

        cred_setup = params.cred_setup_prefix + (
            '-f', params.credential_file, '-k', 'sslKeyStore', '-v',
            PasswordString(params.ssl_keystore_password), '-c', '1')
        Execute(cred_setup,
                environment={'JAVA_HOME': params.java_home},
                logoutput=True,
                sudo=True)

        cred_setup = params.cred_setup_prefix + (
            '-f', params.credential_file, '-k', 'sslTrustStore', '-v',
            PasswordString(params.ssl_truststore_password), '-c', '1')
        Execute(cred_setup,
                environment={'JAVA_HOME': params.java_home},
                logoutput=True,
                sudo=True)

        File(params.credential_file,
             owner=params.kms_user,
             group=params.kms_group,
             mode=0640)
def setup_ranger_plugin(component_select_name,
                        service_name,
                        previous_jdbc_jar,
                        component_downloaded_custom_connector,
                        component_driver_curl_source,
                        component_driver_curl_target,
                        java_home,
                        repo_name,
                        plugin_repo_dict,
                        ranger_env_properties,
                        plugin_properties,
                        policy_user,
                        policymgr_mgr_url,
                        plugin_enabled,
                        conf_dict,
                        component_user,
                        component_group,
                        cache_service_list,
                        plugin_audit_properties,
                        plugin_audit_attributes,
                        plugin_security_properties,
                        plugin_security_attributes,
                        plugin_policymgr_ssl_properties,
                        plugin_policymgr_ssl_attributes,
                        component_list,
                        audit_db_is_enabled,
                        credential_file,
                        xa_audit_db_password,
                        ssl_truststore_password,
                        ssl_keystore_password,
                        api_version=None,
                        stack_version_override=None,
                        skip_if_rangeradmin_down=True,
                        is_security_enabled=False,
                        is_stack_supports_ranger_kerberos=False,
                        component_user_principal=None,
                        component_user_keytab=None):
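    # Generic Ranger plugin setup shared across components: optionally stages the
    # audit DB JDBC driver, creates or looks up the Ranger repository (v1 or v2 API)
    # unless a matching policy cache file already exists, lays out the policy cache,
    # writes the plugin audit/security/SSL XmlConfigs with passwords masked as
    # "crypted", and populates the credential keystore.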

    if audit_db_is_enabled and component_driver_curl_source is not None and not component_driver_curl_source.endswith(
            "/None"):
        if previous_jdbc_jar and os.path.isfile(previous_jdbc_jar):
            File(previous_jdbc_jar, action='delete')

        File(component_downloaded_custom_connector,
             content=DownloadSource(component_driver_curl_source),
             mode=0644)

        Execute(('cp', '--remove-destination',
                 component_downloaded_custom_connector,
                 component_driver_curl_target),
                path=["/bin", "/usr/bin/"],
                sudo=True)

        File(component_driver_curl_target, mode=0644)

    if policymgr_mgr_url.endswith('/'):
        policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
    stack_version = get_stack_version(component_select_name)
    if stack_version_override is not None:
        stack_version = stack_version_override

    component_conf_dir = conf_dict

    if plugin_enabled:

        service_name_exist = False
        policycache_path = os.path.join('/etc', 'ranger', repo_name,
                                        'policycache')
        try:
            for cache_service in cache_service_list:
                policycache_json_file = format(
                    '{policycache_path}/{cache_service}_{repo_name}.json')
                if os.path.isfile(policycache_json_file) and os.path.getsize(
                        policycache_json_file) > 0:
                    with open(policycache_json_file) as json_file:
                        json_data = json.load(json_file)
                        if 'serviceName' in json_data and json_data[
                                'serviceName'] == repo_name:
                            service_name_exist = True
                            Logger.info(
                                "Skipping Ranger API calls, as policy cache file exists for {0}"
                                .format(service_name))
                            break
        except Exception, err:
            Logger.error(
                "Error occurred while fetching service name from policy cache file.\nError: {0}"
                .format(err))

        if not service_name_exist:
            if api_version is not None and api_version == 'v2':
                ranger_adm_obj = RangeradminV2(
                    url=policymgr_mgr_url,
                    skip_if_rangeradmin_down=skip_if_rangeradmin_down)
                ranger_adm_obj.create_ranger_repository(
                    service_name, repo_name, plugin_repo_dict,
                    ranger_env_properties['ranger_admin_username'],
                    ranger_env_properties['ranger_admin_password'],
                    ranger_env_properties['admin_username'],
                    ranger_env_properties['admin_password'], policy_user,
                    is_security_enabled, is_stack_supports_ranger_kerberos,
                    component_user, component_user_principal,
                    component_user_keytab)
            else:
                ranger_adm_obj = Rangeradmin(
                    url=policymgr_mgr_url,
                    skip_if_rangeradmin_down=skip_if_rangeradmin_down)
                ranger_adm_obj.create_ranger_repository(
                    service_name, repo_name, plugin_repo_dict,
                    ranger_env_properties['ranger_admin_username'],
                    ranger_env_properties['ranger_admin_password'],
                    ranger_env_properties['admin_username'],
                    ranger_env_properties['admin_password'], policy_user)

        current_datetime = datetime.now()

        File(
            format('{component_conf_dir}/ranger-security.xml'),
            owner=component_user,
            group=component_group,
            mode=0644,
            content=InlineTemplate(
                format(
                    '<ranger>\n<enabled>{current_datetime}</enabled>\n</ranger>'
                )))

        Directory([
            os.path.join('/etc', 'ranger', repo_name),
            os.path.join('/etc', 'ranger', repo_name, 'policycache')
        ],
                  owner=component_user,
                  group=component_group,
                  mode=0775,
                  create_parents=True,
                  cd_access='a')

        for cache_service in cache_service_list:
            File(os.path.join('/etc', 'ranger', repo_name, 'policycache',
                              format('{cache_service}_{repo_name}.json')),
                 owner=component_user,
                 group=component_group,
                 mode=0644)

        # remove plain-text password from xml configs
        plugin_audit_password_property = 'xasecure.audit.destination.db.password'
        plugin_audit_properties_copy = {}
        plugin_audit_properties_copy.update(plugin_audit_properties)

        if plugin_audit_password_property in plugin_audit_properties_copy:
            plugin_audit_properties_copy[
                plugin_audit_password_property] = "crypted"

        XmlConfig(format('ranger-{service_name}-audit.xml'),
                  conf_dir=component_conf_dir,
                  configurations=plugin_audit_properties_copy,
                  configuration_attributes=plugin_audit_attributes,
                  owner=component_user,
                  group=component_group,
                  mode=0744)

        XmlConfig(format('ranger-{service_name}-security.xml'),
                  conf_dir=component_conf_dir,
                  configurations=plugin_security_properties,
                  configuration_attributes=plugin_security_attributes,
                  owner=component_user,
                  group=component_group,
                  mode=0744)

        # remove plain-text password from xml configs
        plugin_password_properties = [
            'xasecure.policymgr.clientssl.keystore.password',
            'xasecure.policymgr.clientssl.truststore.password'
        ]
        plugin_policymgr_ssl_properties_copy = {}
        plugin_policymgr_ssl_properties_copy.update(
            plugin_policymgr_ssl_properties)

        for prop in plugin_password_properties:
            if prop in plugin_policymgr_ssl_properties_copy:
                plugin_policymgr_ssl_properties_copy[prop] = "crypted"

        if str(service_name).lower() == 'yarn':
            XmlConfig("ranger-policymgr-ssl-yarn.xml",
                      conf_dir=component_conf_dir,
                      configurations=plugin_policymgr_ssl_properties_copy,
                      configuration_attributes=plugin_policymgr_ssl_attributes,
                      owner=component_user,
                      group=component_group,
                      mode=0744)
        else:
            XmlConfig("ranger-policymgr-ssl.xml",
                      conf_dir=component_conf_dir,
                      configurations=plugin_policymgr_ssl_properties_copy,
                      configuration_attributes=plugin_policymgr_ssl_attributes,
                      owner=component_user,
                      group=component_group,
                      mode=0744)

        # creating the symlink should be done by the rpm package
        # setup_ranger_plugin_jar_symblink(stack_version, service_name, component_list)

        setup_ranger_plugin_keystore(service_name, audit_db_is_enabled,
                                     stack_version, credential_file,
                                     xa_audit_db_password,
                                     ssl_truststore_password,
                                     ssl_keystore_password, component_user,
                                     component_group, java_home)
Example #3
def knox():
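    # Lays out the Knox gateway configuration: data/log/pid/conf/topologies
    # directories, gateway-site.xml, log4j and topology files, then creates the
    # master secret and the gateway certificate via knox_client_bin unless they
    # already exist.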
    import params

    directories = [params.knox_data_dir, params.knox_logs_dir, params.knox_pid_dir, params.knox_conf_dir, os.path.join(params.knox_conf_dir, "topologies")]
    for directory in directories:
      Directory(directory,
                owner = params.knox_user,
                group = params.knox_group,
                recursive = True,
                cd_access = "a",
                mode = 0755,
      )

    XmlConfig("gateway-site.xml",
              conf_dir=params.knox_conf_dir,
              configurations=params.config['configurations']['gateway-site'],
              configuration_attributes=params.config['configuration_attributes']['gateway-site'],
              owner=params.knox_user,
              group=params.knox_group,
    )

    File(format("{params.knox_conf_dir}/gateway-log4j.properties"),
         mode=0644,
         group=params.knox_group,
         owner=params.knox_user,
         content=params.gateway_log4j
    )

    File(format("{params.knox_conf_dir}/topologies/default.xml"),
         group=params.knox_group,
         owner=params.knox_user,
         content=InlineTemplate(params.topology_template)
    )
    File(format("{params.knox_conf_dir}/topologies/admin.xml"),
         group=params.knox_group,
         owner=params.knox_user,
         content=InlineTemplate(params.admin_topology_template)
    )
    if params.security_enabled:
      TemplateConfig( format("{knox_conf_dir}/krb5JAASLogin.conf"),
                      owner = params.knox_user,
                      template_tag = None
      )

    dirs_to_chown = tuple(directories)
    cmd = ('chown','-R',format('{knox_user}:{knox_group}')) + dirs_to_chown
    Execute(cmd,
            sudo = True,
    )

    cmd = format('{knox_client_bin} create-master --master {knox_master_secret!p}')
    master_secret_exist = as_user(format('test -f {knox_master_secret_path}'), params.knox_user)

    Execute(cmd,
            user=params.knox_user,
            environment={'JAVA_HOME': params.java_home},
            not_if=master_secret_exist,
    )

    cmd = format('{knox_client_bin} create-cert --hostname {knox_host_name_in_cluster}')
    cert_store_exist = as_user(format('test -f {knox_cert_store_path}'), params.knox_user)

    Execute(cmd,
            user=params.knox_user,
            environment={'JAVA_HOME': params.java_home},
            not_if=cert_store_exist,
    )
Example #4
def enable_kms_plugin():
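    # A variant of enable_kms_plugin that first verifies the Ranger admin login page,
    # creates an Ambari admin user and the KMS repository via the REST helpers, then
    # writes the same ranger-security.xml, policy cache, and ranger-kms-* configs as
    # Example #1, using shell-formatted credential setup commands instead of
    # argument tuples.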

    import params

    if params.has_ranger_admin:

        ranger_adm_obj = Rangeradmin(url=params.policymgr_mgr_url)
        response_code, response_received = ranger_adm_obj.check_ranger_login_urllib2(
            params.policymgr_mgr_url + '/login.jsp', 'test:test')
        if response_code is not None and response_code == 200:
            ambari_ranger_admin, ambari_ranger_password = ranger_adm_obj.create_ambari_admin_user(
                params.ambari_ranger_admin, params.ambari_ranger_password,
                params.admin_uname_password)
            ambari_username_password_for_ranger = ambari_ranger_admin + ':' + ambari_ranger_password
        else:
            raise Fail('Ranger service is not started on given host')

        if ambari_ranger_admin != '' and ambari_ranger_password != '':
            get_repo_flag = get_repo(params.policymgr_mgr_url,
                                     params.repo_name,
                                     ambari_username_password_for_ranger)
            if not get_repo_flag:
                create_repo(params.policymgr_mgr_url,
                            json.dumps(params.kms_ranger_plugin_repo),
                            ambari_username_password_for_ranger)
        else:
            raise Fail('Ambari admin username and password not available')

        current_datetime = datetime.now()

        File(
            format('{kms_conf_dir}/ranger-security.xml'),
            owner=params.kms_user,
            group=params.kms_group,
            mode=0644,
            content=InlineTemplate(
                format(
                    '<ranger>\n<enabled>{current_datetime}</enabled>\n</ranger>'
                )))

        Directory([
            os.path.join('/etc', 'ranger', params.repo_name),
            os.path.join('/etc', 'ranger', params.repo_name, 'policycache')
        ],
                  owner=params.kms_user,
                  group=params.kms_group,
                  mode=0775,
                  recursive=True)

        File(os.path.join('/etc', 'ranger', params.repo_name, 'policycache',
                          format('kms_{repo_name}.json')),
             owner=params.kms_user,
             group=params.kms_group,
             mode=0644)

        XmlConfig(
            "ranger-kms-audit.xml",
            conf_dir=params.kms_conf_dir,
            configurations=params.config['configurations']['ranger-kms-audit'],
            configuration_attributes=params.config['configuration_attributes']
            ['ranger-kms-audit'],
            owner=params.kms_user,
            group=params.kms_group,
            mode=0744)

        XmlConfig(
            "ranger-kms-security.xml",
            conf_dir=params.kms_conf_dir,
            configurations=params.config['configurations']
            ['ranger-kms-security'],
            configuration_attributes=params.config['configuration_attributes']
            ['ranger-kms-security'],
            owner=params.kms_user,
            group=params.kms_group,
            mode=0744)

        XmlConfig(
            "ranger-policymgr-ssl.xml",
            conf_dir=params.kms_conf_dir,
            configurations=params.config['configurations']
            ['ranger-kms-policymgr-ssl'],
            configuration_attributes=params.config['configuration_attributes']
            ['ranger-kms-policymgr-ssl'],
            owner=params.kms_user,
            group=params.kms_group,
            mode=0744)

        if params.xa_audit_db_is_enabled:
            cred_setup = format(
                '{cred_setup_prefix} -f {credential_file} -k "auditDBCred" -v "{xa_audit_db_password}" -c 1'
            )
            Execute(cred_setup,
                    environment={'JAVA_HOME': params.java_home},
                    logoutput=True)

        cred_setup = format(
            '{cred_setup_prefix} -f {credential_file} -k "sslKeyStore" -v "{ssl_keystore_password}" -c 1'
        )
        Execute(cred_setup,
                environment={'JAVA_HOME': params.java_home},
                logoutput=True)

        cred_setup = format(
            '{cred_setup_prefix} -f {credential_file} -k "sslTrustStore" -v "{ssl_truststore_password}" -c 1'
        )
        Execute(cred_setup,
                environment={'JAVA_HOME': params.java_home},
                logoutput=True)

        File(params.credential_file,
             owner=params.kms_user,
             group=params.kms_group)
Example #5
def hbase(name=None):
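    # Prepares an HBase node: conf/tmp/ioengine directories, hbase-site.xml (plus
    # core-site.xml and hdfs-site.xml when the Phoenix stack feature requires them),
    # hbase-env.sh, limits, log4j, and, for the master, the HDFS root/staging/home
    # directories.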
    import params

    Directory(params.etc_prefix_dir, mode=0755)

    Directory(params.hbase_conf_dir,
              owner=params.hbase_user,
              group=params.user_group,
              create_parents=True)

    Directory(params.java_io_tmpdir, create_parents=True, mode=0777)

    # If a file location is specified in the ioengine parameter, ensure that the
    # directory exists. Otherwise create the directory with permissions assigned
    # to hbase:hadoop.
    ioengine_input = params.ioengine_param
    if ioengine_input != None:
        if ioengine_input.startswith("file:/"):
            ioengine_fullpath = ioengine_input[5:]
            ioengine_dir = os.path.dirname(ioengine_fullpath)
            Directory(ioengine_dir,
                      owner=params.hbase_user,
                      group=params.user_group,
                      create_parents=True,
                      mode=0755)

    parent_dir = os.path.dirname(params.tmp_dir)
    # In case there are several placeholders in the path
    while ("${" in parent_dir):
        parent_dir = os.path.dirname(parent_dir)
    if parent_dir != os.path.abspath(os.sep):
        Directory(
            parent_dir,
            create_parents=True,
            cd_access="a",
        )
        Execute(("chmod", "1777", parent_dir), sudo=True)

    XmlConfig(
        "hbase-site.xml",
        conf_dir=params.hbase_conf_dir,
        configurations=params.config['configurations']['hbase-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['hbase-site'],
        owner=params.hbase_user,
        group=params.user_group)

    if check_stack_feature(StackFeature.PHOENIX_CORE_HDFS_SITE_REQUIRED,
                           params.version_for_stack_feature_checks):
        XmlConfig(
            "core-site.xml",
            conf_dir=params.hbase_conf_dir,
            configurations=params.config['configurations']['core-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['core-site'],
            owner=params.hbase_user,
            group=params.user_group)
        if 'hdfs-site' in params.config['configurations']:
            XmlConfig(
                "hdfs-site.xml",
                conf_dir=params.hbase_conf_dir,
                configurations=params.config['configurations']['hdfs-site'],
                configuration_attributes=params.
                config['configuration_attributes']['hdfs-site'],
                owner=params.hbase_user,
                group=params.user_group)
    else:
        File(format("{params.hbase_conf_dir}/hdfs-site.xml"), action="delete")
        File(format("{params.hbase_conf_dir}/core-site.xml"), action="delete")

    if 'hbase-policy' in params.config['configurations']:
        XmlConfig(
            "hbase-policy.xml",
            conf_dir=params.hbase_conf_dir,
            configurations=params.config['configurations']['hbase-policy'],
            configuration_attributes=params.config['configuration_attributes']
            ['hbase-policy'],
            owner=params.hbase_user,
            group=params.user_group)
    # Manually overriding ownership of file installed by hadoop package
    else:
        File(format("{params.hbase_conf_dir}/hbase-policy.xml"),
             owner=params.hbase_user,
             group=params.user_group)

    File(
        format("{hbase_conf_dir}/hbase-env.sh"),
        owner=params.hbase_user,
        content=InlineTemplate(params.hbase_env_sh_template),
        group=params.user_group,
    )

    # On some OSes this folder may not exist, so create it before pushing files there
    Directory(params.limits_conf_dir,
              create_parents=True,
              owner='root',
              group='root')

    File(os.path.join(params.limits_conf_dir, 'hbase.conf'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("hbase.conf.j2"))

    hbase_TemplateConfig(
        params.metric_prop_file_name,
        tag='GANGLIA-MASTER' if name == 'master' else 'GANGLIA-RS')

    hbase_TemplateConfig('regionservers')

    if params.security_enabled:
        hbase_TemplateConfig(format("hbase_{name}_jaas.conf"))

    if name != "client":
        Directory(
            params.pid_dir,
            owner=params.hbase_user,
            create_parents=True,
            cd_access="a",
            mode=0755,
        )

        Directory(
            params.log_dir,
            owner=params.hbase_user,
            create_parents=True,
            cd_access="a",
            mode=0755,
        )

    if (params.log4j_props != None):
        File(format("{params.hbase_conf_dir}/log4j.properties"),
             mode=0644,
             group=params.user_group,
             owner=params.hbase_user,
             content=InlineTemplate(params.log4j_props))
    elif (os.path.exists(format("{params.hbase_conf_dir}/log4j.properties"))):
        File(format("{params.hbase_conf_dir}/log4j.properties"),
             mode=0644,
             group=params.user_group,
             owner=params.hbase_user)
    if name == "master":
        if not params.hbase_hdfs_root_dir_protocol or params.hbase_hdfs_root_dir_protocol == urlparse(
                params.default_fs).scheme:
            params.HdfsResource(params.hbase_hdfs_root_dir,
                                type="directory",
                                action="create_on_execute",
                                owner=params.hbase_user)
        params.HdfsResource(params.hbase_staging_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.hbase_user,
                            mode=0711)
        if params.create_hbase_home_directory:
            params.HdfsResource(params.hbase_home_directory,
                                type="directory",
                                action="create_on_execute",
                                owner=params.hbase_user,
                                mode=0755)
        params.HdfsResource(None, action="execute")

    if params.phoenix_enabled:
        Package(params.phoenix_package,
                retry_on_repo_unavailability=params.
                agent_stack_retry_on_unavailability,
                retry_count=params.agent_stack_retry_count)
Example #6
def hive(name=None):
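    # Configures the Hive client and server conf directories: hive-site.xml (with the
    # credential provider path updated), the optional Atlas hook, hive-env.sh, limits,
    # the DB connection check jar, and component-specific setup for HiveServer2 and
    # the metastore.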
    import params

    hive_client_conf_path = '/etc/hive'
    # Permissions 644 for conf dir (client) files, and 600 for conf.server
    mode_identified = 0644 if params.hive_config_dir == hive_client_conf_path else 0600

    Directory(params.hive_etc_dir_prefix, mode=0755)

    # We should change configurations for the client as well as for the server.
    # The reason is that stale-configs are service-level, not component-level.
    Logger.info("Directories to fill with configs: %s" %
                str(params.hive_conf_dirs_list))
    for conf_dir in params.hive_conf_dirs_list:
        fill_conf_dir(conf_dir)

    params.hive_site_config = update_credential_provider_path(
        params.hive_site_config, 'hive-site',
        os.path.join(params.hive_conf_dir, 'hive-site.jceks'),
        params.hive_user, params.user_group)
    XmlConfig(
        "hive-site.xml",
        conf_dir=params.hive_config_dir,
        configurations=params.hive_site_config,
        configuration_attributes=params.config['configuration_attributes']
        ['hive-site'],
        owner=params.hive_user,
        group=params.user_group,
        mode=mode_identified)

    # Generate atlas-application.properties.xml file
    if params.enable_atlas_hook:
        script_path = os.path.realpath(__file__).split(
            '/services')[0] + '/hooks/before-INSTALL/scripts/atlas'
        sys.path.append(script_path)
        from setup_atlas_hook import has_atlas_in_cluster, setup_atlas_hook, setup_atlas_jar_symlinks
        atlas_hook_filepath = os.path.join(params.hive_config_dir,
                                           params.atlas_hook_filename)
        setup_atlas_hook(SERVICE.HIVE,
                         params.hive_atlas_application_properties,
                         atlas_hook_filepath, params.hive_user,
                         params.user_group)
        setup_atlas_jar_symlinks("hive", params.hive_lib)

    File(format("{hive_config_dir}/hive-env.sh"),
         owner=params.hive_user,
         group=params.user_group,
         content=InlineTemplate(params.hive_env_sh_template),
         mode=mode_identified)

    # On some OSes this folder may not exist, so create it before pushing files there
    Directory(params.limits_conf_dir,
              create_parents=True,
              owner='root',
              group='root')

    File(os.path.join(params.limits_conf_dir, 'hive.conf'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("hive.conf.j2"))
    if params.security_enabled:
        File(os.path.join(params.hive_config_dir, 'zkmigrator_jaas.conf'),
             owner=params.hive_user,
             group=params.user_group,
             content=Template("zkmigrator_jaas.conf.j2"))

    File(
        format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
        content=DownloadSource(
            format("{jdk_location}{check_db_connection_jar_name}")),
        mode=0644,
    )

    if name != "client":
        setup_non_client()
    if name == 'hiveserver2':
        setup_hiveserver2()
    if name == 'metastore':
        setup_metastore()
Example #7
def fill_conf_dir(component_conf_dir):
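    # Populates a Hive conf directory with mapred-site.xml, template files, and the
    # various log4j/log4j2 properties files, using 644/755 modes for the client conf
    # dir and 600/700 for server conf dirs.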
    import params
    hive_client_conf_path = '/etc/hive'
    component_conf_dir = os.path.realpath(component_conf_dir)
    mode_identified_for_file = 0644 if component_conf_dir == hive_client_conf_path else 0600
    mode_identified_for_dir = 0755 if component_conf_dir == hive_client_conf_path else 0700
    Directory(component_conf_dir,
              owner=params.hive_user,
              group=params.user_group,
              create_parents=True,
              mode=mode_identified_for_dir)

    XmlConfig(
        "mapred-site.xml",
        conf_dir=component_conf_dir,
        configurations=params.config['configurations']['mapred-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['mapred-site'],
        owner=params.hive_user,
        group=params.user_group,
        mode=mode_identified_for_file)

    File(format("{component_conf_dir}/hive-default.xml.template"),
         owner=params.hive_user,
         group=params.user_group,
         mode=mode_identified_for_file)

    File(format("{component_conf_dir}/hive-env.sh.template"),
         owner=params.hive_user,
         group=params.user_group,
         mode=mode_identified_for_file)

    if params.log4j_version == '1':
        log4j_exec_filename = 'hive-exec-log4j.properties'
        if (params.log4j_exec_props != None):
            File(format("{component_conf_dir}/{log4j_exec_filename}"),
                 mode=mode_identified_for_file,
                 group=params.user_group,
                 owner=params.hive_user,
                 content=InlineTemplate(params.log4j_exec_props))
        elif (os.path.exists(
                format("{component_conf_dir}/{log4j_exec_filename}.template"))):
            File(format("{component_conf_dir}/{log4j_exec_filename}"),
                 mode=mode_identified_for_file,
                 group=params.user_group,
                 owner=params.hive_user,
                 content=StaticFile(
                     format(
                         "{component_conf_dir}/{log4j_exec_filename}.template")
                 ))

        log4j_filename = 'hive-log4j.properties'
        if (params.log4j_props != None):
            File(format("{component_conf_dir}/{log4j_filename}"),
                 mode=mode_identified_for_file,
                 group=params.user_group,
                 owner=params.hive_user,
                 content=InlineTemplate(params.log4j_props))
        elif (os.path.exists(
                format("{component_conf_dir}/{log4j_filename}.template"))):
            File(format("{component_conf_dir}/{log4j_filename}"),
                 mode=mode_identified_for_file,
                 group=params.user_group,
                 owner=params.hive_user,
                 content=StaticFile(
                     format("{component_conf_dir}/{log4j_filename}.template")))

    mode_identified = 0644
    llap_daemon_log4j_filename = 'llap-daemon-log4j2.properties'
    File(format("{component_conf_dir}/{llap_daemon_log4j_filename}"),
         mode=mode_identified,
         group=params.user_group,
         owner=params.hive_user,
         content=InlineTemplate(params.llap_daemon_log4j))

    llap_cli_log4j2_filename = 'llap-cli-log4j2.properties'
    File(format("{component_conf_dir}/{llap_cli_log4j2_filename}"),
         mode=mode_identified,
         group=params.user_group,
         owner=params.hive_user,
         content=InlineTemplate(params.llap_cli_log4j2))

    hive_log4j2_filename = 'hive-log4j2.properties'
    File(format("{component_conf_dir}/{hive_log4j2_filename}"),
         mode=mode_identified,
         group=params.user_group,
         owner=params.hive_user,
         content=InlineTemplate(params.hive_log4j2))

    hive_exec_log4j2_filename = 'hive-exec-log4j2.properties'
    File(format("{component_conf_dir}/{hive_exec_log4j2_filename}"),
         mode=mode_identified,
         group=params.user_group,
         owner=params.hive_user,
         content=InlineTemplate(params.hive_exec_log4j2))

    beeline_log4j2_filename = 'beeline-log4j2.properties'
    File(format("{component_conf_dir}/{beeline_log4j2_filename}"),
         mode=mode_identified,
         group=params.user_group,
         owner=params.hive_user,
         content=InlineTemplate(params.beeline_log4j2))

    if params.parquet_logging_properties is not None:
        File(format("{component_conf_dir}/parquet-logging.properties"),
             mode=mode_identified_for_file,
             group=params.user_group,
             owner=params.hive_user,
             content=params.parquet_logging_properties)
Example #8
def setup_ranger_knox(upgrade_type=None):
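    # Wires up the Ranger Knox plugin: optionally creates the /ranger/audit HDFS
    # layout, writes hdfs-site.xml for NameNode HA with audit-to-HDFS enabled, then
    # delegates to the XML or legacy setup_ranger_plugin implementation and, when
    # the stack supports it, creates core-site.xml for the plugin.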
    import params

    if params.enable_ranger_knox:

        stack_version = None
        if upgrade_type is not None:
            stack_version = params.version

        if params.retryAble:
            Logger.info(
                "Knox: Setup ranger: command retry is enabled, thus retrying if Ranger admin is down!"
            )
        else:
            Logger.info(
                "Knox: Setup ranger: command retry is not enabled, thus skipping if Ranger admin is down!"
            )

        import sys, os
        script_path = os.path.realpath(__file__).split(
            '/services')[0] + '/hooks/before-INSTALL/scripts/ranger'
        sys.path.append(script_path)

        if params.xml_configurations_supported and params.enable_ranger_knox and params.xa_audit_hdfs_is_enabled:
            if params.has_namenode:
                params.HdfsResource("/ranger/audit",
                                    type="directory",
                                    action="create_on_execute",
                                    owner=params.hdfs_user,
                                    group=params.hdfs_user,
                                    mode=0755,
                                    recursive_chmod=True)
                params.HdfsResource("/ranger/audit/knox",
                                    type="directory",
                                    action="create_on_execute",
                                    owner=params.knox_user,
                                    group=params.knox_user,
                                    mode=0700,
                                    recursive_chmod=True)
                params.HdfsResource(None, action="execute")

                if params.namenode_hosts is not None and len(
                        params.namenode_hosts) > 1:
                    Logger.info(
                        'Ranger Knox plugin is enabled in NameNode HA environment along with audit to Hdfs enabled, creating hdfs-site.xml'
                    )
                    XmlConfig("hdfs-site.xml",
                              conf_dir=params.knox_conf_dir,
                              configurations=params.config['configurations']
                              ['hdfs-site'],
                              configuration_attributes=params.
                              config['configuration_attributes']['hdfs-site'],
                              owner=params.knox_user,
                              group=params.knox_group,
                              mode=0644)
                else:
                    File(format('{knox_conf_dir}/hdfs-site.xml'),
                         action="delete")

        if params.xml_configurations_supported:
            api_version = None
            if params.stack_supports_ranger_kerberos:
                api_version = 'v2'
            from setup_ranger_plugin_xml import setup_ranger_plugin
            setup_ranger_plugin(
                'knox',
                'knox',
                params.previous_jdbc_jar,
                params.downloaded_custom_connector,
                params.driver_curl_source,
                params.driver_curl_target,
                params.java_home,
                params.repo_name,
                params.knox_ranger_plugin_repo,
                params.ranger_env,
                params.ranger_plugin_properties,
                params.policy_user,
                params.policymgr_mgr_url,
                params.enable_ranger_knox,
                conf_dict=params.knox_conf_dir,
                component_user=params.knox_user,
                component_group=params.knox_group,
                cache_service_list=['knox'],
                plugin_audit_properties=params.config['configurations']
                ['ranger-knox-audit'],
                plugin_audit_attributes=params.
                config['configuration_attributes']['ranger-knox-audit'],
                plugin_security_properties=params.config['configurations']
                ['ranger-knox-security'],
                plugin_security_attributes=params.
                config['configuration_attributes']['ranger-knox-security'],
                plugin_policymgr_ssl_properties=params.config['configurations']
                ['ranger-knox-policymgr-ssl'],
                plugin_policymgr_ssl_attributes=params.config[
                    'configuration_attributes']['ranger-knox-policymgr-ssl'],
                component_list=['knox-server'],
                audit_db_is_enabled=params.xa_audit_db_is_enabled,
                credential_file=params.credential_file,
                xa_audit_db_password=params.xa_audit_db_password,
                ssl_truststore_password=params.ssl_truststore_password,
                ssl_keystore_password=params.ssl_keystore_password,
                stack_version_override=stack_version,
                skip_if_rangeradmin_down=not params.retryAble,
                api_version=api_version,
                is_security_enabled=params.security_enabled,
                is_stack_supports_ranger_kerberos=params.
                stack_supports_ranger_kerberos,
                component_user_principal=params.knox_principal_name
                if params.security_enabled else None,
                component_user_keytab=params.knox_keytab_path
                if params.security_enabled else None)
        else:
            from setup_ranger_plugin import setup_ranger_plugin
            setup_ranger_plugin(
                'knox',
                'knox',
                params.previous_jdbc_jar,
                params.downloaded_custom_connector,
                params.driver_curl_source,
                params.driver_curl_target,
                params.java_home,
                params.repo_name,
                params.knox_ranger_plugin_repo,
                params.ranger_env,
                params.ranger_plugin_properties,
                params.policy_user,
                params.policymgr_mgr_url,
                params.enable_ranger_knox,
                conf_dict=params.knox_conf_dir,
                component_user=params.knox_user,
                component_group=params.knox_group,
                cache_service_list=['knox'],
                plugin_audit_properties=params.config['configurations']
                ['ranger-knox-audit'],
                plugin_audit_attributes=params.
                config['configuration_attributes']['ranger-knox-audit'],
                plugin_security_properties=params.config['configurations']
                ['ranger-knox-security'],
                plugin_security_attributes=params.
                config['configuration_attributes']['ranger-knox-security'],
                plugin_policymgr_ssl_properties=params.config['configurations']
                ['ranger-knox-policymgr-ssl'],
                plugin_policymgr_ssl_attributes=params.config[
                    'configuration_attributes']['ranger-knox-policymgr-ssl'],
                component_list=['knox-server'],
                audit_db_is_enabled=params.xa_audit_db_is_enabled,
                credential_file=params.credential_file,
                xa_audit_db_password=params.xa_audit_db_password,
                ssl_truststore_password=params.ssl_truststore_password,
                ssl_keystore_password=params.ssl_keystore_password,
                stack_version_override=stack_version,
                skip_if_rangeradmin_down=not params.retryAble)
        if params.stack_supports_core_site_for_ranger_plugin and params.enable_ranger_knox and params.has_namenode and params.security_enabled:
            Logger.info(
                "Stack supports core-site.xml creation for Ranger plugin, creating core-site.xml from namenode configuraitions"
            )
            setup_core_site_for_required_plugins(
                component_user=params.knox_user,
                component_group=params.knox_group,
                create_core_site_path=params.knox_conf_dir,
                config=params.config)
        else:
            Logger.info(
                "Stack does not support core-site.xml creation for Ranger plugin, skipping core-site.xml configurations"
            )

    else:
        Logger.info('Ranger Knox plugin is not enabled')
Example #9
def metadata(type='server'):
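    # Sets up the Atlas metadata server/client: conf, pid, log, data and webapp
    # directories, atlas.war, log4j/env/application properties, admin credentials,
    # Infra Solr collections and znode ACLs, plus hdfs-site.xml and core-site.xml
    # where the stack requires them.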
    import params

    # Needed by both Server and Client
    Directory(params.conf_dir,
              mode=0755,
              cd_access='a',
              owner=params.metadata_user,
              group=params.user_group,
              create_parents=True)

    if type == "server":
        Directory([params.pid_dir],
                  mode=0755,
                  cd_access='a',
                  owner=params.metadata_user,
                  group=params.user_group,
                  create_parents=True)
        Directory(format('{conf_dir}/solr'),
                  mode=0755,
                  cd_access='a',
                  owner=params.metadata_user,
                  group=params.user_group,
                  create_parents=True,
                  recursive_ownership=True)
        Directory(params.log_dir,
                  mode=0755,
                  cd_access='a',
                  owner=params.metadata_user,
                  group=params.user_group,
                  create_parents=True)
        Directory(params.data_dir,
                  mode=0644,
                  cd_access='a',
                  owner=params.metadata_user,
                  group=params.user_group,
                  create_parents=True)
        Directory(params.expanded_war_dir,
                  mode=0644,
                  cd_access='a',
                  owner=params.metadata_user,
                  group=params.user_group,
                  create_parents=True)
        File(format("{expanded_war_dir}/atlas.war"),
             content=StaticFile(
                 format('{metadata_home}/server/webapp/atlas.war')))
        File(format("{conf_dir}/atlas-log4j.xml"),
             mode=0644,
             owner=params.metadata_user,
             group=params.user_group,
             content=InlineTemplate(params.metadata_log4j_content))
        File(format("{conf_dir}/atlas-env.sh"),
             owner=params.metadata_user,
             group=params.user_group,
             mode=0755,
             content=InlineTemplate(params.metadata_env_content))

        if not is_empty(params.atlas_admin_username) and not is_empty(
                params.atlas_admin_password):
            psswd_output = hashlib.sha256(
                params.atlas_admin_password).hexdigest()
            ModifyPropertiesFile(
                format("{conf_dir}/users-credentials.properties"),
                properties={
                    format('{atlas_admin_username}'):
                    format('ROLE_ADMIN::{psswd_output}')
                },
                owner=params.metadata_user)

        files_to_chown = [
            format("{conf_dir}/policy-store.txt"),
            format("{conf_dir}/users-credentials.properties")
        ]
        for file in files_to_chown:
            if os.path.exists(file):
                Execute(
                    ('chown', format('{metadata_user}:{user_group}'), file),
                    sudo=True)
                Execute(('chmod', '644', file), sudo=True)

        if params.metadata_solrconfig_content:
            File(format("{conf_dir}/solr/solrconfig.xml"),
                 mode=0644,
                 owner=params.metadata_user,
                 group=params.user_group,
                 content=InlineTemplate(params.metadata_solrconfig_content))

    # Needed by both Server and Client
    PropertiesFile(format('{conf_dir}/{conf_file}'),
                   properties=params.application_properties,
                   mode=0644,
                   owner=params.metadata_user,
                   group=params.user_group)

    if params.security_enabled:
        TemplateConfig(format(params.atlas_jaas_file),
                       owner=params.metadata_user)

    if type == 'server' and params.search_backend_solr and params.has_infra_solr:
        solr_cloud_util.setup_solr_client(params.config)
        check_znode()
        jaasFile = params.atlas_jaas_file if params.security_enabled else None
        upload_conf_set('atlas_configs', jaasFile)

        if params.security_enabled:  # update permissions before creating the collections
            solr_cloud_util.add_solr_roles(
                params.config,
                roles=[
                    params.infra_solr_role_atlas,
                    params.infra_solr_role_ranger_audit,
                    params.infra_solr_role_dev
                ],
                new_service_principals=[params.atlas_jaas_principal])

        create_collection('vertex_index', 'atlas_configs', jaasFile)
        create_collection('edge_index', 'atlas_configs', jaasFile)
        create_collection('fulltext_index', 'atlas_configs', jaasFile)

        if params.security_enabled:
            secure_znode(format('{infra_solr_znode}/configs/atlas_configs'),
                         jaasFile)
            secure_znode(format('{infra_solr_znode}/collections/vertex_index'),
                         jaasFile)
            secure_znode(format('{infra_solr_znode}/collections/edge_index'),
                         jaasFile)
            secure_znode(
                format('{infra_solr_znode}/collections/fulltext_index'),
                jaasFile)

    File(params.atlas_hbase_setup,
         group=params.user_group,
         owner=params.hbase_user,
         content=Template("atlas_hbase_setup.rb.j2"))

    is_atlas_upgrade_support = check_stack_feature(
        StackFeature.ATLAS_UPGRADE_SUPPORT,
        get_stack_feature_version(params.config))

    if is_atlas_upgrade_support and params.security_enabled:

        File(params.atlas_kafka_setup,
             group=params.user_group,
             owner=params.kafka_user,
             content=Template("atlas_kafka_acl.sh.j2"))

        # These files are required only if a Kafka broker is not configured as a component on this host
        if not params.host_with_kafka:
            File(format("{kafka_conf_dir}/kafka-env.sh"),
                 owner=params.kafka_user,
                 content=InlineTemplate(params.kafka_env_sh_template))

            File(format("{kafka_conf_dir}/kafka_jaas.conf"),
                 group=params.user_group,
                 owner=params.kafka_user,
                 content=Template("kafka_jaas.conf.j2"))

    if params.stack_supports_atlas_hdfs_site_on_namenode_ha and len(
            params.namenode_host) > 1:
        XmlConfig(
            "hdfs-site.xml",
            conf_dir=params.conf_dir,
            configurations=params.config['configurations']['hdfs-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['hdfs-site'],
            owner=params.metadata_user,
            group=params.user_group,
            mode=0644)
    else:
        File(format('{conf_dir}/hdfs-site.xml'), action="delete")
    '''
    Atlas requires hadoop core-site.xml to resolve users/groups synced in HadoopUGI for
    the authentication and authorization process. Earlier, core-site.xml was available in the
    HBase conf directory, which is part of the Atlas classpath; from stack 2.6 onwards,
    core-site.xml is no longer available in the HBase conf directory. Hence we need to create
    core-site.xml in the Atlas conf directory.
    '''
    if params.stack_supports_atlas_core_site and params.has_namenode:
        XmlConfig(
            "core-site.xml",
            conf_dir=params.conf_dir,
            configurations=params.config['configurations']['core-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['core-site'],
            owner=params.metadata_user,
            group=params.user_group,
            mode=0644)

    Directory(
        format('{metadata_home}/'),
        owner=params.metadata_user,
        group=params.user_group,
        recursive_ownership=True,
    )
Example #10
def fill_conf_dir(component_conf_dir):
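    # A simpler variant of fill_conf_dir: creates the conf directory, writes
    # mapred-site.xml and the Hive template files, and lays down the log4j v1
    # properties files without the log4j2 handling of Example #7.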
    import params

    Directory(component_conf_dir,
              owner=params.hive_user,
              group=params.user_group,
              create_parents=True)

    XmlConfig(
        "mapred-site.xml",
        conf_dir=component_conf_dir,
        configurations=params.config['configurations']['mapred-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['mapred-site'],
        owner=params.hive_user,
        group=params.user_group,
        mode=0644)

    File(format("{component_conf_dir}/hive-default.xml.template"),
         owner=params.hive_user,
         group=params.user_group)

    File(format("{component_conf_dir}/hive-env.sh.template"),
         owner=params.hive_user,
         group=params.user_group)

    # Create hive-log4j.properties and hive-exec-log4j.properties
    # in /etc/hive/conf and not in /etc/hive2/conf
    if params.log4j_version == '1':
        log4j_exec_filename = 'hive-exec-log4j.properties'
        if (params.log4j_exec_props != None):
            File(format("{component_conf_dir}/{log4j_exec_filename}"),
                 mode=0644,
                 group=params.user_group,
                 owner=params.hive_user,
                 content=params.log4j_exec_props)
        elif (os.path.exists(
                format("{component_conf_dir}/{log4j_exec_filename}.template"))):
            File(format("{component_conf_dir}/{log4j_exec_filename}"),
                 mode=0644,
                 group=params.user_group,
                 owner=params.hive_user,
                 content=StaticFile(
                     format(
                         "{component_conf_dir}/{log4j_exec_filename}.template")
                 ))

        log4j_filename = 'hive-log4j.properties'
        if (params.log4j_props != None):
            File(format("{component_conf_dir}/{log4j_filename}"),
                 mode=0644,
                 group=params.user_group,
                 owner=params.hive_user,
                 content=params.log4j_props)
        elif (os.path.exists(
                format("{component_conf_dir}/{log4j_filename}.template"))):
            File(format("{component_conf_dir}/{log4j_filename}"),
                 mode=0644,
                 group=params.user_group,
                 owner=params.hive_user,
                 content=StaticFile(
                     format("{component_conf_dir}/{log4j_filename}.template")))
        pass  # if params.log4j_version == '1'
Example #11
def setup_ranger_plugin(component_select_name, service_name,
                        component_downloaded_custom_connector, component_driver_curl_source,
                        component_driver_curl_target, java_home,
                        repo_name, plugin_repo_dict,
                        ranger_env_properties, plugin_properties,
                        policy_user, policymgr_mgr_url,
                        plugin_enabled, conf_dict, component_user, component_group,
                        cache_service_list, plugin_audit_properties, plugin_audit_attributes,
                        plugin_security_properties, plugin_security_attributes,
                        plugin_policymgr_ssl_properties, plugin_policymgr_ssl_attributes,
                        component_list, audit_db_is_enabled, credential_file, 
                        xa_audit_db_password, ssl_truststore_password,
                        ssl_keystore_password, api_version=None, stack_version_override = None, skip_if_rangeradmin_down = True):
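  # A leaner variant of setup_ranger_plugin: stages the audit DB connector, creates
  # the Ranger repository, writes ranger-security.xml, the policy cache files and the
  # plugin audit/security/SSL XmlConfigs, and fills the credential keystore; deletes
  # ranger-security.xml when the plugin is disabled.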

  if audit_db_is_enabled:
    File(component_downloaded_custom_connector,
      content = DownloadSource(component_driver_curl_source),
      mode = 0644
    )

    Execute(('cp', '--remove-destination', component_downloaded_custom_connector, component_driver_curl_target),
      path=["/bin", "/usr/bin/"],
      sudo=True
    )

    File(component_driver_curl_target, mode=0644)

  stack_version = get_stack_version(component_select_name)
  if stack_version_override is not None:
    stack_version = stack_version_override

  component_conf_dir = conf_dict
  
  if plugin_enabled:

    if api_version == 'v2':
      ranger_adm_obj = RangeradminV2(url=policymgr_mgr_url, skip_if_rangeradmin_down=skip_if_rangeradmin_down)
    else:
      ranger_adm_obj = Rangeradmin(url=policymgr_mgr_url, skip_if_rangeradmin_down=skip_if_rangeradmin_down)

    ranger_adm_obj.create_ranger_repository(service_name, repo_name, plugin_repo_dict,
                                            ranger_env_properties['ranger_admin_username'], ranger_env_properties['ranger_admin_password'],
                                            ranger_env_properties['admin_username'], ranger_env_properties['admin_password'],
                                            policy_user)

    current_datetime = datetime.now()
    
    File(format('{component_conf_dir}/ranger-security.xml'),
      owner = component_user,
      group = component_group,
      mode = 0644,
      content = InlineTemplate(format('<ranger>\n<enabled>{current_datetime}</enabled>\n</ranger>'))
    )

    Directory([os.path.join('/etc', 'ranger', repo_name), os.path.join('/etc', 'ranger', repo_name, 'policycache')],
      owner = component_user,
      group = component_group,
      mode=0775,
      create_parents = True,
      cd_access = 'a'
    )

    for cache_service in cache_service_list:
      File(os.path.join('/etc', 'ranger', repo_name, 'policycache',format('{cache_service}_{repo_name}.json')),
        owner = component_user,
        group = component_group,
        mode = 0644
      )

    XmlConfig(format('ranger-{service_name}-audit.xml'),
      conf_dir=component_conf_dir,
      configurations=plugin_audit_properties,
      configuration_attributes=plugin_audit_attributes,
      owner = component_user,
      group = component_group,
      mode=0744)

    XmlConfig(format('ranger-{service_name}-security.xml'),
      conf_dir=component_conf_dir,
      configurations=plugin_security_properties,
      configuration_attributes=plugin_security_attributes,
      owner = component_user,
      group = component_group,
      mode=0744)

    if str(service_name).lower() == 'yarn' :
      XmlConfig("ranger-policymgr-ssl-yarn.xml",
        conf_dir=component_conf_dir,
        configurations=plugin_policymgr_ssl_properties,
        configuration_attributes=plugin_policymgr_ssl_attributes,
        owner = component_user,
        group = component_group,
        mode=0744) 
    else:
      XmlConfig("ranger-policymgr-ssl.xml",
        conf_dir=component_conf_dir,
        configurations=plugin_policymgr_ssl_properties,
        configuration_attributes=plugin_policymgr_ssl_attributes,
        owner = component_user,
        group = component_group,
        mode=0744) 

    #This should be done by rpm
    #setup_ranger_plugin_jar_symblink(stack_version, service_name, component_list)

    setup_ranger_plugin_keystore(service_name, audit_db_is_enabled, stack_version, credential_file,
              xa_audit_db_password, ssl_truststore_password, ssl_keystore_password,
              component_user, component_group, java_home)

  else:
    File(format('{component_conf_dir}/ranger-security.xml'),
      action="delete"      
    )    
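
A note on the format(...) calls used throughout these examples: Ambari's resource_management format() interpolates {name} placeholders from the caller's scope (params attributes and local variables). The snippet below is only a rough, self-contained approximation of that behaviour; interpolate() is a hypothetical helper with an explicit mapping, not the real API.

def interpolate(template, **values):
    # Substitute {name} placeholders from an explicit mapping; the real
    # resource_management format() resolves them from the caller's scope instead.
    return template.format(**values)

print(interpolate('{component_conf_dir}/ranger-security.xml',
                  component_conf_dir='/etc/hadoop/conf'))
# -> /etc/hadoop/conf/ranger-security.xml
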
Example #12
def hive(name=None):
    import params

    if name == 'hiveserver2':
        # copy tarball to HDFS feature not supported
        if not (params.stack_version_formatted_major
                and check_stack_feature(StackFeature.COPY_TARBALL_TO_HDFS,
                                        params.stack_version_formatted_major)):
            params.HdfsResource(params.webhcat_apps_dir,
                                type="directory",
                                action="create_on_execute",
                                owner=params.webhcat_user,
                                mode=0755)

        # Create webhcat dirs.
        if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
            params.HdfsResource(params.hcat_hdfs_user_dir,
                                type="directory",
                                action="create_on_execute",
                                owner=params.webhcat_user,
                                mode=params.hcat_hdfs_user_mode)

        params.HdfsResource(params.webhcat_hdfs_user_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.webhcat_user,
                            mode=params.webhcat_hdfs_user_mode)

        # ****** Begin Copy Tarballs ******
        # *********************************
        #  if copy tarball to HDFS feature  supported copy mapreduce.tar.gz and tez.tar.gz to HDFS
        if params.stack_version_formatted_major and check_stack_feature(
                StackFeature.COPY_TARBALL_TO_HDFS,
                params.stack_version_formatted_major):
            copy_to_hdfs("mapreduce",
                         params.user_group,
                         params.hdfs_user,
                         host_sys_prepped=params.host_sys_prepped)
            copy_to_hdfs("tez",
                         params.user_group,
                         params.hdfs_user,
                         host_sys_prepped=params.host_sys_prepped)

        # Always copy pig.tar.gz and hive.tar.gz using the appropriate mode.
        # This can use a different source and dest location to account for different stack versions.
        copy_to_hdfs("pig",
                     params.user_group,
                     params.hdfs_user,
                     file_mode=params.tarballs_mode,
                     custom_source_file=params.pig_tar_source,
                     custom_dest_file=params.pig_tar_dest_file,
                     host_sys_prepped=params.host_sys_prepped)
        copy_to_hdfs("hive",
                     params.user_group,
                     params.hdfs_user,
                     file_mode=params.tarballs_mode,
                     custom_source_file=params.hive_tar_source,
                     custom_dest_file=params.hive_tar_dest_file,
                     host_sys_prepped=params.host_sys_prepped)

        wildcard_tarballs = ["sqoop", "hadoop_streaming"]
        for tarball_name in wildcard_tarballs:
            source_file_pattern = getattr(params, tarball_name + "_tar_source")
            dest_dir = getattr(params, tarball_name + "_tar_dest_dir")

            if source_file_pattern is None or dest_dir is None:
                continue

            source_files = glob.glob(
                source_file_pattern) if "*" in source_file_pattern else [
                    source_file_pattern
                ]
            for source_file in source_files:
                src_filename = os.path.basename(source_file)
                dest_file = os.path.join(dest_dir, src_filename)

                copy_to_hdfs(tarball_name,
                             params.user_group,
                             params.hdfs_user,
                             file_mode=params.tarballs_mode,
                             custom_source_file=source_file,
                             custom_dest_file=dest_file,
                             host_sys_prepped=params.host_sys_prepped)
        # ******* End Copy Tarballs *******
        # *********************************

        # if warehouse directory is in DFS
        if not params.whs_dir_protocol or params.whs_dir_protocol == urlparse(
                params.default_fs).scheme:
            # Create Hive Metastore Warehouse Dir
            params.HdfsResource(params.hive_apps_whs_dir,
                                type="directory",
                                action="create_on_execute",
                                owner=params.hive_user,
                                mode=0777)
        else:
            Logger.info(
                format(
                    "Not creating warehouse directory '{hive_apps_whs_dir}', as the location is not in DFS."
                ))

        # Create Hive User Dir
        params.HdfsResource(params.hive_hdfs_user_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.hive_user,
                            mode=params.hive_hdfs_user_mode)

        if not is_empty(params.hive_exec_scratchdir) and not urlparse(
                params.hive_exec_scratchdir).path.startswith("/tmp"):
            params.HdfsResource(
                params.hive_exec_scratchdir,
                type="directory",
                action="create_on_execute",
                owner=params.hive_user,
                group=params.hdfs_user,
                mode=0777
            )  # Hive expects this dir to be writeable by everyone as it is used as a temp dir

        params.HdfsResource(None, action="execute")

    Directory(params.hive_etc_dir_prefix, mode=0755)

    # We should change configurations for client as well as for server.
    # The reason is that stale-configs are service-level, not component.
    Logger.info("Directories to fill with configs: %s" %
                str(params.hive_conf_dirs_list))
    for conf_dir in params.hive_conf_dirs_list:
        fill_conf_dir(conf_dir)

    XmlConfig(
        "hive-site.xml",
        conf_dir=params.hive_config_dir,
        configurations=params.hive_site_config,
        configuration_attributes=params.config['configuration_attributes']
        ['hive-site'],
        owner=params.hive_user,
        group=params.user_group,
        mode=0644)

    # Generate atlas-application.properties.xml file
    if has_atlas_in_cluster():
        atlas_hook_filepath = os.path.join(params.hive_config_dir,
                                           params.atlas_hook_filename)
        setup_atlas_hook(SERVICE.HIVE,
                         params.hive_atlas_application_properties,
                         atlas_hook_filepath, params.hive_user,
                         params.user_group)

    if name == 'hiveserver2':
        XmlConfig(
            "hiveserver2-site.xml",
            conf_dir=params.hive_server_conf_dir,
            configurations=params.config['configurations']['hiveserver2-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['hiveserver2-site'],
            owner=params.hive_user,
            group=params.user_group,
            mode=0644)

    if params.hive_metastore_site_supported and name == 'metastore':
        XmlConfig(
            "hivemetastore-site.xml",
            conf_dir=params.hive_server_conf_dir,
            configurations=params.config['configurations']
            ['hivemetastore-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['hivemetastore-site'],
            owner=params.hive_user,
            group=params.user_group,
            mode=0644)

    File(format("{hive_config_dir}/hive-env.sh"),
         owner=params.hive_user,
         group=params.user_group,
         content=InlineTemplate(params.hive_env_sh_template))

    # On some OSes this folder may not exist, so create it before pushing files there
    Directory(params.limits_conf_dir,
              create_parents=True,
              owner='root',
              group='root')

    File(os.path.join(params.limits_conf_dir, 'hive.conf'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("hive.conf.j2"))

    if name == 'metastore' or name == 'hiveserver2':
        if params.hive_jdbc_target is not None and not os.path.exists(
                params.hive_jdbc_target):
            jdbc_connector(params.hive_jdbc_target,
                           params.hive_previous_jdbc_jar)
        if params.hive2_jdbc_target is not None and not os.path.exists(
                params.hive2_jdbc_target):
            jdbc_connector(params.hive2_jdbc_target,
                           params.hive2_previous_jdbc_jar)

    File(
        format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
        content=DownloadSource(
            format("{jdk_location}{check_db_connection_jar_name}")),
        mode=0644,
    )

    if name == 'metastore':
        File(os.path.join(params.hive_server_conf_dir,
                          "hadoop-metrics2-hivemetastore.properties"),
             owner=params.hive_user,
             group=params.user_group,
             content=Template("hadoop-metrics2-hivemetastore.properties.j2"))

        File(params.start_metastore_path,
             mode=0755,
             content=StaticFile('startMetastore.sh'))
        if params.init_metastore_schema:
            create_schema_cmd = format(
                "export HIVE_CONF_DIR={hive_server_conf_dir} ; "
                "{hive_schematool_bin}/schematool -initSchema "
                "-dbType {hive_metastore_db_type} "
                "-userName {hive_metastore_user_name} "
                "-passWord {hive_metastore_user_passwd!p} -verbose")

            check_schema_created_cmd = as_user(
                format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
                       "{hive_schematool_bin}/schematool -info "
                       "-dbType {hive_metastore_db_type} "
                       "-userName {hive_metastore_user_name} "
                       "-passWord {hive_metastore_user_passwd!p} -verbose"),
                params.hive_user)

            # HACK: in cases with quoted passwords and as_user (which does the quoting as well) !p won't work for hiding passwords.
            # Fixing it with the hack below:
            quoted_hive_metastore_user_passwd = quote_bash_args(
                quote_bash_args(params.hive_metastore_user_passwd))
            if quoted_hive_metastore_user_passwd[0] == "'" and quoted_hive_metastore_user_passwd[-1] == "'" \
                or quoted_hive_metastore_user_passwd[0] == '"' and quoted_hive_metastore_user_passwd[-1] == '"':
                quoted_hive_metastore_user_passwd = quoted_hive_metastore_user_passwd[
                    1:-1]
            Logger.sensitive_strings[repr(check_schema_created_cmd)] = repr(
                check_schema_created_cmd.replace(
                    format("-passWord {quoted_hive_metastore_user_passwd}"),
                    "-passWord " + utils.PASSWORDS_HIDE_STRING))

            Execute(create_schema_cmd,
                    not_if=check_schema_created_cmd,
                    user=params.hive_user)
    elif name == 'hiveserver2':
        File(params.start_hiveserver2_path,
             mode=0755,
             content=Template(format('{start_hiveserver2_script}')))

        File(os.path.join(params.hive_server_conf_dir,
                          "hadoop-metrics2-hiveserver2.properties"),
             owner=params.hive_user,
             group=params.user_group,
             content=Template("hadoop-metrics2-hiveserver2.properties.j2"))

    if name != "client":
        Directory(params.hive_pid_dir,
                  create_parents=True,
                  cd_access='a',
                  owner=params.hive_user,
                  group=params.user_group,
                  mode=0755)
        Directory(params.hive_log_dir,
                  create_parents=True,
                  cd_access='a',
                  owner=params.hive_user,
                  group=params.user_group,
                  mode=0755)
        Directory(params.hive_var_lib,
                  create_parents=True,
                  cd_access='a',
                  owner=params.hive_user,
                  group=params.user_group,
                  mode=0755)
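
The wildcard tarball loop above only globs when the configured source pattern actually contains a '*'. A minimal stand-alone sketch of that pairing logic (expand_tarball_sources is a hypothetical helper, not part of the Ambari API):

import glob
import os

def expand_tarball_sources(source_file_pattern, dest_dir):
    # Expand the pattern only when it really is a wildcard, exactly as the
    # loop above does, and pair each source with its destination path.
    if "*" in source_file_pattern:
        source_files = glob.glob(source_file_pattern)
    else:
        source_files = [source_file_pattern]
    return [(src, os.path.join(dest_dir, os.path.basename(src)))
            for src in source_files]

# Illustrative call (paths are assumptions, not taken from the example):
# expand_tarball_sources("/usr/hdp/current/sqoop-client/sqoop*.tar.gz", "/hdp/apps/sqoop")
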
Example #13
def enable_kms_plugin():

    import params

    if params.has_ranger_admin:

        ranger_flag = False

        if params.stack_supports_ranger_kerberos and params.security_enabled:
            if not is_empty(params.rangerkms_principal
                            ) and params.rangerkms_principal != '':
                ranger_flag = check_ranger_service_support_kerberos(
                    params.kms_user, params.rangerkms_keytab,
                    params.rangerkms_principal)
            else:
                ranger_flag = check_ranger_service_support_kerberos(
                    params.kms_user, params.spengo_keytab,
                    params.spnego_principal)
        else:
            ranger_flag = check_ranger_service()

        if not ranger_flag:
            Logger.error('Error in Get/Create service for Ranger Kms.')

        current_datetime = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

        File(format('{kms_conf_dir}/ranger-security.xml'),
             owner=params.kms_user,
             group=params.kms_group,
             mode=0644,
             content=format(
                 '<ranger>\n<enabled>{current_datetime}</enabled>\n</ranger>'))

        Directory([
            os.path.join('/etc', 'ranger', params.repo_name),
            os.path.join('/etc', 'ranger', params.repo_name, 'policycache')
        ],
                  owner=params.kms_user,
                  group=params.kms_group,
                  mode=0775,
                  create_parents=True)

        File(os.path.join('/etc', 'ranger', params.repo_name, 'policycache',
                          format('kms_{repo_name}.json')),
             owner=params.kms_user,
             group=params.kms_group,
             mode=0644)

        # remove plain-text password from xml configs
        plugin_audit_properties_copy = {}
        plugin_audit_properties_copy.update(
            params.config['configurations']['ranger-kms-audit'])

        if params.plugin_audit_password_property in plugin_audit_properties_copy:
            plugin_audit_properties_copy[
                params.plugin_audit_password_property] = "crypted"

        XmlConfig(
            "ranger-kms-audit.xml",
            conf_dir=params.kms_conf_dir,
            configurations=plugin_audit_properties_copy,
            configuration_attributes=params.config['configuration_attributes']
            ['ranger-kms-audit'],
            owner=params.kms_user,
            group=params.kms_group,
            mode=0744)

        XmlConfig(
            "ranger-kms-security.xml",
            conf_dir=params.kms_conf_dir,
            configurations=params.config['configurations']
            ['ranger-kms-security'],
            configuration_attributes=params.config['configuration_attributes']
            ['ranger-kms-security'],
            owner=params.kms_user,
            group=params.kms_group,
            mode=0744)

        # remove plain-text password from xml configs
        ranger_kms_policymgr_ssl_copy = {}
        ranger_kms_policymgr_ssl_copy.update(
            params.config['configurations']['ranger-kms-policymgr-ssl'])

        for prop in params.kms_plugin_password_properties:
            if prop in ranger_kms_policymgr_ssl_copy:
                ranger_kms_policymgr_ssl_copy[prop] = "crypted"

        XmlConfig(
            "ranger-policymgr-ssl.xml",
            conf_dir=params.kms_conf_dir,
            configurations=ranger_kms_policymgr_ssl_copy,
            configuration_attributes=params.config['configuration_attributes']
            ['ranger-kms-policymgr-ssl'],
            owner=params.kms_user,
            group=params.kms_group,
            mode=0744)

        if params.xa_audit_db_is_enabled:
            cred_setup = params.cred_setup_prefix + (
                '-f', params.credential_file, '-k', 'auditDBCred', '-v',
                PasswordString(params.xa_audit_db_password), '-c', '1')
            Execute(cred_setup,
                    environment={'JAVA_HOME': params.java_home},
                    logoutput=True,
                    sudo=True)

        cred_setup = params.cred_setup_prefix + (
            '-f', params.credential_file, '-k', 'sslKeyStore', '-v',
            PasswordString(params.ssl_keystore_password), '-c', '1')
        Execute(cred_setup,
                environment={'JAVA_HOME': params.java_home},
                logoutput=True,
                sudo=True)

        cred_setup = params.cred_setup_prefix + (
            '-f', params.credential_file, '-k', 'sslTrustStore', '-v',
            PasswordString(params.ssl_truststore_password), '-c', '1')
        Execute(cred_setup,
                environment={'JAVA_HOME': params.java_home},
                logoutput=True,
                sudo=True)

        File(params.credential_file,
             owner=params.kms_user,
             group=params.kms_group,
             mode=0640)

        # create ranger kms audit directory
        if params.xa_audit_hdfs_is_enabled and params.has_namenode and params.has_hdfs_client_on_node:
            params.HdfsResource("/ranger/audit",
                                type="directory",
                                action="create_on_execute",
                                owner=params.hdfs_user,
                                group=params.hdfs_user,
                                mode=0755,
                                recursive_chmod=True)
            params.HdfsResource("/ranger/audit/kms",
                                type="directory",
                                action="create_on_execute",
                                owner=params.kms_user,
                                group=params.kms_group,
                                mode=0750,
                                recursive_chmod=True)
            params.HdfsResource(None, action="execute")

        if params.xa_audit_hdfs_is_enabled and len(params.namenode_host) > 1:
            Logger.info(
                'Audit to Hdfs enabled in NameNode HA environment, creating hdfs-site.xml'
            )
            XmlConfig(
                "hdfs-site.xml",
                conf_dir=params.kms_conf_dir,
                configurations=params.config['configurations']['hdfs-site'],
                configuration_attributes=params.
                config['configuration_attributes']['hdfs-site'],
                owner=params.kms_user,
                group=params.kms_group,
                mode=0644)
        else:
            File(format('{kms_conf_dir}/hdfs-site.xml'), action="delete")
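
Both masked XmlConfig blocks above first copy the configuration dict and overwrite the password properties with a placeholder, so no plain-text secret ends up in the rendered XML. A stand-alone sketch of that masking step (mask_passwords is a hypothetical helper, not part of the Ambari API):

def mask_passwords(configurations, password_properties, placeholder="crypted"):
    # Work on a copy so the original configuration dict stays untouched,
    # then replace every known password property with the placeholder.
    masked = dict(configurations)
    for prop in password_properties:
        if prop in masked:
            masked[prop] = placeholder
    return masked

# Illustrative call (property name chosen for illustration only):
audit_config = {"xasecure.audit.destination.db.password": "secret", "xasecure.audit.is.enabled": "true"}
print(mask_passwords(audit_config, ["xasecure.audit.destination.db.password"]))
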
Example #14
def kms(upgrade_type=None):
    import params

    if params.has_ranger_admin:

        Directory(params.kms_conf_dir,
                  owner=params.kms_user,
                  group=params.kms_group,
                  create_parents=True)

        Directory("/etc/security/serverKeys",
                  create_parents=True,
                  cd_access="a")

        Directory("/etc/ranger/kms", create_parents=True, cd_access="a")

        copy_jdbc_connector()

        File(
            format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
            content=DownloadSource(
                format("{jdk_location}{check_db_connection_jar_name}")),
            mode=0644,
        )

        cp = format("{check_db_connection_jar}")
        if params.db_flavor.lower() == 'sqla':
            cp = cp + os.pathsep + format(
                "{kms_home}/ews/webapp/lib/sajdbc4.jar")
        else:
            path_to_jdbc = format("{kms_home}/ews/webapp/lib/{jdbc_jar_name}")
            if not os.path.isfile(path_to_jdbc):
                path_to_jdbc = (format("{kms_home}/ews/webapp/lib/") +
                                params.default_connectors_map[params.db_flavor.lower()]) \
                               if params.db_flavor.lower() in params.default_connectors_map else None
                if not os.path.isfile(path_to_jdbc):
                    path_to_jdbc = format("{kms_home}/ews/webapp/lib/") + "*"
                    error_message = "Error! Sorry, but we can't find jdbc driver with default name " + params.default_connectors_map[params.db_flavor] + \
                          " in ranger kms lib dir. So, db connection check can fail. Please run 'ambari-server setup --jdbc-db={db_name} --jdbc-driver={path_to_jdbc} on server host.'"
                    Logger.error(error_message)

            cp = cp + os.pathsep + path_to_jdbc

        db_connection_check_command = format(
            "{java_home}/bin/java -cp {cp} org.apache.ambari.server.DBConnectionVerification '{ranger_kms_jdbc_connection_url}' {db_user} {db_password!p} {ranger_kms_jdbc_driver}"
        )

        env_dict = {}
        if params.db_flavor.lower() == 'sqla':
            env_dict = {'LD_LIBRARY_PATH': params.ld_library_path}

        Execute(db_connection_check_command,
                path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
                tries=5,
                try_sleep=10,
                environment=env_dict)

        if params.xa_audit_db_is_enabled and params.driver_source is not None and not params.driver_source.endswith(
                "/None"):
            if params.xa_previous_jdbc_jar and os.path.isfile(
                    params.xa_previous_jdbc_jar):
                File(params.xa_previous_jdbc_jar, action='delete')

            File(params.downloaded_connector_path,
                 content=DownloadSource(params.driver_source),
                 mode=0644)

            Execute(('cp', '--remove-destination',
                     params.downloaded_connector_path, params.driver_target),
                    path=["/bin", "/usr/bin/"],
                    sudo=True)

            File(params.driver_target, mode=0644)

        Directory(os.path.join(params.kms_home, 'ews', 'webapp', 'WEB-INF',
                               'classes', 'lib'),
                  mode=0755,
                  owner=params.kms_user,
                  group=params.kms_group)

        Execute(('cp', format('{kms_home}/ranger-kms-initd'),
                 '/etc/init.d/ranger-kms'),
                not_if=format('ls /etc/init.d/ranger-kms'),
                only_if=format('ls {kms_home}/ranger-kms-initd'),
                sudo=True)

        File('/etc/init.d/ranger-kms', mode=0755)

        Directory(
            format('{kms_home}/'),
            owner=params.kms_user,
            group=params.kms_group,
            recursive_ownership=True,
        )

        Directory(params.ranger_kms_pid_dir,
                  mode=0755,
                  owner=params.kms_user,
                  group=params.user_group,
                  cd_access="a",
                  create_parents=True)

        if params.stack_supports_pid:
            File(
                format('{kms_conf_dir}/ranger-kms-env-piddir.sh'),
                content=format(
                    "export RANGER_KMS_PID_DIR_PATH={ranger_kms_pid_dir}\nexport KMS_USER={kms_user}"
                ),
                owner=params.kms_user,
                group=params.kms_group,
                mode=0755)

        Directory(params.kms_log_dir,
                  owner=params.kms_user,
                  group=params.kms_group,
                  cd_access='a',
                  create_parents=True,
                  mode=0755)

        File(format('{kms_conf_dir}/ranger-kms-env-logdir.sh'),
             content=format("export RANGER_KMS_LOG_DIR={kms_log_dir}"),
             owner=params.kms_user,
             group=params.kms_group,
             mode=0755)

        Execute(('ln', '-sf', format('{kms_home}/ranger-kms'),
                 '/usr/bin/ranger-kms'),
                not_if=format('ls /usr/bin/ranger-kms'),
                only_if=format('ls {kms_home}/ranger-kms'),
                sudo=True)

        File('/usr/bin/ranger-kms', mode=0755)

        Execute(('ln', '-sf', format('{kms_home}/ranger-kms'),
                 '/usr/bin/ranger-kms-services.sh'),
                not_if=format('ls /usr/bin/ranger-kms-services.sh'),
                only_if=format('ls {kms_home}/ranger-kms'),
                sudo=True)

        File('/usr/bin/ranger-kms-services.sh', mode=0755)

        Execute(('ln', '-sf', format('{kms_home}/ranger-kms-initd'),
                 format('{kms_home}/ranger-kms-services.sh')),
                not_if=format('ls {kms_home}/ranger-kms-services.sh'),
                only_if=format('ls {kms_home}/ranger-kms-initd'),
                sudo=True)

        File(format('{kms_home}/ranger-kms-services.sh'), mode=0755)

        Directory(params.kms_log_dir,
                  owner=params.kms_user,
                  group=params.kms_group,
                  mode=0775)

        do_keystore_setup(params.credential_provider_path, params.jdbc_alias,
                          params.db_password)
        do_keystore_setup(params.credential_provider_path,
                          params.masterkey_alias,
                          params.kms_master_key_password)
        if params.stack_support_kms_hsm and params.enable_kms_hsm:
            do_keystore_setup(params.credential_provider_path,
                              params.hms_partition_alias,
                              unicode(params.hms_partition_passwd))
        if params.stack_supports_ranger_kms_ssl and params.ranger_kms_ssl_enabled:
            do_keystore_setup(params.ranger_kms_cred_ssl_path,
                              params.ranger_kms_ssl_keystore_alias,
                              params.ranger_kms_ssl_passwd)

        # remove plain-text password from xml configs
        dbks_site_copy = {}
        dbks_site_copy.update(params.config['configurations']['dbks-site'])

        for prop in params.dbks_site_password_properties:
            if prop in dbks_site_copy:
                dbks_site_copy[prop] = "_"

        XmlConfig(
            "dbks-site.xml",
            conf_dir=params.kms_conf_dir,
            configurations=dbks_site_copy,
            configuration_attributes=params.config['configuration_attributes']
            ['dbks-site'],
            owner=params.kms_user,
            group=params.kms_group,
            mode=0644)

        ranger_kms_site_copy = {}
        ranger_kms_site_copy.update(
            params.config['configurations']['ranger-kms-site'])
        if params.stack_supports_ranger_kms_ssl:
            # remove plain-text password from xml configs
            for prop in params.ranger_kms_site_password_properties:
                if prop in ranger_kms_site_copy:
                    ranger_kms_site_copy[prop] = "_"

        XmlConfig(
            "ranger-kms-site.xml",
            conf_dir=params.kms_conf_dir,
            configurations=ranger_kms_site_copy,
            configuration_attributes=params.config['configuration_attributes']
            ['ranger-kms-site'],
            owner=params.kms_user,
            group=params.kms_group,
            mode=0644)

        XmlConfig(
            "kms-site.xml",
            conf_dir=params.kms_conf_dir,
            configurations=params.config['configurations']['kms-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['kms-site'],
            owner=params.kms_user,
            group=params.kms_group,
            mode=0644)

        File(os.path.join(params.kms_conf_dir, "kms-log4j.properties"),
             owner=params.kms_user,
             group=params.kms_group,
             content=InlineTemplate(params.kms_log4j),
             mode=0644)
        if params.security_enabled:
            # core-site.xml linking required by setup for HDFS encryption
            XmlConfig(
                "core-site.xml",
                conf_dir=params.kms_conf_dir,
                configurations=params.config['configurations']['core-site'],
                configuration_attributes=params.
                config['configuration_attributes']['core-site'],
                owner=params.kms_user,
                group=params.kms_group,
                mode=0644)
        else:
            File(format('{kms_conf_dir}/core-site.xml'), action="delete")
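
The classpath construction above falls back in stages: the configured jar name, then the stack's default connector for the database flavor, and finally a bare wildcard so the DB connection check can still be attempted. A stand-alone sketch of that lookup (resolve_jdbc_jar is a hypothetical helper, not part of the Ambari API):

import os

def resolve_jdbc_jar(lib_dir, jdbc_jar_name, default_connectors_map, db_flavor):
    # 1. Prefer the explicitly configured jar name.
    path = os.path.join(lib_dir, jdbc_jar_name)
    if os.path.isfile(path):
        return path
    # 2. Fall back to the default connector known for this db flavor.
    default_jar = default_connectors_map.get(db_flavor.lower())
    if default_jar and os.path.isfile(os.path.join(lib_dir, default_jar)):
        return os.path.join(lib_dir, default_jar)
    # 3. Last resort: a wildcard, so the connection check command can still run.
    return os.path.join(lib_dir, "*")

# Illustrative call (paths and names are assumptions):
# resolve_jdbc_jar("/usr/hdp/current/ranger-kms/ews/webapp/lib",
#                  "mysql-connector-java.jar",
#                  {"mysql": "mysql-connector-java.jar"}, "MYSQL")
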
Example #15
def hive(name=None):
    import params

    if name == 'hiveserver2':
        # HDP 2.1.* or lower
        if params.hdp_stack_version_major != "" and compare_versions(
                params.hdp_stack_version_major, "2.2.0.0") < 0:
            params.HdfsResource(params.webhcat_apps_dir,
                                type="directory",
                                action="create_on_execute",
                                owner=params.webhcat_user,
                                mode=0755)

        # Create webhcat dirs.
        if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
            params.HdfsResource(params.hcat_hdfs_user_dir,
                                type="directory",
                                action="create_on_execute",
                                owner=params.hcat_user,
                                mode=params.hcat_hdfs_user_mode)

        params.HdfsResource(params.webhcat_hdfs_user_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.webhcat_user,
                            mode=params.webhcat_hdfs_user_mode)

        # ****** Begin Copy Tarballs ******
        # *********************************
        # HDP 2.2 or higher, copy mapreduce.tar.gz to HDFS
        if params.hdp_stack_version_major != "" and compare_versions(
                params.hdp_stack_version_major, '2.2') >= 0:
            copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user)
            copy_to_hdfs("tez", params.user_group, params.hdfs_user)

        # Always copy pig.tar.gz and hive.tar.gz using the appropriate mode.
        # This can use a different source and dest location to account for both HDP 2.1 and 2.2
        copy_to_hdfs("pig",
                     params.user_group,
                     params.hdfs_user,
                     file_mode=params.tarballs_mode,
                     custom_source_file=params.pig_tar_source,
                     custom_dest_file=params.pig_tar_dest_file)
        copy_to_hdfs("hive",
                     params.user_group,
                     params.hdfs_user,
                     file_mode=params.tarballs_mode,
                     custom_source_file=params.hive_tar_source,
                     custom_dest_file=params.hive_tar_dest_file)

        wildcard_tarballs = ["sqoop", "hadoop_streaming"]
        for tarball_name in wildcard_tarballs:
            source_file_pattern = getattr(params, tarball_name + "_tar_source")
            dest_dir = getattr(params, tarball_name + "_tar_dest_dir")

            if source_file_pattern is None or dest_dir is None:
                continue

            source_files = glob.glob(
                source_file_pattern) if "*" in source_file_pattern else [
                    source_file_pattern
                ]
            for source_file in source_files:
                src_filename = os.path.basename(source_file)
                dest_file = os.path.join(dest_dir, src_filename)

                copy_to_hdfs(tarball_name,
                             params.user_group,
                             params.hdfs_user,
                             file_mode=params.tarballs_mode,
                             custom_source_file=source_file,
                             custom_dest_file=dest_file)
        # ******* End Copy Tarballs *******
        # *********************************

        # Create Hive Metastore Warehouse Dir
        params.HdfsResource(params.hive_apps_whs_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.hive_user,
                            mode=0777)

        # Create Hive User Dir
        params.HdfsResource(params.hive_hdfs_user_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.hive_user,
                            mode=params.hive_hdfs_user_mode)

        if not is_empty(params.hive_exec_scratchdir) and not urlparse(
                params.hive_exec_scratchdir).path.startswith("/tmp"):
            params.HdfsResource(
                params.hive_exec_scratchdir,
                type="directory",
                action="create_on_execute",
                owner=params.hive_user,
                group=params.hdfs_user,
                mode=0777
            )  # Hive expects this dir to be writeable by everyone as it is used as a temp dir

        params.HdfsResource(None, action="execute")

    Directory(params.hive_etc_dir_prefix, mode=0755)

    # We should change configurations for client as well as for server.
    # The reason is that stale-configs are service-level, not component.
    for conf_dir in params.hive_conf_dirs_list:
        fill_conf_dir(conf_dir)

    XmlConfig(
        "hive-site.xml",
        conf_dir=params.hive_config_dir,
        configurations=params.hive_site_config,
        configuration_attributes=params.config['configuration_attributes']
        ['hive-site'],
        owner=params.hive_user,
        group=params.user_group,
        mode=0644)

    setup_atlas_hive()

    if params.hive_specific_configs_supported and name == 'hiveserver2':
        XmlConfig(
            "hiveserver2-site.xml",
            conf_dir=params.hive_server_conf_dir,
            configurations=params.config['configurations']['hiveserver2-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['hiveserver2-site'],
            owner=params.hive_user,
            group=params.user_group,
            mode=0644)

    File(format("{hive_config_dir}/hive-env.sh"),
         owner=params.hive_user,
         group=params.user_group,
         content=InlineTemplate(params.hive_env_sh_template))

    # On some OSes this folder may not exist, so create it before pushing files there
    Directory(params.limits_conf_dir,
              recursive=True,
              owner='root',
              group='root')

    File(os.path.join(params.limits_conf_dir, 'hive.conf'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("hive.conf.j2"))

    if name == 'metastore' or name == 'hiveserver2':
        jdbc_connector()

    File(
        format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
        content=DownloadSource(
            format("{jdk_location}{check_db_connection_jar_name}")),
        mode=0644,
    )

    if name == 'metastore':
        File(params.start_metastore_path,
             mode=0755,
             content=StaticFile('startMetastore.sh'))
        if params.init_metastore_schema:
            create_schema_cmd = format(
                "export HIVE_CONF_DIR={hive_server_conf_dir} ; "
                "{hive_bin}/schematool -initSchema "
                "-dbType {hive_metastore_db_type} "
                "-userName {hive_metastore_user_name} "
                "-passWord {hive_metastore_user_passwd!p}")

            check_schema_created_cmd = as_user(
                format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
                       "{hive_bin}/schematool -info "
                       "-dbType {hive_metastore_db_type} "
                       "-userName {hive_metastore_user_name} "
                       "-passWord {hive_metastore_user_passwd!p}"),
                params.hive_user)

            Execute(create_schema_cmd,
                    not_if=check_schema_created_cmd,
                    user=params.hive_user)
    elif name == 'hiveserver2':
        File(params.start_hiveserver2_path,
             mode=0755,
             content=Template(format('{start_hiveserver2_script}')))

    if name != "client":
        crt_directory(params.hive_pid_dir)
        crt_directory(params.hive_log_dir)
        crt_directory(params.hive_var_lib)
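
The tarball copies above are gated on the stack version being 2.2 or higher. compare_versions comes from Ambari's libraries; as a rough stand-alone approximation (version_tuple is a hypothetical helper and ignores non-numeric suffixes the real function may handle):

def version_tuple(version):
    # Turn a dotted version string into a tuple of ints so ordinary tuple
    # comparison reproduces orderings like '2.3.0.0' >= '2.2'.
    return tuple(int(part) for part in version.split("."))

hdp_stack_version_major = "2.3.0.0"  # illustrative value only
if version_tuple(hdp_stack_version_major) >= version_tuple("2.2"):
    pass  # copy mapreduce.tar.gz and tez.tar.gz to HDFS, as the example does
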
Example #16
def oozie(is_server=False, upgrade_type=None):
    import params

    if is_server:
        params.HdfsResource(params.oozie_hdfs_user_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.oozie_user,
                            mode=params.oozie_hdfs_user_mode)
        params.HdfsResource(None, action="execute")
    Directory(params.conf_dir,
              create_parents=True,
              owner=params.oozie_user,
              group=params.user_group)

    params.oozie_site = update_credential_provider_path(
        params.oozie_site, 'oozie-site',
        os.path.join(params.conf_dir, 'oozie-site.jceks'), params.oozie_user,
        params.user_group)

    XmlConfig(
        "oozie-site.xml",
        conf_dir=params.conf_dir,
        configurations=params.oozie_site,
        configuration_attributes=params.config['configuration_attributes']
        ['oozie-site'],
        owner=params.oozie_user,
        group=params.user_group,
        mode=0664)
    File(
        format("{conf_dir}/oozie-env.sh"),
        owner=params.oozie_user,
        content=InlineTemplate(params.oozie_env_sh_template),
        group=params.user_group,
    )

    # On some OSes this folder may not exist, so create it before pushing files there
    Directory(params.limits_conf_dir,
              create_parents=True,
              owner='root',
              group='root')

    File(os.path.join(params.limits_conf_dir, 'oozie.conf'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("oozie.conf.j2"))

    if params.log4j_props is not None:
        File(format("{params.conf_dir}/oozie-log4j.properties"),
             mode=0644,
             group=params.user_group,
             owner=params.oozie_user,
             content=InlineTemplate(params.log4j_props))
    elif (os.path.exists(format("{params.conf_dir}/oozie-log4j.properties"))):
        File(format("{params.conf_dir}/oozie-log4j.properties"),
             mode=0644,
             group=params.user_group,
             owner=params.oozie_user)

    if params.stack_version_formatted and check_stack_feature(
            StackFeature.OOZIE_ADMIN_USER, params.stack_version_formatted):
        File(format("{params.conf_dir}/adminusers.txt"),
             mode=0644,
             group=params.user_group,
             owner=params.oozie_user,
             content=Template('adminusers.txt.j2',
                              oozie_admin_users=params.oozie_admin_users))
    else:
        File(format("{params.conf_dir}/adminusers.txt"),
             owner=params.oozie_user,
             group=params.user_group)

    if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or \
       params.jdbc_driver_name == "com.microsoft.sqlserver.jdbc.SQLServerDriver" or \
       params.jdbc_driver_name == "org.postgresql.Driver" or \
       params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
        File(
            format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
            content=DownloadSource(
                format("{jdk_location}{check_db_connection_jar_name}")),
        )
    pass

    oozie_ownership()

    if is_server:
        oozie_server_specific(upgrade_type)
Example #17
def hive(name=None):
    import params

    XmlConfig(
        "hive-site.xml",
        conf_dir=params.hive_conf_dir,
        configurations=params.config['configurations']['hive-site'],
        owner=params.hive_user,
        configuration_attributes=params.config['configuration_attributes']
        ['hive-site'])

    if name in ["hiveserver2", "metastore"]:
        # Manually overriding service logon user & password set by the installation package
        service_name = params.service_map[name]
        ServiceConfig(service_name,
                      action="change_user",
                      username=params.hive_user,
                      password=Script.get_password(params.hive_user))
        Execute(format("cmd /c hadoop fs -mkdir -p {hive_warehouse_dir}"),
                logoutput=True,
                user=params.hadoop_user)

    if name == 'metastore':
        if params.init_metastore_schema:
            check_schema_created_cmd = format(
                'cmd /c "{hive_bin}\\hive.cmd --service schematool -info '
                '-dbType {hive_metastore_db_type} '
                '-userName {hive_metastore_user_name} '
                '-passWord {hive_metastore_user_passwd!p}'
                '&set EXITCODE=%ERRORLEVEL%&exit /B %EXITCODE%"',  #cmd "feature", propagate the process exit code manually
                hive_bin=params.hive_bin,
                hive_metastore_db_type=params.hive_metastore_db_type,
                hive_metastore_user_name=params.hive_metastore_user_name,
                hive_metastore_user_passwd=params.hive_metastore_user_passwd)
            try:
                Execute(check_schema_created_cmd)
            except Fail:
                create_schema_cmd = format(
                    'cmd /c {hive_bin}\\hive.cmd --service schematool -initSchema '
                    '-dbType {hive_metastore_db_type} '
                    '-userName {hive_metastore_user_name} '
                    '-passWord {hive_metastore_user_passwd!p}',
                    hive_bin=params.hive_bin,
                    hive_metastore_db_type=params.hive_metastore_db_type,
                    hive_metastore_user_name=params.hive_metastore_user_name,
                    hive_metastore_user_passwd=params.
                    hive_metastore_user_passwd)
                Execute(create_schema_cmd,
                        user=params.hive_user,
                        logoutput=True)

    if name == "hiveserver2":
        if params.hive_execution_engine == "tez":
            # Init the tez app dir in hadoop
            script_file = __file__.replace('/', os.sep)
            cmd_file = os.path.normpath(
                os.path.join(os.path.dirname(script_file), "..", "files",
                             "hiveTezSetup.cmd"))

            Execute("cmd /c " + cmd_file,
                    logoutput=True,
                    user=params.hadoop_user)
Example #18
def oozie_server_specific(upgrade_type):
    import params

    no_op_test = as_user(format(
        "ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1"
    ),
                         user=params.oozie_user)

    File(params.pid_file, action="delete", not_if=no_op_test)

    oozie_server_directories = [
        format("{oozie_home}/{oozie_tmp_dir}"), params.oozie_pid_dir,
        params.oozie_log_dir, params.oozie_tmp_dir, params.oozie_data_dir,
        params.oozie_lib_dir, params.oozie_webapps_dir,
        params.oozie_webapps_conf_dir, params.oozie_server_dir
    ]
    Directory(
        oozie_server_directories,
        owner=params.oozie_user,
        group=params.user_group,
        mode=0755,
        create_parents=True,
        cd_access="a",
    )

    Directory(
        params.oozie_libext_dir,
        create_parents=True,
    )

    hashcode_file = format("{oozie_home}/.hashcode")
    skip_recreate_sharelib = format(
        "test -f {hashcode_file} && test -d {oozie_home}/share")

    untar_sharelib = ('tar', '-xvf',
                      format('{oozie_home}/oozie-sharelib.tar.gz'), '-C',
                      params.oozie_home)

    Execute(
        untar_sharelib,  # time-expensive
        not_if=format("{no_op_test} || {skip_recreate_sharelib}"),
        sudo=True,
    )

    configure_cmds = []
    # Default to /usr/share/$TARGETSTACK-oozie/ext-2.2.zip as the first path
    source_ext_zip_paths = get_oozie_ext_zip_source_paths(upgrade_type, params)

    # Copy the first oozie ext-2.2.zip file that is found.
    # This uses a list to handle the cases when migrating from some versions of BigInsights to HDP.
    if source_ext_zip_paths is not None:
        for source_ext_zip_path in source_ext_zip_paths:
            if os.path.isfile(source_ext_zip_path):
                configure_cmds.append(
                    ('cp', source_ext_zip_path, params.oozie_libext_dir))
                configure_cmds.append(
                    ('chown', format('{oozie_user}:{user_group}'),
                     format('{oozie_libext_dir}/{ext_js_file}')))

                Execute(
                    configure_cmds,
                    not_if=no_op_test,
                    sudo=True,
                )
                break

    Directory(
        params.oozie_webapps_conf_dir,
        owner=params.oozie_user,
        group=params.user_group,
        recursive_ownership=True,
        recursion_follow_links=True,
    )

    # download the database JAR
    download_database_library_if_needed()

    # Falcon EL extension
    if params.has_falcon_host:
        Execute(format(
            '{sudo} cp {falcon_home}/oozie/ext/falcon-oozie-el-extension-*.jar {oozie_libext_dir}'
        ),
                not_if=no_op_test)

        Execute(format(
            '{sudo} chown {oozie_user}:{user_group} {oozie_libext_dir}/falcon-oozie-el-extension-*.jar'
        ),
                not_if=no_op_test)

    if params.lzo_enabled:
        all_lzo_packages = get_lzo_packages(params.stack_version_unformatted)
        Package(all_lzo_packages,
                retry_on_repo_unavailability=params.
                agent_stack_retry_on_unavailability,
                retry_count=params.agent_stack_retry_count)
        Execute(
            format(
                '{sudo} cp {hadoop_lib_home}/hadoop-lzo*.jar {oozie_lib_dir}'),
            not_if=no_op_test,
        )

    prepare_war(params)

    File(
        hashcode_file,
        mode=0644,
    )

    if params.stack_version_formatted and check_stack_feature(
            StackFeature.OOZIE_CREATE_HIVE_TEZ_CONFIGS,
            params.stack_version_formatted):
        # Create hive-site and tez-site configs for oozie
        Directory(params.hive_conf_dir,
                  create_parents=True,
                  owner=params.oozie_user,
                  group=params.user_group)
        if 'hive-site' in params.config['configurations']:
            hive_site_config = update_credential_provider_path(
                params.config['configurations']['hive-site'], 'hive-site',
                os.path.join(params.hive_conf_dir, 'hive-site.jceks'),
                params.oozie_user, params.user_group)
            XmlConfig("hive-site.xml",
                      conf_dir=params.hive_conf_dir,
                      configurations=hive_site_config,
                      configuration_attributes=params.
                      config['configuration_attributes']['hive-site'],
                      owner=params.oozie_user,
                      group=params.user_group,
                      mode=0644)
        if 'tez-site' in params.config['configurations']:
            XmlConfig(
                "tez-site.xml",
                conf_dir=params.hive_conf_dir,
                configurations=params.config['configurations']['tez-site'],
                configuration_attributes=params.
                config['configuration_attributes']['tez-site'],
                owner=params.oozie_user,
                group=params.user_group,
                mode=0664)

        # If Atlas is also installed, we need to generate the Atlas Hive hook (hive-atlas-application.properties file) in directory
        # {stack_root}/{current_version}/atlas/hook/hive/
        # Because this is a .properties file instead of an xml file, it will not be read automatically by Oozie.
        # However, we should still save the file on this host so that it can be uploaded to the Oozie Sharelib in DFS.
        if has_atlas_in_cluster():
            atlas_hook_filepath = os.path.join(params.hive_conf_dir,
                                               params.atlas_hook_filename)
            Logger.info(
                "Has atlas in cluster, will save Atlas Hive hook into location %s"
                % str(atlas_hook_filepath))
            setup_atlas_hook(SERVICE.HIVE,
                             params.hive_atlas_application_properties,
                             atlas_hook_filepath, params.oozie_user,
                             params.user_group)

    Directory(
        params.oozie_server_dir,
        owner=params.oozie_user,
        group=params.user_group,
        recursive_ownership=True,
    )
    if params.security_enabled:
        File(os.path.join(params.conf_dir, 'zkmigrator_jaas.conf'),
             owner=params.oozie_user,
             group=params.user_group,
             content=Template("zkmigrator_jaas.conf.j2"))
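
The ext-2.2.zip handling above walks a list of candidate source paths and copies only the first one that exists, which covers migrations from other stacks. A stand-alone sketch of that selection (first_existing_path is a hypothetical helper, not the real get_oozie_ext_zip_source_paths):

import os

def first_existing_path(candidate_paths):
    # Return the first candidate that is a regular file, or None if none exist,
    # mirroring the loop above that breaks after the first successful copy.
    for path in candidate_paths or []:
        if os.path.isfile(path):
            return path
    return None

# Illustrative call (paths follow the /usr/share/$TARGETSTACK-oozie/ext-2.2.zip pattern noted above):
# first_existing_path(["/usr/share/HDP-oozie/ext-2.2.zip", "/usr/share/BigInsights-oozie/ext-2.2.zip"])
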
Example #19
def setup_hiveserver2():
    import params

    File(params.start_hiveserver2_path,
         mode=0755,
         content=Template(format('{start_hiveserver2_script}')))

    File(os.path.join(params.hive_server_conf_dir,
                      "hadoop-metrics2-hiveserver2.properties"),
         owner=params.hive_user,
         group=params.user_group,
         content=Template("hadoop-metrics2-hiveserver2.properties.j2"),
         mode=0600)
    XmlConfig(
        "hiveserver2-site.xml",
        conf_dir=params.hive_server_conf_dir,
        configurations=params.config['configurations']['hiveserver2-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['hiveserver2-site'],
        owner=params.hive_user,
        group=params.user_group,
        mode=0600)

    # copy tarball to HDFS feature not supported
    params.HdfsResource(params.webhcat_apps_dir,
                        type="directory",
                        action="create_on_execute",
                        owner=params.webhcat_user,
                        mode=0755)

    # Create webhcat dirs.
    if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
        params.HdfsResource(params.hcat_hdfs_user_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.webhcat_user,
                            mode=params.hcat_hdfs_user_mode)

    params.HdfsResource(params.webhcat_hdfs_user_dir,
                        type="directory",
                        action="create_on_execute",
                        owner=params.webhcat_user,
                        mode=params.webhcat_hdfs_user_mode)

    # if warehouse directory is in DFS
    if not params.whs_dir_protocol or params.whs_dir_protocol == urlparse(
            params.default_fs).scheme:
        # Create Hive Metastore Warehouse Dir
        params.HdfsResource(params.hive_apps_whs_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.hive_user,
                            mode=0777)
    else:
        Logger.info(
            format(
                "Not creating warehouse directory '{hive_apps_whs_dir}', as the location is not in DFS."
            ))

    # Create Hive User Dir
    params.HdfsResource(params.hive_hdfs_user_dir,
                        type="directory",
                        action="create_on_execute",
                        owner=params.hive_user,
                        mode=params.hive_hdfs_user_mode)

    if not is_empty(params.hive_exec_scratchdir) and not urlparse(
            params.hive_exec_scratchdir).path.startswith("/tmp"):
        params.HdfsResource(
            params.hive_exec_scratchdir,
            type="directory",
            action="create_on_execute",
            owner=params.hive_user,
            group=params.hdfs_user,
            mode=0777
        )  # Hive expects this dir to be writeable by everyone as it is used as a temp dir

    params.HdfsResource(None, action="execute")
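
The warehouse directory above is created only when it lives on the cluster's default filesystem (or when no explicit protocol is configured). A stand-alone sketch of that guard (should_create_in_dfs is a hypothetical helper; the urlparse import mirrors the Python 2 one used by these scripts):

try:
    from urlparse import urlparse      # Python 2, as in the examples above
except ImportError:
    from urllib.parse import urlparse  # Python 3

def should_create_in_dfs(whs_dir_protocol, default_fs):
    # Create the directory through HdfsResource only if its protocol matches
    # the default filesystem's scheme, or if no protocol was given at all.
    return not whs_dir_protocol or whs_dir_protocol == urlparse(default_fs).scheme

print(should_create_in_dfs("hdfs", "hdfs://namenode:8020"))  # True
print(should_create_in_dfs("s3a", "hdfs://namenode:8020"))   # False
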
Example #20
def hive_interactive(name=None):
    import params

    # list of properties that should be excluded from the config
    # this approach is a compromise against adding a dedicated config
    # type for hive_server_interactive or needed config groups on a
    # per component basis
    exclude_list = ['hive.enforce.bucketing', 'hive.enforce.sorting']

    # List of configs to be excluded from hive2 client, but present in Hive2 server.
    exclude_list_for_hive2_client = ['javax.jdo.option.ConnectionPassword']

    # Copy Tarballs in HDFS.
    if params.stack_version_formatted_major and check_stack_feature(
            StackFeature.ROLLING_UPGRADE,
            params.stack_version_formatted_major):
        resource_created = copy_to_hdfs(
            "tez_hive2",
            params.user_group,
            params.hdfs_user,
            file_mode=params.tarballs_mode,
            host_sys_prepped=params.host_sys_prepped)

        if resource_created:
            params.HdfsResource(None, action="execute")

    Directory(params.hive_interactive_etc_dir_prefix, mode=0755)

    Logger.info("Directories to fill with configs: %s" %
                str(params.hive_conf_dirs_list))
    for conf_dir in params.hive_conf_dirs_list:
        fill_conf_dir(conf_dir)
    '''
  As hive2/hive-site.xml only contains the new + the changed props compared to hive/hive-site.xml,
  we need to merge hive/hive-site.xml and hive2/hive-site.xml and store it in hive2/hive-site.xml.
  '''
    merged_hive_interactive_site = {}
    merged_hive_interactive_site.update(
        params.config['configurations']['hive-site'])
    merged_hive_interactive_site.update(
        params.config['configurations']['hive-interactive-site'])
    for item in exclude_list:
        if item in merged_hive_interactive_site.keys():
            del merged_hive_interactive_site[item]
    '''
  Hive2 doesn't have support for Atlas, we need to remove the Hook 'org.apache.atlas.hive.hook.HiveHook',
  which would have come in config 'hive.exec.post.hooks' during the site merge logic, if Atlas is installed.
  '''
    remove_atlas_hook_if_exists(merged_hive_interactive_site)
    '''
  As tez_hive2/tez-site.xml only contains the new + the changed props compared to tez/tez-site.xml,
  we need to merge tez/tez-site.xml and tez_hive2/tez-site.xml and store it in tez_hive2/tez-site.xml.
  '''
    merged_tez_interactive_site = {}
    if 'tez-site' in params.config['configurations']:
        merged_tez_interactive_site.update(
            params.config['configurations']['tez-site'])
        Logger.info(
            "Retrieved 'tez/tez-site' for merging with 'tez_hive2/tez-interactive-site'."
        )
    else:
        Logger.error(
            "Tez's 'tez-site' couldn't be retrieved from passed-in configurations."
        )

    merged_tez_interactive_site.update(
        params.config['configurations']['tez-interactive-site'])
    XmlConfig(
        "tez-site.xml",
        conf_dir=params.tez_interactive_config_dir,
        configurations=merged_tez_interactive_site,
        configuration_attributes=params.config['configuration_attributes']
        ['tez-interactive-site'],
        owner=params.tez_interactive_user,
        group=params.user_group,
        mode=0664)
    '''
  Merge properties from hiveserver2-interactive-site into hiveserver2-site
  '''
    merged_hiveserver2_interactive_site = {}
    if 'hiveserver2-site' in params.config['configurations']:
        merged_hiveserver2_interactive_site.update(
            params.config['configurations']['hiveserver2-site'])
        Logger.info(
            "Retrieved 'hiveserver2-site' for merging with 'hiveserver2-interactive-site'."
        )
    else:
        Logger.error(
            "'hiveserver2-site' couldn't be retrieved from passed-in configurations."
        )
    merged_hiveserver2_interactive_site.update(
        params.config['configurations']['hiveserver2-interactive-site'])

    # Create config files under /etc/hive2/conf and /etc/hive2/conf/conf.server:
    #   hive-site.xml
    #   hive-env.sh
    #   llap-daemon-log4j2.properties
    #   llap-cli-log4j2.properties
    #   hive-log4j2.properties
    #   hive-exec-log4j2.properties
    #   beeline-log4j2.properties

    hive2_conf_dirs_list = params.hive_conf_dirs_list
    hive2_client_conf_path = format(
        "{stack_root}/current/{component_directory}/conf")

    # Make a copy of 'merged_hive_interactive_site' as 'merged_hive_interactive_site_copy' and delete the
    # 'javax.jdo.option.ConnectionPassword' config from it, since the Hive2 client shouldn't carry that config.
    merged_hive_interactive_site_copy = merged_hive_interactive_site.copy()
    for item in exclude_list_for_hive2_client:
        if item in merged_hive_interactive_site.keys():
            del merged_hive_interactive_site_copy[item]

    for conf_dir in hive2_conf_dirs_list:
        if conf_dir == hive2_client_conf_path:
            XmlConfig(
                "hive-site.xml",
                conf_dir=conf_dir,
                configurations=merged_hive_interactive_site_copy,
                configuration_attributes=params.
                config['configuration_attributes']['hive-interactive-site'],
                owner=params.hive_user,
                group=params.user_group,
                mode=0644)
        else:
            XmlConfig(
                "hive-site.xml",
                conf_dir=conf_dir,
                configurations=merged_hive_interactive_site,
                configuration_attributes=params.
                config['configuration_attributes']['hive-interactive-site'],
                owner=params.hive_user,
                group=params.user_group,
                mode=0644)

        XmlConfig(
            "hiveserver2-site.xml",
            conf_dir=conf_dir,
            configurations=merged_hiveserver2_interactive_site,
            configuration_attributes=params.config['configuration_attributes']
            ['hiveserver2-interactive-site'],
            owner=params.hive_user,
            group=params.user_group,
            mode=0644)

        hive_server_interactive_conf_dir = conf_dir

        File(format("{hive_server_interactive_conf_dir}/hive-env.sh"),
             owner=params.hive_user,
             group=params.user_group,
             content=InlineTemplate(params.hive_interactive_env_sh_template))

        llap_daemon_log4j_filename = 'llap-daemon-log4j2.properties'
        File(format(
            "{hive_server_interactive_conf_dir}/{llap_daemon_log4j_filename}"),
             mode=0644,
             group=params.user_group,
             owner=params.hive_user,
             content=params.llap_daemon_log4j)

        llap_cli_log4j2_filename = 'llap-cli-log4j2.properties'
        File(format(
            "{hive_server_interactive_conf_dir}/{llap_cli_log4j2_filename}"),
             mode=0644,
             group=params.user_group,
             owner=params.hive_user,
             content=params.llap_cli_log4j2)

        hive_log4j2_filename = 'hive-log4j2.properties'
        File(format(
            "{hive_server_interactive_conf_dir}/{hive_log4j2_filename}"),
             mode=0644,
             group=params.user_group,
             owner=params.hive_user,
             content=params.hive_log4j2)

        hive_exec_log4j2_filename = 'hive-exec-log4j2.properties'
        File(format(
            "{hive_server_interactive_conf_dir}/{hive_exec_log4j2_filename}"),
             mode=0644,
             group=params.user_group,
             owner=params.hive_user,
             content=params.hive_exec_log4j2)

        beeline_log4j2_filename = 'beeline-log4j2.properties'
        File(format(
            "{hive_server_interactive_conf_dir}/{beeline_log4j2_filename}"),
             mode=0644,
             group=params.user_group,
             owner=params.hive_user,
             content=params.beeline_log4j2)

        File(format(
            "{hive_server_interactive_conf_dir}/hadoop-metrics2-llapdaemon.properties"
        ),
             owner=params.hive_user,
             group=params.user_group,
             content=Template("hadoop-metrics2-llapdaemon.j2"))

        File(format(
            "{hive_server_interactive_conf_dir}/hadoop-metrics2-llaptaskscheduler.properties"
        ),
             owner=params.hive_user,
             group=params.user_group,
             content=Template("hadoop-metrics2-llaptaskscheduler.j2"))

    # On some operating systems this folder may not exist, so create it before placing files there
    Directory(params.limits_conf_dir,
              create_parents=True,
              owner='root',
              group='root')

    File(os.path.join(params.limits_conf_dir, 'hive.conf'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("hive.conf.j2"))

    if not os.path.exists(params.target_hive_interactive):
        jdbc_connector(params.target_hive_interactive,
                       params.hive_intaractive_previous_jdbc_jar)

    File(format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
         content=DownloadSource(
             format("{jdk_location}{check_db_connection_jar_name}")),
         mode=0644)
    File(params.start_hiveserver2_interactive_path,
         mode=0755,
         content=Template(format('{start_hiveserver2_interactive_script}')))

    Directory(params.hive_pid_dir,
              create_parents=True,
              cd_access='a',
              owner=params.hive_user,
              group=params.user_group,
              mode=0755)
    Directory(params.hive_log_dir,
              create_parents=True,
              cd_access='a',
              owner=params.hive_user,
              group=params.user_group,
              mode=0755)
    Directory(params.hive_interactive_var_lib,
              create_parents=True,
              cd_access='a',
              owner=params.hive_user,
              group=params.user_group,
              mode=0755)
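
A quick aside on the pattern above: hive2/hive-site.xml is produced by overlaying the hive-interactive-site properties on top of hive-site and then dropping excluded keys. The snippet below is a minimal, standalone sketch of that merge-and-exclude idea; the function name merge_configs and the sample dictionaries are illustrative and not part of the Ambari scripts.

def merge_configs(base, overrides, exclude_keys=()):
    # copy so the original dicts stay untouched; overrides win on conflicts
    merged = dict(base)
    merged.update(overrides)
    for key in exclude_keys:
        merged.pop(key, None)  # drop deprecated/forbidden props if present
    return merged

hive_site = {'hive.enforce.bucketing': 'true', 'hive.exec.post.hooks': ''}
hive_interactive_site = {'hive.llap.execution.mode': 'all'}
print(merge_configs(hive_site, hive_interactive_site,
                    exclude_keys=['hive.enforce.bucketing']))
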
Example #21
def kms():
    import params

    if params.has_ranger_admin:

        File(params.downloaded_connector_path,
             content=DownloadSource(params.driver_source))

        if not os.path.isfile(params.driver_target):
            Execute(('cp', '--remove-destination',
                     params.downloaded_connector_path, params.driver_target),
                    path=["/bin", "/usr/bin/"],
                    sudo=True)

        Directory(os.path.join(params.kms_home, 'ews', 'webapp', 'WEB-INF',
                               'classes', 'lib'),
                  mode=0755,
                  owner=params.kms_user,
                  group=params.kms_group)

        Execute(('cp', format('{kms_home}/ranger-kms-initd'),
                 '/etc/init.d/ranger-kms'),
                not_if=format('ls /etc/init.d/ranger-kms'),
                only_if=format('ls {kms_home}/ranger-kms-initd'),
                sudo=True)

        File('/etc/init.d/ranger-kms', mode=0755)

        Execute(('chown', '-R', format('{kms_user}:{kms_group}'),
                 format('{kms_home}/')),
                sudo=True)

        Execute(('ln', '-sf', format('{kms_home}/ranger-kms'),
                 '/usr/bin/ranger-kms'),
                not_if=format('ls /usr/bin/ranger-kms'),
                only_if=format('ls {kms_home}/ranger-kms'),
                sudo=True)

        File('/usr/bin/ranger-kms', mode=0755)

        Execute(('ln', '-sf', format('{kms_home}/ranger-kms'),
                 '/usr/bin/ranger-kms-services.sh'),
                not_if=format('ls /usr/bin/ranger-kms-services.sh'),
                only_if=format('ls {kms_home}/ranger-kms'),
                sudo=True)

        File('/usr/bin/ranger-kms-services.sh', mode=0755)

        Execute(('ln', '-sf', format('{kms_home}/ranger-kms-initd'),
                 format('{kms_home}/ranger-kms-services.sh')),
                not_if=format('ls {kms_home}/ranger-kms-services.sh'),
                only_if=format('ls {kms_home}/ranger-kms-initd'),
                sudo=True)

        File(format('{kms_home}/ranger-kms-services.sh'), mode=0755)

        Directory(params.kms_log_dir,
                  owner=params.kms_user,
                  group=params.kms_group,
                  mode=0775)

        do_keystore_setup(params.credential_provider_path, params.jdbc_alias,
                          params.db_password)
        do_keystore_setup(params.credential_provider_path,
                          params.masterkey_alias,
                          params.kms_master_key_password)

        XmlConfig(
            "dbks-site.xml",
            conf_dir=params.kms_conf_dir,
            configurations=params.config['configurations']['dbks-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['dbks-site'],
            owner=params.kms_user,
            group=params.kms_group)

        XmlConfig(
            "ranger-kms-site.xml",
            conf_dir=params.kms_conf_dir,
            configurations=params.config['configurations']['ranger-kms-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['ranger-kms-site'],
            owner=params.kms_user,
            group=params.kms_group)

        XmlConfig(
            "kms-site.xml",
            conf_dir=params.kms_conf_dir,
            configurations=params.config['configurations']['kms-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['kms-site'],
            owner=params.kms_user,
            group=params.kms_group)

        File(os.path.join(params.kms_conf_dir, "kms-log4j.properties"),
             owner=params.kms_user,
             group=params.kms_group,
             content=params.kms_log4j)
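
The Execute calls above are guarded with not_if/only_if so that links such as /usr/bin/ranger-kms are only created when the link is missing and the source exists. Below is a hedged plain-Python sketch of the same idempotent-link idea; ensure_symlink and the example paths are hypothetical stand-ins for the guarded Execute resources.

import os

def ensure_symlink(source, link_path):
    if not os.path.exists(source):
        return  # nothing to link to yet (the only_if guard)
    if os.path.lexists(link_path):
        return  # link or file already present (the not_if guard)
    os.symlink(source, link_path)

ensure_symlink('/usr/hdp/current/ranger-kms/ranger-kms', '/usr/bin/ranger-kms')
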
Example #22
def yarn(name=None, config_dir=None):
    """
  :param name: Component name, apptimelineserver, nodemanager, resourcemanager, or None (defaults for client)
  :param config_dir: Which config directory to write configs to, which could be different during rolling upgrade.
  """
    import params

    if config_dir is None:
        config_dir = params.hadoop_conf_dir

    if name == "historyserver":
        if params.yarn_log_aggregation_enabled:
            params.HdfsResource(params.yarn_nm_app_log_dir,
                                action="create_on_execute",
                                type="directory",
                                owner=params.yarn_user,
                                group=params.user_group,
                                mode=01777,
                                recursive_chmod=True)

        # create the /tmp folder with proper permissions if it doesn't exist yet
        if params.entity_file_history_directory.startswith('/tmp'):
            params.HdfsResource(
                params.hdfs_tmp_dir,
                action="create_on_execute",
                type="directory",
                owner=params.hdfs_user,
                mode=0777,
            )

        params.HdfsResource(params.entity_file_history_directory,
                            action="create_on_execute",
                            type="directory",
                            owner=params.yarn_user,
                            group=params.user_group)
        params.HdfsResource("/mapred",
                            type="directory",
                            action="create_on_execute",
                            owner=params.mapred_user)
        params.HdfsResource("/mapred/system",
                            type="directory",
                            action="create_on_execute",
                            owner=params.hdfs_user)
        params.HdfsResource(params.mapreduce_jobhistory_done_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.mapred_user,
                            group=params.user_group,
                            change_permissions_for_parents=True,
                            mode=0777)
        params.HdfsResource(None, action="execute")
        Directory(
            params.jhs_leveldb_state_store_dir,
            owner=params.mapred_user,
            group=params.user_group,
            create_parents=True,
            cd_access="a",
            recursive_ownership=True,
        )

    #<editor-fold desc="Node Manager Section">
    if name == "nodemanager":

        # First start after enabling/disabling security
        if params.toggle_nm_security:
            Directory(params.nm_local_dirs_list + params.nm_log_dirs_list,
                      action='delete')

            # If yarn.nodemanager.recovery.dir exists, remove this dir
            if params.yarn_nodemanager_recovery_dir:
                Directory(InlineTemplate(
                    params.yarn_nodemanager_recovery_dir).get_content(),
                          action='delete')

            # Setting NM marker file
            if params.security_enabled:
                Directory(params.nm_security_marker_dir)
                File(
                    params.nm_security_marker,
                    content=
                    "Marker file to track first start after enabling/disabling security. "
                    "During first start yarn local, log dirs are removed and recreated"
                )
            elif not params.security_enabled:
                File(params.nm_security_marker, action="delete")

        if not params.security_enabled or params.toggle_nm_security:
            # handle_mounted_dirs ensures that we don't create dirs which are temporarily unavailable (unmounted) and are intended to reside on a different mount.
            nm_log_dir_to_mount_file_content = handle_mounted_dirs(
                create_log_dir, params.nm_log_dirs,
                params.nm_log_dir_to_mount_file, params)
            # create a history file used by handle_mounted_dirs
            File(params.nm_log_dir_to_mount_file,
                 owner=params.hdfs_user,
                 group=params.user_group,
                 mode=0644,
                 content=nm_log_dir_to_mount_file_content)
            nm_local_dir_to_mount_file_content = handle_mounted_dirs(
                create_local_dir, params.nm_local_dirs,
                params.nm_local_dir_to_mount_file, params)
            File(params.nm_local_dir_to_mount_file,
                 owner=params.hdfs_user,
                 group=params.user_group,
                 mode=0644,
                 content=nm_local_dir_to_mount_file_content)
    #</editor-fold>

    if params.yarn_nodemanager_recovery_dir:
        Directory(
            InlineTemplate(params.yarn_nodemanager_recovery_dir).get_content(),
            owner=params.yarn_user,
            group=params.user_group,
            create_parents=True,
            mode=0755,
            cd_access='a',
        )

    Directory(
        [params.yarn_pid_dir_prefix, params.yarn_pid_dir, params.yarn_log_dir],
        owner=params.yarn_user,
        group=params.user_group,
        create_parents=True,
        cd_access='a',
    )

    Directory(
        [
            params.mapred_pid_dir_prefix, params.mapred_pid_dir,
            params.mapred_log_dir_prefix, params.mapred_log_dir
        ],
        owner=params.mapred_user,
        group=params.user_group,
        create_parents=True,
        cd_access='a',
    )
    Directory(
        [params.yarn_log_dir_prefix],
        owner=params.yarn_user,
        group=params.user_group,
        create_parents=True,
        ignore_failures=True,
        cd_access='a',
    )

    XmlConfig(
        "core-site.xml",
        conf_dir=config_dir,
        configurations=params.config['configurations']['core-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['core-site'],
        owner=params.hdfs_user,
        group=params.user_group,
        mode=0644)

    # During RU, Core Masters and Slaves need hdfs-site.xml
    # TODO, instead of specifying individual configs, which is susceptible to breaking when new configs are added,
    # RU should rely on all available in <stack-root>/<version>/hadoop/conf
    if 'hdfs-site' in params.config['configurations']:
        XmlConfig(
            "hdfs-site.xml",
            conf_dir=config_dir,
            configurations=params.config['configurations']['hdfs-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['hdfs-site'],
            owner=params.hdfs_user,
            group=params.user_group,
            mode=0644)

    XmlConfig(
        "mapred-site.xml",
        conf_dir=config_dir,
        configurations=params.config['configurations']['mapred-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['mapred-site'],
        owner=params.yarn_user,
        group=params.user_group,
        mode=0644)

    XmlConfig(
        "yarn-site.xml",
        conf_dir=config_dir,
        configurations=params.config['configurations']['yarn-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['yarn-site'],
        owner=params.yarn_user,
        group=params.user_group,
        mode=0644)

    XmlConfig(
        "capacity-scheduler.xml",
        conf_dir=config_dir,
        configurations=params.config['configurations']['capacity-scheduler'],
        configuration_attributes=params.config['configuration_attributes']
        ['capacity-scheduler'],
        owner=params.yarn_user,
        group=params.user_group,
        mode=0644)

    if name == 'resourcemanager':
        Directory(
            params.rm_nodes_exclude_dir,
            mode=0755,
            create_parents=True,
            cd_access='a',
        )
        File(params.rm_nodes_exclude_path,
             owner=params.yarn_user,
             group=params.user_group)
        File(params.yarn_job_summary_log,
             owner=params.yarn_user,
             group=params.user_group)
        if (not is_empty(params.node_label_enable) and params.node_label_enable) \
                or (is_empty(params.node_label_enable) and params.node_labels_dir):
            params.HdfsResource(params.node_labels_dir,
                                type="directory",
                                action="create_on_execute",
                                change_permissions_for_parents=True,
                                owner=params.yarn_user,
                                group=params.user_group,
                                mode=0700)
            params.HdfsResource(None, action="execute")

    elif name == 'apptimelineserver':
        Directory(
            params.ats_leveldb_dir,
            owner=params.yarn_user,
            group=params.user_group,
            create_parents=True,
            cd_access="a",
        )

        # if the stack supports the application timeline-service state store property (timeline_state_store stack feature)
        if params.stack_supports_timeline_state_store:
            Directory(
                params.ats_leveldb_state_store_dir,
                owner=params.yarn_user,
                group=params.user_group,
                create_parents=True,
                cd_access="a",
            )
        # app timeline server 1.5 directories
        if not is_empty(params.entity_groupfs_store_dir):
            parent_path = os.path.dirname(params.entity_groupfs_store_dir)
            params.HdfsResource(parent_path,
                                type="directory",
                                action="create_on_execute",
                                change_permissions_for_parents=True,
                                owner=params.yarn_user,
                                group=params.user_group,
                                mode=0755)
            params.HdfsResource(params.entity_groupfs_store_dir,
                                type="directory",
                                action="create_on_execute",
                                owner=params.yarn_user,
                                group=params.user_group,
                                mode=params.entity_groupfs_store_dir_mode)
        if not is_empty(params.entity_groupfs_active_dir):
            parent_path = os.path.dirname(params.entity_groupfs_active_dir)
            params.HdfsResource(parent_path,
                                type="directory",
                                action="create_on_execute",
                                change_permissions_for_parents=True,
                                owner=params.yarn_user,
                                group=params.user_group,
                                mode=0755)
            params.HdfsResource(params.entity_groupfs_active_dir,
                                type="directory",
                                action="create_on_execute",
                                owner=params.yarn_user,
                                group=params.user_group,
                                mode=params.entity_groupfs_active_dir_mode)
        params.HdfsResource(None, action="execute")

    File(format("{limits_conf_dir}/yarn.conf"),
         mode=0644,
         content=Template('yarn.conf.j2'))

    File(format("{limits_conf_dir}/mapreduce.conf"),
         mode=0644,
         content=Template('mapreduce.conf.j2'))

    File(os.path.join(config_dir, "yarn-env.sh"),
         owner=params.yarn_user,
         group=params.user_group,
         mode=0755,
         content=InlineTemplate(params.yarn_env_sh_template))

    container_executor = format("{yarn_container_bin}/container-executor")
    File(container_executor,
         group=params.yarn_executor_container_group,
         mode=params.container_executor_mode)

    File(os.path.join(config_dir, "container-executor.cfg"),
         group=params.user_group,
         mode=0644,
         content=Template('container-executor.cfg.j2'))

    Directory(params.cgroups_dir,
              group=params.user_group,
              create_parents=True,
              mode=0755,
              cd_access="a")

    if params.security_enabled:
        tc_mode = 0644
        tc_owner = "root"
    else:
        tc_mode = None
        tc_owner = params.hdfs_user

    File(os.path.join(config_dir, "mapred-env.sh"),
         owner=tc_owner,
         mode=0755,
         content=InlineTemplate(params.mapred_env_sh_template))

    if params.security_enabled:
        File(os.path.join(params.hadoop_bin, "task-controller"),
             owner="root",
             group=params.mapred_tt_group,
             mode=06050)
        File(os.path.join(config_dir, 'taskcontroller.cfg'),
             owner=tc_owner,
             mode=tc_mode,
             group=params.mapred_tt_group,
             content=Template("taskcontroller.cfg.j2"))
    else:
        File(os.path.join(config_dir, 'taskcontroller.cfg'),
             owner=tc_owner,
             content=Template("taskcontroller.cfg.j2"))

    if "mapred-site" in params.config['configurations']:
        XmlConfig(
            "mapred-site.xml",
            conf_dir=config_dir,
            configurations=params.config['configurations']['mapred-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['mapred-site'],
            owner=params.mapred_user,
            group=params.user_group)

    if "capacity-scheduler" in params.config['configurations']:
        XmlConfig(
            "capacity-scheduler.xml",
            conf_dir=config_dir,
            configurations=params.config['configurations']
            ['capacity-scheduler'],
            configuration_attributes=params.config['configuration_attributes']
            ['capacity-scheduler'],
            owner=params.hdfs_user,
            group=params.user_group)
    if "ssl-client" in params.config['configurations']:
        XmlConfig(
            "ssl-client.xml",
            conf_dir=config_dir,
            configurations=params.config['configurations']['ssl-client'],
            configuration_attributes=params.config['configuration_attributes']
            ['ssl-client'],
            owner=params.hdfs_user,
            group=params.user_group)

        Directory(
            params.hadoop_conf_secure_dir,
            create_parents=True,
            owner='root',
            group=params.user_group,
            cd_access='a',
        )

        XmlConfig(
            "ssl-client.xml",
            conf_dir=params.hadoop_conf_secure_dir,
            configurations=params.config['configurations']['ssl-client'],
            configuration_attributes=params.config['configuration_attributes']
            ['ssl-client'],
            owner=params.hdfs_user,
            group=params.user_group)

    if "ssl-server" in params.config['configurations']:
        XmlConfig(
            "ssl-server.xml",
            conf_dir=config_dir,
            configurations=params.config['configurations']['ssl-server'],
            configuration_attributes=params.config['configuration_attributes']
            ['ssl-server'],
            owner=params.hdfs_user,
            group=params.user_group)
    if os.path.exists(os.path.join(config_dir, 'fair-scheduler.xml')):
        File(os.path.join(config_dir, 'fair-scheduler.xml'),
             owner=params.mapred_user,
             group=params.user_group)

    if os.path.exists(os.path.join(config_dir, 'ssl-client.xml.example')):
        File(os.path.join(config_dir, 'ssl-client.xml.example'),
             owner=params.mapred_user,
             group=params.user_group)

    if os.path.exists(os.path.join(config_dir, 'ssl-server.xml.example')):
        File(os.path.join(config_dir, 'ssl-server.xml.example'),
             owner=params.mapred_user,
             group=params.user_group)
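
Each XmlConfig call above serializes a configuration dictionary into a Hadoop-style XML file under the chosen conf_dir. The sketch below is a simplified, hypothetical analog of that serialization step only (it ignores configuration_attributes, ownership, and file modes); write_hadoop_config and the sample property are illustrative.

import xml.etree.ElementTree as ET

def write_hadoop_config(path, properties):
    # build <configuration><property><name>..</name><value>..</value></property>..
    root = ET.Element('configuration')
    for name, value in sorted(properties.items()):
        prop = ET.SubElement(root, 'property')
        ET.SubElement(prop, 'name').text = name
        ET.SubElement(prop, 'value').text = str(value)
    ET.ElementTree(root).write(path, xml_declaration=True, encoding='utf-8')

write_hadoop_config('/tmp/yarn-site.xml',
                    {'yarn.nodemanager.local-dirs': '/hadoop/yarn/local'})
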
Example #23
def hdfs(name=None):
    import params

    # On some operating systems this folder may not exist, so create it before placing files there
    Directory(
        params.limits_conf_dir,
        create_parents=True,
        owner='root',
        group='root')

    File(
        os.path.join(params.limits_conf_dir, 'hdfs.conf'),
        owner='root',
        group='root',
        mode=0644,
        content=Template("hdfs.conf.j2"))

    if params.security_enabled:
        File(
            os.path.join(params.hadoop_conf_dir, 'hdfs_dn_jaas.conf'),
            owner=params.hdfs_user,
            group=params.user_group,
            content=Template("hdfs_dn_jaas.conf.j2"))
        File(
            os.path.join(params.hadoop_conf_dir, 'hdfs_nn_jaas.conf'),
            owner=params.hdfs_user,
            group=params.user_group,
            content=Template("hdfs_nn_jaas.conf.j2"))
        if params.dfs_ha_enabled:
            File(
                os.path.join(params.hadoop_conf_dir, 'hdfs_jn_jaas.conf'),
                owner=params.hdfs_user,
                group=params.user_group,
                content=Template("hdfs_jn_jaas.conf.j2"))

        tc_mode = 0644
        tc_owner = "root"
    else:
        tc_mode = None
        tc_owner = params.hdfs_user

    if "hadoop-policy" in params.config['configurations']:
        XmlConfig(
            "hadoop-policy.xml",
            conf_dir=params.hadoop_conf_dir,
            configurations=params.config['configurations']['hadoop-policy'],
            configuration_attributes=params.config['configurationAttributes']
            ['hadoop-policy'],
            owner=params.hdfs_user,
            group=params.user_group)

    if "ssl-client" in params.config['configurations']:
        XmlConfig(
            "ssl-client.xml",
            conf_dir=params.hadoop_conf_dir,
            configurations=params.config['configurations']['ssl-client'],
            configuration_attributes=params.config['configurationAttributes']
            ['ssl-client'],
            owner=params.hdfs_user,
            group=params.user_group)

        Directory(
            params.hadoop_conf_secure_dir,
            create_parents=True,
            owner='root',
            group=params.user_group,
            cd_access='a',
        )

        XmlConfig(
            "ssl-client.xml",
            conf_dir=params.hadoop_conf_secure_dir,
            configurations=params.config['configurations']['ssl-client'],
            configuration_attributes=params.config['configurationAttributes']
            ['ssl-client'],
            owner=params.hdfs_user,
            group=params.user_group)

    if "ssl-server" in params.config['configurations']:
        XmlConfig(
            "ssl-server.xml",
            conf_dir=params.hadoop_conf_dir,
            configurations=params.config['configurations']['ssl-server'],
            configuration_attributes=params.config['configurationAttributes']
            ['ssl-server'],
            owner=params.hdfs_user,
            group=params.user_group)

    XmlConfig(
        "hdfs-site.xml",
        conf_dir=params.hadoop_conf_dir,
        configurations=params.config['configurations']['hdfs-site'],
        configuration_attributes=params.config['configurationAttributes']
        ['hdfs-site'],
        owner=params.hdfs_user,
        group=params.user_group)

    XmlConfig(
        "core-site.xml",
        conf_dir=params.hadoop_conf_dir,
        configurations=params.config['configurations']['core-site'],
        configuration_attributes=params.config['configurationAttributes']
        ['core-site'],
        owner=params.hdfs_user,
        group=params.user_group,
        mode=0644,
        xml_include_file=params.mount_table_xml_inclusion_file_full_path)

    if params.mount_table_content:
        File(
            params.mount_table_xml_inclusion_file_full_path,
            owner=params.hdfs_user,
            group=params.user_group,
            content=params.mount_table_content,
            mode=0644)

    File(
        os.path.join(params.hadoop_conf_dir, 'slaves'),
        owner=tc_owner,
        content=Template("slaves.j2"))
Example #24
def setup_usersync(upgrade_type=None):
    import params

    usersync_home = params.usersync_home
    ranger_home = params.ranger_home
    ranger_ugsync_conf = params.ranger_ugsync_conf

    if not is_empty(
            params.ranger_usersync_ldap_ldapbindpassword
    ) and params.ug_sync_source == 'org.apache.ranger.ldapusersync.process.LdapUserGroupBuilder':
        password_validation(params.ranger_usersync_ldap_ldapbindpassword)

    Directory(params.ranger_pid_dir,
              mode=0755,
              owner=params.unix_user,
              group=params.user_group,
              cd_access="a",
              create_parents=True)

    Directory(params.usersync_log_dir,
              owner=params.unix_user,
              group=params.unix_group,
              cd_access='a',
              create_parents=True,
              mode=0755,
              recursive_ownership=True)

    File(format('{ranger_ugsync_conf}/ranger-usersync-env-logdir.sh'),
         content=format("export logdir={usersync_log_dir}"),
         owner=params.unix_user,
         group=params.unix_group,
         mode=0755)

    Directory(format("{ranger_ugsync_conf}/"), owner=params.unix_user)

    if upgrade_type is not None:
        src_file = format(
            '{usersync_home}/conf.dist/ranger-ugsync-default.xml')
        dst_file = format('{usersync_home}/conf/ranger-ugsync-default.xml')
        Execute(('cp', '-f', src_file, dst_file), sudo=True)

    if params.stack_supports_ranger_log4j:
        File(format('{usersync_home}/conf/log4j.properties'),
             owner=params.unix_user,
             group=params.unix_group,
             content=params.usersync_log4j,
             mode=0644)
    elif upgrade_type is not None and not params.stack_supports_ranger_log4j:
        src_file = format('{usersync_home}/conf.dist/log4j.xml')
        dst_file = format('{usersync_home}/conf/log4j.xml')
        Execute(('cp', '-f', src_file, dst_file), sudo=True)

    XmlConfig(
        "ranger-ugsync-site.xml",
        conf_dir=ranger_ugsync_conf,
        configurations=params.config['configurations']['ranger-ugsync-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['ranger-ugsync-site'],
        owner=params.unix_user,
        group=params.unix_group,
        mode=0644)

    if os.path.isfile(params.ranger_ugsync_default_file):
        File(params.ranger_ugsync_default_file,
             owner=params.unix_user,
             group=params.unix_group)

    if os.path.isfile(params.usgsync_log4j_file):
        File(params.usgsync_log4j_file,
             owner=params.unix_user,
             group=params.unix_group)

    if os.path.isfile(params.cred_validator_file):
        File(params.cred_validator_file, group=params.unix_group, mode=04555)

    ranger_credential_helper(params.ugsync_cred_lib,
                             'usersync.ssl.key.password',
                             params.ranger_usersync_keystore_password,
                             params.ugsync_jceks_path)

    if not is_empty(
            params.ranger_usersync_ldap_ldapbindpassword
    ) and params.ug_sync_source == 'org.apache.ranger.ldapusersync.process.LdapUserGroupBuilder':
        ranger_credential_helper(params.ugsync_cred_lib,
                                 'ranger.usersync.ldap.bindalias',
                                 params.ranger_usersync_ldap_ldapbindpassword,
                                 params.ugsync_jceks_path)

    ranger_credential_helper(params.ugsync_cred_lib,
                             'usersync.ssl.truststore.password',
                             params.ranger_usersync_truststore_password,
                             params.ugsync_jceks_path)

    File(params.ugsync_jceks_path,
         owner=params.unix_user,
         group=params.unix_group,
         mode=0640)

    File([params.usersync_start, params.usersync_stop],
         owner=params.unix_user,
         group=params.unix_group)

    File(
        params.usersync_services_file,
        mode=0755,
    )

    Execute(('ln', '-sf', format('{usersync_services_file}'),
             '/usr/bin/ranger-usersync'),
            not_if=format("ls /usr/bin/ranger-usersync"),
            only_if=format("ls {usersync_services_file}"),
            sudo=True)

    if not os.path.isfile(params.ranger_usersync_keystore_file):
        cmd = format(
            "{java_home}/bin/keytool -genkeypair -keyalg RSA -alias selfsigned -keystore '{ranger_usersync_keystore_file}' -keypass {ranger_usersync_keystore_password!p} -storepass {ranger_usersync_keystore_password!p} -validity 3600 -keysize 2048 -dname '{default_dn_name}'"
        )

        Execute(cmd, logoutput=True, user=params.unix_user)

        File(params.ranger_usersync_keystore_file,
             owner=params.unix_user,
             group=params.unix_group,
             mode=0640)

    create_core_site_xml(ranger_ugsync_conf)
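
The keystore step above runs keytool to generate a self-signed keystore only when the file does not already exist. The following is a minimal sketch of that guard using subprocess, assuming keytool lives under the given java_home; ensure_keystore and its arguments are placeholders, and the flags simply mirror the command shown above.

import os
import subprocess

def ensure_keystore(keystore_path, password, dname,
                    java_home='/usr/lib/jvm/java'):
    if os.path.isfile(keystore_path):
        return  # keystore already provisioned, nothing to do
    subprocess.check_call([
        os.path.join(java_home, 'bin', 'keytool'), '-genkeypair',
        '-keyalg', 'RSA', '-alias', 'selfsigned',
        '-keystore', keystore_path,
        '-keypass', password, '-storepass', password,
        '-validity', '3600', '-keysize', '2048', '-dname', dname,
    ])
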
Example #25
def yarn(name=None, config_dir=None):
    """
  :param name: Component name, apptimelinereader, apptimelineserver, nodemanager, resourcemanager, or None (defaults for client)
  :param config_dir: Which config directory to write configs to, which could be different during rolling upgrade.
  """
    import params

    install_lzo_if_needed()

    if config_dir is None:
        config_dir = params.hadoop_conf_dir

    Directory(
        [params.yarn_log_dir_prefix],
        owner=params.yarn_user,
        group=params.user_group,
        create_parents=True,
        ignore_failures=True,
        cd_access='a',
        mode=0775,
    )

    Directory(
        [params.yarn_pid_dir_prefix, params.yarn_pid_dir, params.yarn_log_dir],
        owner=params.yarn_user,
        group=params.user_group,
        create_parents=True,
        cd_access='a',
    )

    Directory(
        [
            params.mapred_pid_dir_prefix, params.mapred_pid_dir,
            params.mapred_log_dir_prefix, params.mapred_log_dir
        ],
        owner=params.mapred_user,
        group=params.user_group,
        create_parents=True,
        cd_access='a',
    )

    Directory(
        params.yarn_hbase_conf_dir,
        owner=params.yarn_hbase_user,
        group=params.user_group,
        create_parents=True,
        cd_access='a',
    )

    # Some of these function calls depend on the directories above being created first.
    if name == 'resourcemanager':
        setup_resourcemanager()
    elif name == 'nodemanager':
        setup_nodemanager()
    elif name == 'apptimelineserver':
        setup_ats()
    elif name == 'historyserver':
        setup_historyserver()
    elif name == 'apptimelinereader':
        if not params.use_external_hbase and not params.is_hbase_system_service_launch:
            setup_atsv2_hbase_directories()
            setup_atsv2_hbase_files()

    generate_logfeeder_input_config(
        'yarn', Template("input.config-yarn.json.j2", extra_imports=[default]))

    # if there is viewFS mount table content, create a separate xml config and include it in core-site;
    # otherwise just create core-site
    if params.mount_table_content:
        XmlConfig(
            "core-site.xml",
            conf_dir=config_dir,
            configurations=params.config['configurations']['core-site'],
            configuration_attributes=params.config['configurationAttributes']
            ['core-site'],
            owner=params.hdfs_user,
            group=params.user_group,
            mode=0644,
            xml_include_file=os.path.join(config_dir,
                                          params.xml_inclusion_file_name))

        File(os.path.join(config_dir, params.xml_inclusion_file_name),
             owner=params.hdfs_user,
             group=params.user_group,
             content=params.mount_table_content,
             mode=0644)
    else:
        XmlConfig(
            "core-site.xml",
            conf_dir=config_dir,
            configurations=params.config['configurations']['core-site'],
            configuration_attributes=params.config['configurationAttributes']
            ['core-site'],
            owner=params.hdfs_user,
            group=params.user_group,
            mode=0644)

    # During RU, Core Masters and Slaves need hdfs-site.xml
    # TODO, instead of specifying individual configs, which is susceptible to breaking when new configs are added,
    # RU should rely on all available in <stack-root>/<version>/hadoop/conf
    XmlConfig("hdfs-site.xml",
              conf_dir=config_dir,
              configurations=params.config['configurations']['hdfs-site'],
              configuration_attributes=params.config['configurationAttributes']
              ['hdfs-site'],
              owner=params.hdfs_user,
              group=params.user_group,
              mode=0644)

    XmlConfig("mapred-site.xml",
              conf_dir=config_dir,
              configurations=params.config['configurations']['mapred-site'],
              configuration_attributes=params.config['configurationAttributes']
              ['mapred-site'],
              owner=params.yarn_user,
              group=params.user_group,
              mode=0644)

    configs = {}
    configs.update(params.config['configurations']['yarn-site'])
    configs["hadoop.registry.dns.bind-port"] = params.config['configurations'][
        'yarn-env']['registry.dns.bind-port']
    XmlConfig("yarn-site.xml",
              conf_dir=config_dir,
              configurations=configs,
              configuration_attributes=params.config['configurationAttributes']
              ['yarn-site'],
              owner=params.yarn_user,
              group=params.user_group,
              mode=0644)

    XmlConfig(
        "capacity-scheduler.xml",
        conf_dir=config_dir,
        configurations=params.config['configurations']['capacity-scheduler'],
        configuration_attributes=params.config['configurationAttributes']
        ['capacity-scheduler'],
        owner=params.yarn_user,
        group=params.user_group,
        mode=0644)

    XmlConfig(
        "hbase-site.xml",
        conf_dir=params.yarn_hbase_conf_dir,
        configurations=params.config['configurations']['yarn-hbase-site'],
        configuration_attributes=params.config['configurationAttributes']
        ['yarn-hbase-site'],
        owner=params.yarn_hbase_user,
        group=params.user_group,
        mode=0644)

    XmlConfig("resource-types.xml",
              conf_dir=config_dir,
              configurations=params.config['configurations']['resource-types'],
              configuration_attributes=params.config['configurationAttributes']
              ['resource-types'],
              owner=params.yarn_user,
              group=params.user_group,
              mode=0644)

    File(format("{limits_conf_dir}/yarn.conf"),
         mode=0644,
         content=Template('yarn.conf.j2'))

    File(format("{limits_conf_dir}/mapreduce.conf"),
         mode=0644,
         content=Template('mapreduce.conf.j2'))

    File(os.path.join(config_dir, "yarn-env.sh"),
         owner=params.yarn_user,
         group=params.user_group,
         mode=0755,
         content=InlineTemplate(params.yarn_env_sh_template))

    File(format("{yarn_bin}/container-executor"),
         group=params.yarn_executor_container_group,
         mode=params.container_executor_mode)

    File(os.path.join(config_dir, "container-executor.cfg"),
         group=params.user_group,
         mode=0644,
         content=InlineTemplate(params.container_executor_cfg_template))

    Directory(params.cgroups_dir,
              group=params.user_group,
              create_parents=True,
              mode=0755,
              cd_access="a")

    File(os.path.join(config_dir, "mapred-env.sh"),
         owner=params.tc_owner,
         mode=0755,
         content=InlineTemplate(params.mapred_env_sh_template))

    if params.yarn_nodemanager_recovery_dir:
        Directory(
            InlineTemplate(params.yarn_nodemanager_recovery_dir).get_content(),
            owner=params.yarn_user,
            group=params.user_group,
            create_parents=True,
            mode=0755,
            cd_access='a',
        )

    if params.security_enabled:
        File(os.path.join(params.hadoop_bin, "task-controller"),
             owner="root",
             group=params.mapred_tt_group,
             mode=06050)
        File(os.path.join(config_dir, 'taskcontroller.cfg'),
             owner=params.tc_owner,
             mode=params.tc_mode,
             group=params.mapred_tt_group,
             content=Template("taskcontroller.cfg.j2"))
        File(os.path.join(config_dir, 'yarn_jaas.conf'),
             owner=params.yarn_user,
             group=params.user_group,
             mode=0644,
             content=Template("yarn_jaas.conf.j2"))
        if params.has_ats:
            File(os.path.join(config_dir, 'yarn_ats_jaas.conf'),
                 owner=params.yarn_user,
                 group=params.user_group,
                 mode=0644,
                 content=Template("yarn_ats_jaas.conf.j2"))
        if params.has_registry_dns:
            File(os.path.join(config_dir, 'yarn_registry_dns_jaas.conf'),
                 owner=params.yarn_user,
                 group=params.user_group,
                 mode=0644,
                 content=Template("yarn_registry_dns_jaas.conf.j2"))
        File(os.path.join(config_dir, 'yarn_nm_jaas.conf'),
             owner=params.yarn_user,
             group=params.user_group,
             mode=0644,
             content=Template("yarn_nm_jaas.conf.j2"))
        if params.has_hs:
            File(os.path.join(config_dir, 'mapred_jaas.conf'),
                 owner=params.mapred_user,
                 group=params.user_group,
                 mode=0644,
                 content=Template("mapred_jaas.conf.j2"))
    else:
        File(os.path.join(config_dir, 'taskcontroller.cfg'),
             owner=params.tc_owner,
             content=Template("taskcontroller.cfg.j2"))

    XmlConfig("mapred-site.xml",
              conf_dir=config_dir,
              configurations=params.config['configurations']['mapred-site'],
              configuration_attributes=params.config['configurationAttributes']
              ['mapred-site'],
              mode=0644,
              owner=params.mapred_user,
              group=params.user_group)

    XmlConfig(
        "capacity-scheduler.xml",
        conf_dir=config_dir,
        configurations=params.config['configurations']['capacity-scheduler'],
        configuration_attributes=params.config['configurationAttributes']
        ['capacity-scheduler'],
        mode=0644,
        owner=params.hdfs_user,
        group=params.user_group)

    if "ssl-client" in params.config['configurations']:
        XmlConfig(
            "ssl-client.xml",
            conf_dir=config_dir,
            configurations=params.config['configurations']['ssl-client'],
            configuration_attributes=params.config['configurationAttributes']
            ['ssl-client'],
            mode=0644,
            owner=params.hdfs_user,
            group=params.user_group)

        Directory(
            params.hadoop_conf_secure_dir,
            create_parents=True,
            owner='root',
            group=params.user_group,
            cd_access='a',
        )

        XmlConfig(
            "ssl-client.xml",
            conf_dir=params.hadoop_conf_secure_dir,
            configurations=params.config['configurations']['ssl-client'],
            configuration_attributes=params.config['configurationAttributes']
            ['ssl-client'],
            mode=0644,
            owner=params.hdfs_user,
            group=params.user_group)

    if "ssl-server" in params.config['configurations']:
        XmlConfig(
            "ssl-server.xml",
            conf_dir=config_dir,
            configurations=params.config['configurations']['ssl-server'],
            configuration_attributes=params.config['configurationAttributes']
            ['ssl-server'],
            mode=0644,
            owner=params.hdfs_user,
            group=params.user_group)
    if os.path.exists(os.path.join(config_dir, 'fair-scheduler.xml')):
        File(os.path.join(config_dir, 'fair-scheduler.xml'),
             mode=0644,
             owner=params.mapred_user,
             group=params.user_group)

    if os.path.exists(os.path.join(config_dir, 'ssl-client.xml.example')):
        File(os.path.join(config_dir, 'ssl-client.xml.example'),
             mode=0644,
             owner=params.mapred_user,
             group=params.user_group)

    if os.path.exists(os.path.join(config_dir, 'ssl-server.xml.example')):
        File(os.path.join(config_dir, 'ssl-server.xml.example'),
             mode=0644,
             owner=params.mapred_user,
             group=params.user_group)

    setup_atsv2_backend(name, config_dir)
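
Before yarn-site.xml is written above, the yarn-site dictionary is copied and 'hadoop.registry.dns.bind-port' is injected from the yarn-env section. The snippet below sketches just that overlay step, with placeholder dictionaries standing in for params.config.

yarn_site = {'yarn.resourcemanager.hostname': 'rm.example.com'}
yarn_env = {'registry.dns.bind-port': '53'}

configs = dict(yarn_site)  # copy so the source config stays untouched
configs['hadoop.registry.dns.bind-port'] = yarn_env['registry.dns.bind-port']
# 'configs' is what would then be handed to the yarn-site.xml writer
print(configs)
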
Example #26
def setup_tagsync(upgrade_type=None):
    import params

    ranger_tagsync_home = params.ranger_tagsync_home
    ranger_home = params.ranger_home
    ranger_tagsync_conf = params.ranger_tagsync_conf

    Directory(format("{ranger_tagsync_conf}"),
              owner=params.unix_user,
              group=params.unix_group,
              create_parents=True)

    Directory(
        params.ranger_pid_dir,
        mode=0755,
        create_parents=True,
        owner=params.unix_user,
        group=params.user_group,
        cd_access="a",
    )

    Directory(params.tagsync_log_dir,
              create_parents=True,
              owner=params.unix_user,
              group=params.unix_group,
              cd_access="a",
              mode=0755)

    File(format('{ranger_tagsync_conf}/ranger-tagsync-env-logdir.sh'),
         content=format("export RANGER_TAGSYNC_LOG_DIR={tagsync_log_dir}"),
         owner=params.unix_user,
         group=params.unix_group,
         mode=0755)

    XmlConfig(
        "ranger-tagsync-site.xml",
        conf_dir=ranger_tagsync_conf,
        configurations=params.config['configurations']['ranger-tagsync-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['ranger-tagsync-site'],
        owner=params.unix_user,
        group=params.unix_group,
        mode=0644)

    PropertiesFile(
        format('{ranger_tagsync_conf}/atlas-application.properties'),
        properties=params.tagsync_application_properties,
        mode=0755,
        owner=params.unix_user,
        group=params.unix_group)

    File(format('{ranger_tagsync_conf}/log4j.properties'),
         owner=params.unix_user,
         group=params.unix_group,
         content=params.tagsync_log4j,
         mode=0644)

    File(
        params.tagsync_services_file,
        mode=0755,
    )

    Execute(('ln', '-sf', format('{tagsync_services_file}'),
             '/usr/bin/ranger-tagsync'),
            not_if=format("ls /usr/bin/ranger-tagsync"),
            only_if=format("ls {tagsync_services_file}"),
            sudo=True)

    create_core_site_xml(ranger_tagsync_conf)
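
setup_tagsync renders atlas-application.properties through the PropertiesFile resource. As a rough, hypothetical analog, the sketch below writes a dict of settings as Java-style key=value lines; write_properties and the sample property are illustrative and not the Ambari resource.

def write_properties(path, properties):
    # one key=value pair per line, sorted for a stable file layout
    with open(path, 'w') as handle:
        for key in sorted(properties):
            handle.write('%s=%s\n' % (key, properties[key]))

write_properties('/tmp/atlas-application.properties',
                 {'atlas.kafka.bootstrap.servers': 'localhost:6667'})
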
Example #27
def yarn(name=None, config_dir=None):
    """
  :param name: Component name, apptimelineserver, nodemanager, resourcemanager, or None (defaults for client)
  :param config_dir: Which config directory to write configs to, which could be different during rolling upgrade.
  """
    import params

    install_lzo_if_needed()

    if config_dir is None:
        config_dir = params.hadoop_conf_dir

    if params.yarn_nodemanager_recovery_dir:
        Directory(
            InlineTemplate(params.yarn_nodemanager_recovery_dir).get_content(),
            owner=params.yarn_user,
            group=params.user_group,
            create_parents=True,
            mode=0755,
            cd_access='a',
        )

    Directory(
        [params.yarn_pid_dir_prefix, params.yarn_pid_dir, params.yarn_log_dir],
        owner=params.yarn_user,
        group=params.user_group,
        create_parents=True,
        cd_access='a',
    )
    Directory(
        [
            params.mapred_pid_dir_prefix, params.mapred_pid_dir,
            params.mapred_log_dir_prefix, params.mapred_log_dir
        ],
        owner=params.mapred_user,
        group=params.user_group,
        create_parents=True,
        cd_access='a',
    )
    Directory(
        [params.yarn_log_dir_prefix],
        owner=params.yarn_user,
        group=params.user_group,
        create_parents=True,
        ignore_failures=True,
        cd_access='a',
    )

    # Some of these function calls depend on the directories above being created first.
    if name == 'resourcemanager':
        setup_resourcemanager()
    elif name == 'nodemanager':
        setup_nodemanager()
    elif name == 'apptimelineserver':
        setup_ats()
    elif name == 'historyserver':
        setup_historyserver()

    XmlConfig(
        "core-site.xml",
        conf_dir=config_dir,
        configurations=params.config['configurations']['core-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['core-site'],
        owner=params.hdfs_user,
        group=params.user_group,
        mode=0644)

    # During RU, Core Masters and Slaves need hdfs-site.xml
    # TODO, instead of specifying individual configs, which is susceptible to breaking when new configs are added,
    # RU should rely on all available in <stack-root>/<version>/hadoop/conf
    XmlConfig(
        "hdfs-site.xml",
        conf_dir=config_dir,
        configurations=params.config['configurations']['hdfs-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['hdfs-site'],
        owner=params.hdfs_user,
        group=params.user_group,
        mode=0644)

    XmlConfig(
        "mapred-site.xml",
        conf_dir=config_dir,
        configurations=params.config['configurations']['mapred-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['mapred-site'],
        owner=params.yarn_user,
        group=params.user_group,
        mode=0644)

    XmlConfig(
        "yarn-site.xml",
        conf_dir=config_dir,
        configurations=params.config['configurations']['yarn-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['yarn-site'],
        owner=params.yarn_user,
        group=params.user_group,
        mode=0644)

    XmlConfig(
        "capacity-scheduler.xml",
        conf_dir=config_dir,
        configurations=params.config['configurations']['capacity-scheduler'],
        configuration_attributes=params.config['configuration_attributes']
        ['capacity-scheduler'],
        owner=params.yarn_user,
        group=params.user_group,
        mode=0644)

    File(format("{limits_conf_dir}/yarn.conf"),
         mode=0644,
         content=Template('yarn.conf.j2'))

    File(format("{limits_conf_dir}/mapreduce.conf"),
         mode=0644,
         content=Template('mapreduce.conf.j2'))

    File(os.path.join(config_dir, "yarn-env.sh"),
         owner=params.yarn_user,
         group=params.user_group,
         mode=0755,
         content=InlineTemplate(params.yarn_env_sh_template))

    File(format("{yarn_container_bin}/container-executor"),
         group=params.yarn_executor_container_group,
         mode=params.container_executor_mode)

    File(os.path.join(config_dir, "container-executor.cfg"),
         group=params.user_group,
         mode=0644,
         content=Template('container-executor.cfg.j2'))

    Directory(params.cgroups_dir,
              group=params.user_group,
              create_parents=True,
              mode=0755,
              cd_access="a")

    File(os.path.join(config_dir, "mapred-env.sh"),
         owner=params.tc_owner,
         mode=0755,
         content=InlineTemplate(params.mapred_env_sh_template))

    if params.security_enabled:
        File(os.path.join(params.hadoop_bin, "task-controller"),
             owner="root",
             group=params.mapred_tt_group,
             mode=06050)
        File(os.path.join(config_dir, 'taskcontroller.cfg'),
             owner=params.tc_owner,
             mode=params.tc_mode,
             group=params.mapred_tt_group,
             content=Template("taskcontroller.cfg.j2"))
        File(os.path.join(config_dir, 'yarn_jaas.conf'),
             owner=params.yarn_user,
             group=params.user_group,
             content=Template("yarn_jaas.conf.j2"))
        if params.has_ats:
            File(os.path.join(config_dir, 'yarn_ats_jaas.conf'),
                 owner=params.yarn_user,
                 group=params.user_group,
                 content=Template("yarn_ats_jaas.conf.j2"))
        File(os.path.join(config_dir, 'yarn_nm_jaas.conf'),
             owner=params.yarn_user,
             group=params.user_group,
             content=Template("yarn_nm_jaas.conf.j2"))
        if params.has_hs:
            File(os.path.join(config_dir, 'mapred_jaas.conf'),
                 owner=params.mapred_user,
                 group=params.user_group,
                 content=Template("mapred_jaas.conf.j2"))
    else:
        File(os.path.join(config_dir, 'taskcontroller.cfg'),
             owner=params.tc_owner,
             content=Template("taskcontroller.cfg.j2"))

    XmlConfig(
        "mapred-site.xml",
        conf_dir=config_dir,
        configurations=params.config['configurations']['mapred-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['mapred-site'],
        owner=params.mapred_user,
        group=params.user_group)

    XmlConfig(
        "capacity-scheduler.xml",
        conf_dir=config_dir,
        configurations=params.config['configurations']['capacity-scheduler'],
        configuration_attributes=params.config['configuration_attributes']
        ['capacity-scheduler'],
        owner=params.hdfs_user,
        group=params.user_group)

    if "ssl-client" in params.config['configurations']:
        XmlConfig(
            "ssl-client.xml",
            conf_dir=config_dir,
            configurations=params.config['configurations']['ssl-client'],
            configuration_attributes=params.config['configuration_attributes']
            ['ssl-client'],
            owner=params.hdfs_user,
            group=params.user_group)

        Directory(
            params.hadoop_conf_secure_dir,
            create_parents=True,
            owner='root',
            group=params.user_group,
            cd_access='a',
        )

        XmlConfig(
            "ssl-client.xml",
            conf_dir=params.hadoop_conf_secure_dir,
            configurations=params.config['configurations']['ssl-client'],
            configuration_attributes=params.config['configuration_attributes']
            ['ssl-client'],
            owner=params.hdfs_user,
            group=params.user_group)

    if "ssl-server" in params.config['configurations']:
        XmlConfig(
            "ssl-server.xml",
            conf_dir=config_dir,
            configurations=params.config['configurations']['ssl-server'],
            configuration_attributes=params.config['configuration_attributes']
            ['ssl-server'],
            owner=params.hdfs_user,
            group=params.user_group)
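
    # Ownership fix-ups for optional files that may already be present on disk.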
    if os.path.exists(os.path.join(config_dir, 'fair-scheduler.xml')):
        File(os.path.join(config_dir, 'fair-scheduler.xml'),
             owner=params.mapred_user,
             group=params.user_group)

    if os.path.exists(os.path.join(config_dir, 'ssl-client.xml.example')):
        File(os.path.join(config_dir, 'ssl-client.xml.example'),
             owner=params.mapred_user,
             group=params.user_group)

    if os.path.exists(os.path.join(config_dir, 'ssl-server.xml.example')):
        File(os.path.join(config_dir, 'ssl-server.xml.example'),
             owner=params.mapred_user,
             group=params.user_group)
Example #28
def setup_ranger_admin(upgrade_type=None):
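    """Lay down Ranger Admin: verify database connectivity, wire the conf
    symlinks, render ranger-admin-site.xml and fix ownership and permissions."""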
    import params

    if upgrade_type is None:
        upgrade_type = Script.get_upgrade_type(
            default("/commandParams/upgrade_type", ""))

    ranger_home = params.ranger_home
    ranger_conf = params.ranger_conf

    Directory(ranger_conf,
              owner=params.unix_user,
              group=params.unix_group,
              create_parents=True)

    copy_jdbc_connector()

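    # Fetch the DBConnectionVerification jar from the location given by
    # jdk_location so database connectivity can be checked before setup.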
    File(
        format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
        content=DownloadSource(
            format("{jdk_location}{check_db_connection_jar_name}")),
        mode=0644,
    )

    cp = format("{check_db_connection_jar}")
    if params.db_flavor.lower() == 'sqla':
        cp = cp + os.pathsep + format("{ranger_home}/ews/lib/sajdbc4.jar")
    else:
        cp = cp + os.pathsep + format("{driver_curl_target}")
    cp = cp + os.pathsep + format("{ranger_home}/ews/lib/*")

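    # Probe the Ranger database; the {...!p} conversion masks the password in
    # the logged command line.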
    db_connection_check_command = format(
        "{java_home}/bin/java -cp {cp} org.apache.ambari.server.DBConnectionVerification '{ranger_jdbc_connection_url}' {ranger_db_user} {ranger_db_password!p} {ranger_jdbc_driver}"
    )

    env_dict = {}
    if params.db_flavor.lower() == 'sqla':
        env_dict = {'LD_LIBRARY_PATH': params.ld_lib_path}

    Execute(db_connection_check_command,
            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
            tries=5,
            try_sleep=10,
            environment=env_dict)

    Execute(
        ('ln', '-sf', format('{ranger_home}/ews/webapp/WEB-INF/classes/conf'),
         format('{ranger_home}/conf')),
        not_if=format("ls {ranger_home}/conf"),
        only_if=format("ls {ranger_home}/ews/webapp/WEB-INF/classes/conf"),
        sudo=True)

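    # On upgrade, refresh the default site file and the Spring security context
    # from the conf.dist templates shipped with the new Ranger build.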
    if upgrade_type is not None:
        src_file = format(
            '{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/ranger-admin-default-site.xml'
        )
        dst_file = format('{ranger_home}/conf/ranger-admin-default-site.xml')
        Execute(('cp', '-f', src_file, dst_file), sudo=True)

        src_file = format(
            '{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/security-applicationContext.xml'
        )
        dst_file = format('{ranger_home}/conf/security-applicationContext.xml')

        Execute(('cp', '-f', src_file, dst_file), sudo=True)

    Directory(
        format('{ranger_home}/'),
        owner=params.unix_user,
        group=params.unix_group,
        recursive_ownership=True,
    )

    Directory(params.ranger_pid_dir,
              mode=0755,
              owner=params.unix_user,
              group=params.user_group,
              cd_access="a",
              create_parents=True)

    Directory(params.admin_log_dir,
              owner=params.unix_user,
              group=params.unix_group,
              create_parents=True,
              cd_access='a',
              mode=0755)

    File(format('{ranger_conf}/ranger-admin-env-logdir.sh'),
         content=format("export RANGER_ADMIN_LOG_DIR={admin_log_dir}"),
         owner=params.unix_user,
         group=params.unix_group,
         mode=0755)

    if os.path.isfile(params.ranger_admin_default_file):
        File(params.ranger_admin_default_file,
             owner=params.unix_user,
             group=params.unix_group)
    else:
        Logger.warning(
            'Required file {0} does not exist, copying it to {1}'.format(
                params.ranger_admin_default_file, ranger_conf))
        src_file = format(
            '{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/ranger-admin-default-site.xml'
        )
        dst_file = format('{ranger_home}/conf/ranger-admin-default-site.xml')
        Execute(('cp', '-f', src_file, dst_file), sudo=True)
        File(params.ranger_admin_default_file,
             owner=params.unix_user,
             group=params.unix_group)

    if os.path.isfile(params.security_app_context_file):
        File(params.security_app_context_file,
             owner=params.unix_user,
             group=params.unix_group)
    else:
        Logger.warning(
            'Required file {0} does not exist, copying it to {1}'.format(
                params.security_app_context_file, ranger_conf))
        src_file = format(
            '{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/security-applicationContext.xml'
        )
        dst_file = format('{ranger_home}/conf/security-applicationContext.xml')
        Execute(('cp', '-f', src_file, dst_file), sudo=True)
        File(params.security_app_context_file,
             owner=params.unix_user,
             group=params.unix_group)

    if upgrade_type is not None and params.stack_supports_config_versioning:
        if os.path.islink('/usr/bin/ranger-admin'):
            Link('/usr/bin/ranger-admin', action="delete")

        Link('/usr/bin/ranger-admin',
             to=format('{ranger_home}/ews/ranger-admin-services.sh'))

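    # Make /usr/bin/ranger-admin point at the service control script; the
    # not_if/only_if guards keep the link creation idempotent.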
    Execute(('ln', '-sf', format('{ranger_home}/ews/ranger-admin-services.sh'),
             '/usr/bin/ranger-admin'),
            not_if=format("ls /usr/bin/ranger-admin"),
            only_if=format("ls {ranger_home}/ews/ranger-admin-services.sh"),
            sudo=True)

    XmlConfig(
        "ranger-admin-site.xml",
        conf_dir=ranger_conf,
        configurations=params.config['configurations']['ranger-admin-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['ranger-admin-site'],
        owner=params.unix_user,
        group=params.unix_group,
        mode=0644)

    Directory(
        os.path.join(ranger_conf, 'ranger_jaas'),
        mode=0700,
        owner=params.unix_user,
        group=params.unix_group,
    )

    if params.stack_supports_ranger_log4j:
        File(format('{ranger_home}/ews/webapp/WEB-INF/log4j.properties'),
             owner=params.unix_user,
             group=params.unix_group,
             content=params.admin_log4j,
             mode=0644)

    do_keystore_setup(upgrade_type=upgrade_type)

    create_core_site_xml(ranger_conf)
Example #29
def setup_usersync(rolling_upgrade=False):
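    """Prepare Ranger Usersync: render ranger-ugsync-site.xml, store its secrets
    in the JCEKS credential store and set up the keystore and service symlink."""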
    import params

    usersync_home = params.usersync_home
    ranger_home = params.ranger_home
    ranger_ugsync_conf = params.ranger_ugsync_conf

    if rolling_upgrade:
        usersync_home = format("/usr/hdp/{version}/ranger-usersync")
        ranger_home = format("/usr/hdp/{version}/ranger-admin")
        ranger_ugsync_conf = format("/usr/hdp/{version}/ranger-usersync/conf")

    Directory(params.ranger_pid_dir,
              mode=0750,
              owner=params.unix_user,
              group=params.unix_group)

    Directory(params.usersync_log_dir,
              owner=params.unix_user,
              group=params.unix_group)

    Directory(format("{ranger_ugsync_conf}/"), owner=params.unix_user)

    if rolling_upgrade:
        src_file = format(
            '{usersync_home}/conf.dist/ranger-ugsync-default.xml')
        dst_file = format('{usersync_home}/conf/ranger-ugsync-default.xml')
        Execute(('cp', '-f', src_file, dst_file), sudo=True)

        src_file = format('{usersync_home}/conf.dist/log4j.xml')
        dst_file = format('{usersync_home}/conf/log4j.xml')
        Execute(('cp', '-f', src_file, dst_file), sudo=True)

    XmlConfig(
        "ranger-ugsync-site.xml",
        conf_dir=ranger_ugsync_conf,
        configurations=params.config['configurations']['ranger-ugsync-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['ranger-ugsync-site'],
        owner=params.unix_user,
        group=params.unix_group,
        mode=0644)

    if os.path.isfile(params.ranger_ugsync_default_file):
        File(params.ranger_ugsync_default_file,
             owner=params.unix_user,
             group=params.unix_group)

    if os.path.isfile(params.usgsync_log4j_file):
        File(params.usgsync_log4j_file,
             owner=params.unix_user,
             group=params.unix_group)

    if os.path.isfile(params.cred_validator_file):
        File(params.cred_validator_file, group=params.unix_group, mode=04555)

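    # Store the keystore, LDAP bind and truststore passwords in the JCEKS
    # credential store via ranger_credential_helper.py instead of keeping them
    # in plain-text configuration.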
    cred_lib = os.path.join(usersync_home, "lib", "*")
    cred_setup_prefix = (format('{ranger_home}/ranger_credential_helper.py'),
                         '-l', cred_lib)

    cred_setup = cred_setup_prefix + (
        '-f', params.ugsync_jceks_path, '-k', 'usersync.ssl.key.password',
        '-v', PasswordString(
            params.ranger_usersync_keystore_password), '-c', '1')
    Execute(cred_setup,
            environment={
                'RANGER_ADMIN_HOME': ranger_home,
                'JAVA_HOME': params.java_home
            },
            logoutput=True,
            sudo=True)

    cred_setup = cred_setup_prefix + (
        '-f', params.ugsync_jceks_path, '-k', 'ranger.usersync.ldap.bindalias',
        '-v', PasswordString(
            params.ranger_usersync_ldap_ldapbindpassword), '-c', '1')
    Execute(cred_setup,
            environment={
                'RANGER_ADMIN_HOME': ranger_home,
                'JAVA_HOME': params.java_home
            },
            logoutput=True,
            sudo=True)

    cred_setup = cred_setup_prefix + (
        '-f', params.ugsync_jceks_path, '-k',
        'usersync.ssl.truststore.password', '-v',
        PasswordString(params.ranger_usersync_truststore_password), '-c', '1')
    Execute(cred_setup,
            environment={
                'RANGER_ADMIN_HOME': ranger_home,
                'JAVA_HOME': params.java_home
            },
            logoutput=True,
            sudo=True)

    File(params.ugsync_jceks_path,
         owner=params.unix_user,
         group=params.unix_group,
         mode=0640)

    File([params.usersync_start, params.usersync_stop],
         owner=params.unix_user,
         group=params.unix_group)

    File(
        params.usersync_services_file,
        mode=0755,
    )

    Execute(('ln', '-sf', format('{usersync_services_file}'),
             '/usr/bin/ranger-usersync'),
            not_if=format("ls /usr/bin/ranger-usersync"),
            only_if=format("ls {usersync_services_file}"),
            sudo=True)

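    # First-install only: generate a self-signed keystore for usersync with
    # keytool if the keystore file does not exist yet.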
    if not os.path.isfile(params.ranger_usersync_keystore_file):
        cmd = format(
            "{java_home}/bin/keytool -genkeypair -keyalg RSA -alias selfsigned -keystore '{ranger_usersync_keystore_file}' -keypass {ranger_usersync_keystore_password!p} -storepass {ranger_usersync_keystore_password!p} -validity 3600 -keysize 2048 -dname '{default_dn_name}'"
        )

        Execute(cmd, logoutput=True, user=params.unix_user)

        File(params.ranger_usersync_keystore_file,
             owner=params.unix_user,
             group=params.unix_group,
             mode=0640)
Example #30
def kms(upgrade_type=None):
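    """Configure Ranger KMS: check database connectivity, install the init
    script and /usr/bin wrappers, seed the credential provider and render the
    KMS *-site.xml files."""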
    import params

    if params.has_ranger_admin:

        Directory(params.kms_conf_dir,
                  owner=params.kms_user,
                  group=params.kms_group,
                  recursive=True)

        if upgrade_type is not None:
            copy_jdbc_connector(stack_version=params.version)

        File(
            format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
            content=DownloadSource(
                format("{jdk_location}{check_db_connection_jar_name}")),
            mode=0644,
        )

        cp = format("{check_db_connection_jar}")
        cp = cp + os.pathsep + format(
            "{kms_home}/ews/webapp/lib/{jdbc_jar_name}")

        db_connection_check_command = format(
            "{java_home}/bin/java -cp {cp} org.apache.ambari.server.DBConnectionVerification '{ranger_kms_jdbc_connection_url}' {db_user} {db_password!p} {ranger_kms_jdbc_driver}"
        )

        env_dict = {}
        if params.db_flavor.lower() == 'sqla':
            env_dict = {'LD_LIBRARY_PATH': params.ld_library_path}

        Execute(db_connection_check_command,
                path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
                tries=5,
                try_sleep=10,
                environment=env_dict)

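        # When audit-to-DB is enabled, download the JDBC driver and copy it to
        # the configured driver target so audit events can reach the database.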
        if params.xa_audit_db_is_enabled:
            File(params.downloaded_connector_path,
                 content=DownloadSource(params.driver_source),
                 mode=0644)

            Execute(('cp', '--remove-destination',
                     params.downloaded_connector_path, params.driver_target),
                    path=["/bin", "/usr/bin/"],
                    sudo=True)

            File(params.driver_target, mode=0644)

        Directory(os.path.join(params.kms_home, 'ews', 'webapp', 'WEB-INF',
                               'classes', 'lib'),
                  mode=0755,
                  owner=params.kms_user,
                  group=params.kms_group)

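        # Install the init script and the /usr/bin wrappers; every copy and
        # symlink is guarded with not_if/only_if so re-running setup is a no-op.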
        Execute(('cp', format('{kms_home}/ranger-kms-initd'),
                 '/etc/init.d/ranger-kms'),
                not_if=format('ls /etc/init.d/ranger-kms'),
                only_if=format('ls {kms_home}/ranger-kms-initd'),
                sudo=True)

        File('/etc/init.d/ranger-kms', mode=0755)

        Execute(('chown', '-R', format('{kms_user}:{kms_group}'),
                 format('{kms_home}/')),
                sudo=True)

        Directory(params.kms_log_dir,
                  owner=params.kms_user,
                  group=params.kms_group,
                  cd_access='a',
                  recursive=True,
                  mode=0755)

        Execute(('ln', '-sf', format('{kms_home}/ranger-kms'),
                 '/usr/bin/ranger-kms'),
                not_if=format('ls /usr/bin/ranger-kms'),
                only_if=format('ls {kms_home}/ranger-kms'),
                sudo=True)

        File('/usr/bin/ranger-kms', mode=0755)

        Execute(('ln', '-sf', format('{kms_home}/ranger-kms'),
                 '/usr/bin/ranger-kms-services.sh'),
                not_if=format('ls /usr/bin/ranger-kms-services.sh'),
                only_if=format('ls {kms_home}/ranger-kms'),
                sudo=True)

        File('/usr/bin/ranger-kms-services.sh', mode=0755)

        Execute(('ln', '-sf', format('{kms_home}/ranger-kms-initd'),
                 format('{kms_home}/ranger-kms-services.sh')),
                not_if=format('ls {kms_home}/ranger-kms-services.sh'),
                only_if=format('ls {kms_home}/ranger-kms-initd'),
                sudo=True)

        File(format('{kms_home}/ranger-kms-services.sh'), mode=0755)

        Directory(params.kms_log_dir,
                  owner=params.kms_user,
                  group=params.kms_group,
                  mode=0775)

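        # Seed the credential provider with the database password and the KMS
        # master key password so neither is kept in plain text.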
        do_keystore_setup(params.credential_provider_path, params.jdbc_alias,
                          params.db_password)
        do_keystore_setup(params.credential_provider_path,
                          params.masterkey_alias,
                          params.kms_master_key_password)

        XmlConfig(
            "dbks-site.xml",
            conf_dir=params.kms_conf_dir,
            configurations=params.config['configurations']['dbks-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['dbks-site'],
            owner=params.kms_user,
            group=params.kms_group,
            mode=0644)

        XmlConfig(
            "ranger-kms-site.xml",
            conf_dir=params.kms_conf_dir,
            configurations=params.config['configurations']['ranger-kms-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['ranger-kms-site'],
            owner=params.kms_user,
            group=params.kms_group,
            mode=0644)

        XmlConfig(
            "kms-site.xml",
            conf_dir=params.kms_conf_dir,
            configurations=params.config['configurations']['kms-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['kms-site'],
            owner=params.kms_user,
            group=params.kms_group,
            mode=0644)

        File(os.path.join(params.kms_conf_dir, "kms-log4j.properties"),
             owner=params.kms_user,
             group=params.kms_group,
             content=params.kms_log4j,
             mode=0644)