Example #1
# Imports assumed for this params.py excerpt (not shown in the original snippet);
# paths follow the usual Ambari resource_management layout:
import status_params
from ambari_commons.constants import AMBARI_SUDO_BINARY
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions import conf_select, stack_select, StackFeature
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.version import format_stack_version, get_major_version
from resource_management.libraries.functions.stack_features import check_stack_feature, get_stack_feature_version
from resource_management.libraries.functions.copy_tarball import get_sysprep_skip_copy_tarballs_hdfs

sudo = AMBARI_SUDO_BINARY
component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "SPARK_CLIENT")

config = Script.get_config()
tmp_dir = Script.get_tmp_dir()

stack_version_unformatted = config['hostLevelParams']['stack_version']
stack_version_formatted = format_stack_version(stack_version_unformatted)
major_stack_version = get_major_version(stack_version_formatted)

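# upgrade_direction is only present in /commandParams while a stack upgrade
# or downgrade is in progress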
upgrade_direction = default("/commandParams/upgrade_direction", None)
java_home = config['hostLevelParams']['java_home']
stack_name = status_params.stack_name
stack_root = Script.get_stack_root()

version_for_stack_feature_checks = get_stack_feature_version(config)

sysprep_skip_copy_tarballs_hdfs = get_sysprep_skip_copy_tarballs_hdfs()

# The new cluster stack version, defined during the RESTART phase of a stack upgrade
version = default("/commandParams/version", None)

spark_conf = '/etc/spark/conf'
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hadoop_bin_dir = stack_select.get_hadoop_dir("bin")

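# When the stack supports rolling upgrades, resolve paths through the versioned
# {stack_root}/current symlinks instead of the legacy /etc locations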
if check_stack_feature(StackFeature.ROLLING_UPGRADE, version_for_stack_feature_checks):
  hadoop_home = stack_select.get_hadoop_dir("home")
  spark_conf = format("{stack_root}/current/{component_directory}/conf")
  spark_log_dir = config['configurations']['spark-env']['spark_log_dir']
  spark_daemon_memory = config['configurations']['spark-env']['spark_daemon_memory']
Example #2
# Imports assumed for this metadata.py excerpt (not shown in the original snippet).
# check_znode, upload_conf_set and create_collection are helpers defined elsewhere
# in the same Atlas service-script module:
import os
from resource_management.core.resources.system import Directory, Execute, File
from resource_management.core.source import InlineTemplate, StaticFile, Template
from resource_management.libraries.functions import solr_cloud_util, StackFeature
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.stack_features import check_stack_feature, get_stack_feature_version
from resource_management.libraries.resources.properties_file import PropertiesFile
from resource_management.libraries.resources.template_config import TemplateConfig


def metadata(type='server'):
    import params

    # Needed by both Server and Client
    Directory(params.conf_dir,
              mode=0755,
              cd_access='a',
              owner=params.metadata_user,
              group=params.user_group,
              create_parents=True)

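    # Resources below are laid down only on the Atlas server host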
    if type == "server":
        Directory([params.pid_dir],
                  mode=0755,
                  cd_access='a',
                  owner=params.metadata_user,
                  group=params.user_group,
                  create_parents=True)
        Directory(format('{conf_dir}/solr'),
                  mode=0755,
                  cd_access='a',
                  owner=params.metadata_user,
                  group=params.user_group,
                  create_parents=True,
                  recursive_ownership=True)
        Directory(params.log_dir,
                  mode=0755,
                  cd_access='a',
                  owner=params.metadata_user,
                  group=params.user_group,
                  create_parents=True)
        Directory(params.data_dir,
                  mode=0644,
                  cd_access='a',
                  owner=params.metadata_user,
                  group=params.user_group,
                  create_parents=True)
        Directory(params.expanded_war_dir,
                  mode=0644,
                  cd_access='a',
                  owner=params.metadata_user,
                  group=params.user_group,
                  create_parents=True)
        File(format("{expanded_war_dir}/atlas.war"),
             content=StaticFile(
                 format('{metadata_home}/server/webapp/atlas.war')))
        File(format("{conf_dir}/atlas-log4j.xml"),
             mode=0644,
             owner=params.metadata_user,
             group=params.user_group,
             content=InlineTemplate(params.metadata_log4j_content))
        File(format("{conf_dir}/atlas-env.sh"),
             owner=params.metadata_user,
             group=params.user_group,
             mode=0755,
             content=InlineTemplate(params.metadata_env_content))

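        # These files may already exist on disk; make sure they end up owned by
        # the metadata user and world-readable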
        files_to_chown = [
            format("{conf_dir}/policy-store.txt"),
            format("{conf_dir}/users-credentials.properties")
        ]
        for file_path in files_to_chown:
            if os.path.exists(file_path):
                Execute(
                    ('chown', format('{metadata_user}:{user_group}'), file_path),
                    sudo=True)
                Execute(('chmod', '644', file_path), sudo=True)

        if params.metadata_solrconfig_content:
            File(format("{conf_dir}/solr/solrconfig.xml"),
                 mode=0644,
                 owner=params.metadata_user,
                 group=params.user_group,
                 content=InlineTemplate(params.metadata_solrconfig_content))

    # Needed by both Server and Client
    PropertiesFile(format('{conf_dir}/{conf_file}'),
                   properties=params.application_properties,
                   mode=0644,
                   owner=params.metadata_user,
                   group=params.user_group)

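    # With Kerberos enabled, render the JAAS configuration for Atlas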
    if params.security_enabled:
        TemplateConfig(format(params.atlas_jaas_file),
                       owner=params.metadata_user)

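    # When Ambari Infra Solr is the search backend, set up the Solr client,
    # verify the Atlas znode, upload the shared config set and create the
    # index collections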
    if type == 'server' and params.search_backend_solr and params.has_infra_solr:
        solr_cloud_util.setup_solr_client(params.config)
        check_znode()
        jaasFile = params.atlas_jaas_file if params.security_enabled else None
        upload_conf_set('atlas_configs', jaasFile)

        create_collection('vertex_index', 'atlas_configs', jaasFile)
        create_collection('edge_index', 'atlas_configs', jaasFile)
        create_collection('fulltext_index', 'atlas_configs', jaasFile)

    File(params.atlas_hbase_setup,
         group=params.user_group,
         owner=params.hbase_user,
         content=Template("atlas_hbase_setup.rb.j2"))

    is_atlas_upgrade_support = check_stack_feature(
        StackFeature.ATLAS_UPGRADE_SUPPORT,
        get_stack_feature_version(params.config))

    if is_atlas_upgrade_support and params.security_enabled:

        File(params.atlas_kafka_setup,
             group=params.user_group,
             owner=params.kafka_user,
             content=Template("atlas_kafka_acl.sh.j2"))

        # These files are only required when no Kafka broker is installed on this host as a configured component
        if not params.host_with_kafka:
            File(format("{kafka_conf_dir}/kafka-env.sh"),
                 owner=params.kafka_user,
                 content=InlineTemplate(params.kafka_env_sh_template))

            File(format("{kafka_conf_dir}/kafka_jaas.conf"),
                 group=params.user_group,
                 owner=params.kafka_user,
                 content=Template("kafka_jaas.conf.j2"))
Example #3
# Imports assumed for this variant (not shown in the original snippet): the same
# resource_management imports as in Example #2, plus the following; secure_znode is
# another helper defined in the same Atlas service-script module:
import hashlib
from resource_management.libraries.functions.is_empty import is_empty
from resource_management.libraries.resources.modify_properties_file import ModifyPropertiesFile
from resource_management.libraries.resources.xml_config import XmlConfig


def metadata(type='server'):
    import params

    # Needed by both Server and Client
    Directory(params.conf_dir,
              mode=0755,
              cd_access='a',
              owner=params.metadata_user,
              group=params.user_group,
              create_parents = True
    )

    if type == "server":
      Directory([params.pid_dir],
                mode=0755,
                cd_access='a',
                owner=params.metadata_user,
                group=params.user_group,
                create_parents = True
      )
      Directory(format('{conf_dir}/solr'),
                mode=0755,
                cd_access='a',
                owner=params.metadata_user,
                group=params.user_group,
                create_parents = True,
                recursive_ownership=True
      )
      Directory(params.log_dir,
                mode=0755,
                cd_access='a',
                owner=params.metadata_user,
                group=params.user_group,
                create_parents = True
      )
      Directory(params.data_dir,
                mode=0644,
                cd_access='a',
                owner=params.metadata_user,
                group=params.user_group,
                create_parents = True
      )
      Directory(params.expanded_war_dir,
                mode=0644,
                cd_access='a',
                owner=params.metadata_user,
                group=params.user_group,
                create_parents = True
      )
      File(format("{expanded_war_dir}/atlas.war"),
           content = StaticFile(format('{metadata_home}/server/webapp/atlas.war'))
      )
      File(format("{conf_dir}/atlas-log4j.xml"),
           mode=0644,
           owner=params.metadata_user,
           group=params.user_group,
           content=InlineTemplate(params.metadata_log4j_content)
      )
      File(format("{conf_dir}/atlas-env.sh"),
           owner=params.metadata_user,
           group=params.user_group,
           mode=0755,
           content=InlineTemplate(params.metadata_env_content)
      )

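      # Seed the admin user in users-credentials.properties; Atlas stores the
      # password as a SHA-256 hash (hashing the raw str works because this is Python 2)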
      if not is_empty(params.atlas_admin_username) and not is_empty(params.atlas_admin_password):
        psswd_output = hashlib.sha256(params.atlas_admin_password).hexdigest()
        ModifyPropertiesFile(format("{conf_dir}/users-credentials.properties"),
            properties = {format('{atlas_admin_username}') : format('ROLE_ADMIN::{psswd_output}')},
            owner = params.metadata_user
        )

      files_to_chown = [format("{conf_dir}/policy-store.txt"), format("{conf_dir}/users-credentials.properties")]
      for file_path in files_to_chown:
        if os.path.exists(file_path):
          Execute(('chown', format('{metadata_user}:{user_group}'), file_path),
                  sudo=True
                  )
          Execute(('chmod', '644', file_path),
                  sudo=True
                  )

      if params.metadata_solrconfig_content:
        File(format("{conf_dir}/solr/solrconfig.xml"),
             mode=0644,
             owner=params.metadata_user,
             group=params.user_group,
             content=InlineTemplate(params.metadata_solrconfig_content)
        )

    # Needed by both Server and Client
    PropertiesFile(format('{conf_dir}/{conf_file}'),
         properties = params.application_properties,
         mode=0600,
         owner=params.metadata_user,
         group=params.user_group
    )

    if params.security_enabled:
      TemplateConfig(format(params.atlas_jaas_file),
                     owner=params.metadata_user)

    if type == 'server' and params.search_backend_solr and params.has_infra_solr:
      solr_cloud_util.setup_solr_client(params.config)
      check_znode()
      jaasFile=params.atlas_jaas_file if params.security_enabled else None
      upload_conf_set('atlas_configs', jaasFile)

      if params.security_enabled: # update permissions before creating the collections
        solr_cloud_util.add_solr_roles(params.config,
                                       roles = [params.infra_solr_role_atlas, params.infra_solr_role_ranger_audit, params.infra_solr_role_dev],
                                       new_service_principals = [params.atlas_jaas_principal])

      create_collection('vertex_index', 'atlas_configs', jaasFile)
      create_collection('edge_index', 'atlas_configs', jaasFile)
      create_collection('fulltext_index', 'atlas_configs', jaasFile)

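      # Once the config set and collections exist, restrict their znodes to
      # authorized principals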
      if params.security_enabled:
        secure_znode(format('{infra_solr_znode}/configs/atlas_configs'), jaasFile)
        secure_znode(format('{infra_solr_znode}/collections/vertex_index'), jaasFile)
        secure_znode(format('{infra_solr_znode}/collections/edge_index'), jaasFile)
        secure_znode(format('{infra_solr_znode}/collections/fulltext_index'), jaasFile)

    File(params.atlas_hbase_setup,
         group=params.user_group,
         owner=params.hbase_user,
         content=Template("atlas_hbase_setup.rb.j2")
    )

    is_atlas_upgrade_support = check_stack_feature(StackFeature.ATLAS_UPGRADE_SUPPORT, get_stack_feature_version(params.config))

    if is_atlas_upgrade_support and params.security_enabled:

      File(params.atlas_kafka_setup,
           group=params.user_group,
           owner=params.kafka_user,
           content=Template("atlas_kafka_acl.sh.j2"))

      # These files are only required when no Kafka broker is installed on this host as a configured component
      if not params.host_with_kafka:
        File(format("{kafka_conf_dir}/kafka-env.sh"),
             owner=params.kafka_user,
             content=InlineTemplate(params.kafka_env_sh_template))

        File(format("{kafka_conf_dir}/kafka_jaas.conf"),
             group=params.user_group,
             owner=params.kafka_user,
             content=Template("kafka_jaas.conf.j2"))

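    # With NameNode HA (more than one NameNode host), Atlas needs its own copy of
    # hdfs-site.xml to resolve the HDFS nameservice; otherwise drop any stale copy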
    if params.stack_supports_atlas_hdfs_site_on_namenode_ha and len(params.namenode_host) > 1:
      XmlConfig("hdfs-site.xml",
                conf_dir=params.conf_dir,
                configurations=params.config['configurations']['hdfs-site'],
                configuration_attributes=params.config['configurationAttributes']['hdfs-site'],
                owner=params.metadata_user,
                group=params.user_group,
                mode=0644
                )
    else:
      File(format('{conf_dir}/hdfs-site.xml'), action="delete")

    '''
    Atlas requires Hadoop's core-site.xml to resolve the users/groups synced into
    HadoopUGI during authentication and authorization. Previously core-site.xml was
    available in the HBase conf directory, which is part of the Atlas classpath;
    from stack 2.6 onwards it is no longer there, so core-site.xml has to be created
    in the Atlas conf directory instead.
    '''
    if params.stack_supports_atlas_core_site and params.has_namenode:
      XmlConfig("core-site.xml",
        conf_dir=params.conf_dir,
        configurations=params.config['configurations']['core-site'],
        configuration_attributes=params.config['configurationAttributes']['core-site'],
        owner=params.metadata_user,
        group=params.user_group,
        mode=0644
      )

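    # Finally, make sure the whole Atlas home tree is owned by the metadata user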
    Directory(format('{metadata_home}/'),
      owner = params.metadata_user,
      group = params.user_group,
      recursive_ownership = True,
    )
Example #4
import os
from resource_management.libraries.functions.expect import expect
from resource_management.libraries.functions import stack_features
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions import StackFeature
# Additional imports assumed for this excerpt (not shown in the original snippet):
import status_params
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions.default import default

config = Script.get_config()
stack_root = status_params.stack_root
stack_name = status_params.stack_name

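# expect() fetches the value at the given config path and casts it to the
# requested type, raising an error if the cast fails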
agent_stack_retry_on_unavailability = config['hostLevelParams'][
    'agent_stack_retry_on_unavailability']
agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count",
                                 int)

version = stack_features.get_stack_feature_version(config)

stack_version_unformatted = status_params.stack_version_unformatted
stack_version_formatted = status_params.stack_version_formatted
upgrade_direction = default("/commandParams/upgrade_direction", None)
jdk_location = config['hostLevelParams']['jdk_location']

etc_prefix_dir = "/etc/falcon"

# hadoop params
hadoop_home_dir = stack_select.get_hadoop_dir("home")
hadoop_bin_dir = stack_select.get_hadoop_dir("bin")

if check_stack_feature(StackFeature.ROLLING_UPGRADE, version):
    # if this is a server action, then use the server binaries; smoke tests
    # use the client binaries
    pass  # the body of this branch was truncated in the source excerpt