Code example #1
0
def yarn(name=None, config_dir=None):
    """Create directories and configuration files for YARN/MapReduce.

    :param name: Component name: 'apptimelineserver', 'nodemanager',
        'resourcemanager', 'historyserver', or None (defaults for client).
    :param config_dir: Which config directory to write configs to; may
        differ from the default during a rolling upgrade.
    """
    import params

    # Fall back to the regular Hadoop conf dir unless the caller supplied
    # a rolling-upgrade-specific directory.
    if config_dir is None:
        config_dir = params.hadoop_conf_dir

    # The recovery-dir property value is itself a template string, so it is
    # rendered with InlineTemplate before the directory is created.
    # Only created when the property is set (truthy).
    if params.yarn_nodemanager_recovery_dir:
        Directory(
            InlineTemplate(params.yarn_nodemanager_recovery_dir).get_content(),
            owner=params.yarn_user,
            group=params.user_group,
            create_parents=True,
            mode=0755,
            cd_access='a',
        )

    # PID and log directories for the YARN daemons.
    Directory(
        [params.yarn_pid_dir_prefix, params.yarn_pid_dir, params.yarn_log_dir],
        owner=params.yarn_user,
        group=params.user_group,
        create_parents=True,
        cd_access='a',
    )
    # PID and log directories for the MapReduce daemons.
    Directory(
        [
            params.mapred_pid_dir_prefix, params.mapred_pid_dir,
            params.mapred_log_dir_prefix, params.mapred_log_dir
        ],
        owner=params.mapred_user,
        group=params.user_group,
        create_parents=True,
        cd_access='a',
    )
    # ignore_failures=True: a failure creating this prefix directory is
    # tolerated rather than aborting the whole configure step.
    Directory(
        [params.yarn_log_dir_prefix],
        owner=params.yarn_user,
        group=params.user_group,
        create_parents=True,
        ignore_failures=True,
        cd_access='a',
    )

    # Some of these function calls depend on the directories above being created first.
    if name == 'resourcemanager':
        setup_resourcemanager()
    elif name == 'nodemanager':
        setup_nodemanager()
    elif name == 'apptimelineserver':
        setup_ats()
    elif name == 'historyserver':
        setup_historyserver()

    # Render core-site.xml from the cluster configuration.
    XmlConfig(
        "core-site.xml",
        conf_dir=config_dir,
        configurations=params.config['configurations']['core-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['core-site'],
        owner=params.hdfs_user,
        group=params.user_group,
        mode=0644)

    # During RU, Core Masters and Slaves need hdfs-site.xml
    # TODO, instead of specifying individual configs, which is susceptible to breaking when new configs are added,
    # RU should rely on all available in <stack-root>/<version>/hadoop/conf
    XmlConfig(
        "hdfs-site.xml",
        conf_dir=config_dir,
        configurations=params.config['configurations']['hdfs-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['hdfs-site'],
        owner=params.hdfs_user,
        group=params.user_group,
        mode=0644)

    XmlConfig(
        "mapred-site.xml",
        conf_dir=config_dir,
        configurations=params.config['configurations']['mapred-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['mapred-site'],
        owner=params.yarn_user,
        group=params.user_group,
        mode=0644)

    XmlConfig(
        "yarn-site.xml",
        conf_dir=config_dir,
        configurations=params.config['configurations']['yarn-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['yarn-site'],
        owner=params.yarn_user,
        group=params.user_group,
        mode=0644)

    XmlConfig(
        "capacity-scheduler.xml",
        conf_dir=config_dir,
        configurations=params.config['configurations']['capacity-scheduler'],
        configuration_attributes=params.config['configuration_attributes']
        ['capacity-scheduler'],
        owner=params.yarn_user,
        group=params.user_group,
        mode=0644)

    # ulimit configuration fragments under /etc/security/limits.d (or the
    # configured limits_conf_dir), rendered from Jinja templates.
    File(format("{limits_conf_dir}/yarn.conf"),
         mode=0644,
         content=Template('yarn.conf.j2'))

    File(format("{limits_conf_dir}/mapreduce.conf"),
         mode=0644,
         content=Template('mapreduce.conf.j2'))

    File(os.path.join(config_dir, "yarn-env.sh"),
         owner=params.yarn_user,
         group=params.user_group,
         mode=0755,
         content=InlineTemplate(params.yarn_env_sh_template))

    # Fix ownership/mode of the container-executor binary; the mode comes
    # from params (setuid bits are required for secure clusters).
    File(format("{yarn_container_bin}/container-executor"),
         group=params.yarn_executor_container_group,
         mode=params.container_executor_mode)

    File(os.path.join(config_dir, "container-executor.cfg"),
         group=params.user_group,
         mode=0644,
         content=Template('container-executor.cfg.j2'))

    Directory(params.cgroups_dir,
              group=params.user_group,
              create_parents=True,
              mode=0755,
              cd_access="a")

    File(os.path.join(config_dir, "mapred-env.sh"),
         owner=params.tc_owner,
         mode=0755,
         content=InlineTemplate(params.mapred_env_sh_template))

    if params.security_enabled:
        # task-controller must be root-owned with setuid/setgid (06050) so
        # it can launch tasks as other users on a secure cluster.
        File(os.path.join(params.hadoop_bin, "task-controller"),
             owner="root",
             group=params.mapred_tt_group,
             mode=06050)
        File(os.path.join(config_dir, 'taskcontroller.cfg'),
             owner=params.tc_owner,
             mode=params.tc_mode,
             group=params.mapred_tt_group,
             content=Template("taskcontroller.cfg.j2"))
        File(os.path.join(config_dir, 'yarn_jaas.conf'),
             owner=params.yarn_user,
             group=params.user_group,
             content=Template("yarn_jaas.conf.j2"))
    else:
        File(os.path.join(config_dir, 'taskcontroller.cfg'),
             owner=params.tc_owner,
             content=Template("taskcontroller.cfg.j2"))

    # NOTE(review): mapred-site.xml and capacity-scheduler.xml are written a
    # second time here with different owners and no explicit mode, replacing
    # the versions created above — presumably intentional; confirm.
    XmlConfig(
        "mapred-site.xml",
        conf_dir=config_dir,
        configurations=params.config['configurations']['mapred-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['mapred-site'],
        owner=params.mapred_user,
        group=params.user_group)

    XmlConfig(
        "capacity-scheduler.xml",
        conf_dir=config_dir,
        configurations=params.config['configurations']['capacity-scheduler'],
        configuration_attributes=params.config['configuration_attributes']
        ['capacity-scheduler'],
        owner=params.hdfs_user,
        group=params.user_group)

    # SSL client/server configs are only written when present in the
    # cluster configuration.
    if "ssl-client" in params.config['configurations']:
        XmlConfig(
            "ssl-client.xml",
            conf_dir=config_dir,
            configurations=params.config['configurations']['ssl-client'],
            configuration_attributes=params.config['configuration_attributes']
            ['ssl-client'],
            owner=params.hdfs_user,
            group=params.user_group)

        Directory(
            params.hadoop_conf_secure_dir,
            create_parents=True,
            owner='root',
            group=params.user_group,
            cd_access='a',
        )

        # Also write a copy into the secure conf directory.
        XmlConfig(
            "ssl-client.xml",
            conf_dir=params.hadoop_conf_secure_dir,
            configurations=params.config['configurations']['ssl-client'],
            configuration_attributes=params.config['configuration_attributes']
            ['ssl-client'],
            owner=params.hdfs_user,
            group=params.user_group)

    if "ssl-server" in params.config['configurations']:
        XmlConfig(
            "ssl-server.xml",
            conf_dir=config_dir,
            configurations=params.config['configurations']['ssl-server'],
            configuration_attributes=params.config['configuration_attributes']
            ['ssl-server'],
            owner=params.hdfs_user,
            group=params.user_group)
    # Fix ownership of pre-existing optional files, if they are present.
    if os.path.exists(os.path.join(config_dir, 'fair-scheduler.xml')):
        File(os.path.join(config_dir, 'fair-scheduler.xml'),
             owner=params.mapred_user,
             group=params.user_group)

    if os.path.exists(os.path.join(config_dir, 'ssl-client.xml.example')):
        File(os.path.join(config_dir, 'ssl-client.xml.example'),
             owner=params.mapred_user,
             group=params.user_group)

    if os.path.exists(os.path.join(config_dir, 'ssl-server.xml.example')):
        File(os.path.join(config_dir, 'ssl-server.xml.example'),
             owner=params.mapred_user,
             group=params.user_group)
Code example #2
0
File: hive.py  Project: glenraynor/ambari
def fill_conf_dir(component_conf_dir):
    import params

    Directory(component_conf_dir,
              owner=params.hive_user,
              group=params.user_group,
              create_parents=True)

    XmlConfig(
        "mapred-site.xml",
        conf_dir=component_conf_dir,
        configurations=params.config['configurations']['mapred-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['mapred-site'],
        owner=params.hive_user,
        group=params.user_group,
        mode=0644)

    File(format("{component_conf_dir}/hive-default.xml.template"),
         owner=params.hive_user,
         group=params.user_group)

    File(format("{component_conf_dir}/hive-env.sh.template"),
         owner=params.hive_user,
         group=params.user_group)

    # Create hive-log4j.properties and hive-exec-log4j.properties
    # in /etc/hive/conf and not in /etc/hive2/conf
    if params.log4j_version == '1':
        log4j_exec_filename = 'hive-exec-log4j.properties'
        if (params.log4j_exec_props != None):
            File(format("{component_conf_dir}/{log4j_exec_filename}"),
                 mode=0644,
                 group=params.user_group,
                 owner=params.hive_user,
                 content=params.log4j_exec_props)
        elif (os.path.exists(
                "{component_conf_dir}/{log4j_exec_filename}.template")):
            File(format("{component_conf_dir}/{log4j_exec_filename}"),
                 mode=0644,
                 group=params.user_group,
                 owner=params.hive_user,
                 content=StaticFile(
                     format(
                         "{component_conf_dir}/{log4j_exec_filename}.template")
                 ))

        log4j_filename = 'hive-log4j.properties'
        if (params.log4j_props != None):
            File(format("{component_conf_dir}/{log4j_filename}"),
                 mode=0644,
                 group=params.user_group,
                 owner=params.hive_user,
                 content=params.log4j_props)
        elif (os.path.exists("{component_conf_dir}/{log4j_filename}.template")
              ):
            File(format("{component_conf_dir}/{log4j_filename}"),
                 mode=0644,
                 group=params.user_group,
                 owner=params.hive_user,
                 content=StaticFile(
                     format("{component_conf_dir}/{log4j_filename}.template")))
        pass  # if params.log4j_version == '1'
Code example #3
0
def setup_usersync(upgrade_type=None):
    """Configure the Ranger UserSync component: directories, env scripts,
    log4j, ranger-ugsync-site.xml, and JCEKS credential stores.

    :param upgrade_type: non-None during an upgrade; triggers copying the
        default config files from conf.dist into conf.
    """
    import params

    usersync_home = params.usersync_home
    ranger_home = params.ranger_home
    ranger_ugsync_conf = params.ranger_ugsync_conf

    # Validate the LDAP bind password up front when LDAP sync is the
    # configured source and a password was supplied.
    if not is_empty(
            params.ranger_usersync_ldap_ldapbindpassword
    ) and params.ug_sync_source == 'org.apache.ranger.ldapusersync.process.LdapUserGroupBuilder':
        password_validation(params.ranger_usersync_ldap_ldapbindpassword)

    Directory(params.ranger_pid_dir,
              mode=0755,
              owner=params.unix_user,
              group=params.user_group,
              cd_access="a",
              create_parents=True)

    # On stacks that support a pid dir, write the env script that exports
    # the pid dir path and usersync user for the daemon scripts.
    if params.stack_supports_pid:
        File(
            format('{ranger_ugsync_conf}/ranger-usersync-env-piddir.sh'),
            content=format(
                "export USERSYNC_PID_DIR_PATH={ranger_pid_dir}\nexport UNIX_USERSYNC_USER={unix_user}"
            ),
            owner=params.unix_user,
            group=params.unix_group,
            mode=0755)

    Directory(params.usersync_log_dir,
              owner=params.unix_user,
              group=params.unix_group,
              cd_access='a',
              create_parents=True,
              mode=0755,
              recursive_ownership=True)

    File(format('{ranger_ugsync_conf}/ranger-usersync-env-logdir.sh'),
         content=format("export logdir={usersync_log_dir}"),
         owner=params.unix_user,
         group=params.unix_group,
         mode=0755)

    Directory(format("{ranger_ugsync_conf}/"), owner=params.unix_user)

    # During an upgrade, refresh the default ugsync config from conf.dist.
    if upgrade_type is not None:
        src_file = format(
            '{usersync_home}/conf.dist/ranger-ugsync-default.xml')
        dst_file = format('{usersync_home}/conf/ranger-ugsync-default.xml')
        Execute(('cp', '-f', src_file, dst_file), sudo=True)

    # log4j: newer stacks render the template; older stacks (during an
    # upgrade only) copy the stock log4j.xml from conf.dist.
    if params.stack_supports_ranger_log4j:
        File(format('{usersync_home}/conf/log4j.properties'),
             owner=params.unix_user,
             group=params.unix_group,
             content=InlineTemplate(params.usersync_log4j),
             mode=0644)
    elif upgrade_type is not None and not params.stack_supports_ranger_log4j:
        src_file = format('{usersync_home}/conf.dist/log4j.xml')
        dst_file = format('{usersync_home}/conf/log4j.xml')
        Execute(('cp', '-f', src_file, dst_file), sudo=True)

    # remove plain-text password from xml configs
    ranger_ugsync_site_copy = {}
    ranger_ugsync_site_copy.update(
        params.config['configurations']['ranger-ugsync-site'])
    for prop in params.ranger_usersync_password_properties:
        if prop in ranger_ugsync_site_copy:
            ranger_ugsync_site_copy[prop] = "_"

    XmlConfig(
        "ranger-ugsync-site.xml",
        conf_dir=ranger_ugsync_conf,
        configurations=ranger_ugsync_site_copy,
        configuration_attributes=params.config['configuration_attributes']
        ['ranger-ugsync-site'],
        owner=params.unix_user,
        group=params.unix_group,
        mode=0644)

    # Fix ownership of optional files if they already exist on disk.
    if os.path.isfile(params.ranger_ugsync_default_file):
        File(params.ranger_ugsync_default_file,
             owner=params.unix_user,
             group=params.unix_group)

    if os.path.isfile(params.usgsync_log4j_file):
        File(params.usgsync_log4j_file,
             owner=params.unix_user,
             group=params.unix_group)

    # 04555: setuid + read/execute — the credential validator runs with
    # elevated privileges.
    if os.path.isfile(params.cred_validator_file):
        File(params.cred_validator_file, group=params.unix_group, mode=04555)

    # Store passwords in the JCEKS credential store instead of plain text.
    ranger_credential_helper(params.ugsync_cred_lib,
                             'usersync.ssl.key.password',
                             params.ranger_usersync_keystore_password,
                             params.ugsync_jceks_path)

    if not is_empty(
            params.ranger_usersync_ldap_ldapbindpassword
    ) and params.ug_sync_source == 'org.apache.ranger.ldapusersync.process.LdapUserGroupBuilder':
        ranger_credential_helper(params.ugsync_cred_lib,
                                 'ranger.usersync.ldap.bindalias',
                                 params.ranger_usersync_ldap_ldapbindpassword,
                                 params.ugsync_jceks_path)

    ranger_credential_helper(params.ugsync_cred_lib,
                             'usersync.ssl.truststore.password',
                             params.ranger_usersync_truststore_password,
                             params.ugsync_jceks_path)

    File(params.ugsync_jceks_path,
         owner=params.unix_user,
         group=params.unix_group,
         mode=0640)

    File([params.usersync_start, params.usersync_stop],
         owner=params.unix_user,
         group=params.unix_group)

    File(
        params.usersync_services_file,
        mode=0755,
    )

    # Symlink the services script to /usr/bin/ranger-usersync, but only if
    # the link does not already exist and the target script is present.
    Execute(('ln', '-sf', format('{usersync_services_file}'),
             '/usr/bin/ranger-usersync'),
            not_if=format("ls /usr/bin/ranger-usersync"),
            only_if=format("ls {usersync_services_file}"),
            sudo=True)

    # Generate a self-signed keystore on first setup; {...!p} masks the
    # passwords in logged command lines.
    if not os.path.isfile(params.ranger_usersync_keystore_file):
        cmd = format(
            "{java_home}/bin/keytool -genkeypair -keyalg RSA -alias selfsigned -keystore '{ranger_usersync_keystore_file}' -keypass {ranger_usersync_keystore_password!p} -storepass {ranger_usersync_keystore_password!p} -validity 3600 -keysize 2048 -dname '{default_dn_name}'"
        )

        Execute(cmd, logoutput=True, user=params.unix_user)

        File(params.ranger_usersync_keystore_file,
             owner=params.unix_user,
             group=params.unix_group,
             mode=0640)

    create_core_site_xml(ranger_ugsync_conf)
Code example #4
0
File: oozie.py  Project: Liujinan001/ambari-2.7.5
def oozie_server_specific(upgrade_type):
  """Oozie-server-only setup: directories, sharelib, ext JS, DB JAR,
  Falcon extension, WAR preparation, and Hive/Tez configs for Oozie.

  :param upgrade_type: passed through to get_oozie_ext_zip_source_paths();
      None for a regular install/configure.
  """
  import params

  # Shell test that succeeds only when the pid file exists AND the process
  # it names is alive; used below to skip work while the server is running.
  no_op_test = as_user(format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1"), user=params.oozie_user)

  # Remove a stale pid file (only when the server is not running).
  File(params.pid_file,
    action="delete",
    not_if=no_op_test
  )

  oozie_server_directories = [format("{oozie_home}/{oozie_tmp_dir}"), params.oozie_pid_dir, params.oozie_log_dir, params.oozie_tmp_dir, params.oozie_data_dir, params.oozie_lib_dir, params.oozie_webapps_dir, params.oozie_webapps_conf_dir, params.oozie_server_dir]
  Directory( oozie_server_directories,
    owner = params.oozie_user,
    group = params.user_group,
    mode = 0755,
    create_parents = True,
    cd_access="a",
  )

  Directory(params.oozie_libext_dir,
            create_parents = True,
  )

  # .hashcode marker + extracted share dir together mean the sharelib has
  # already been set up and extraction can be skipped.
  hashcode_file = format("{oozie_home}/.hashcode")
  skip_recreate_sharelib = format("test -f {hashcode_file} && test -d {oozie_home}/share")

  untar_sharelib = ('tar','-xvf',format('{oozie_home}/oozie-sharelib.tar.gz'),'-C',params.oozie_home)

  Execute( untar_sharelib,    # time-expensive
    not_if  = format("{no_op_test} || {skip_recreate_sharelib}"),
    sudo = True,
  )

  configure_cmds = []
  # Default to /usr/share/$TARGETSTACK-oozie/ext-2.2.zip as the first path
  source_ext_zip_paths = get_oozie_ext_zip_source_paths(upgrade_type, params)

  # Copy the first oozie ext-2.2.zip file that is found.
  # This uses a list to handle the cases when migrating from some versions of BigInsights to HDP.
  if source_ext_zip_paths is not None:
    for source_ext_zip_path in source_ext_zip_paths:
      if os.path.isfile(source_ext_zip_path):
        configure_cmds.append(('cp', source_ext_zip_path, params.oozie_libext_dir))
        configure_cmds.append(('chown', format('{oozie_user}:{user_group}'), format('{oozie_libext_dir}/{ext_js_file}')))

        Execute(configure_cmds,
                not_if=no_op_test,
                sudo=True,
                )
        break


  Directory(params.oozie_webapps_conf_dir,
            owner = params.oozie_user,
            group = params.user_group,
            recursive_ownership = True,
            recursion_follow_links = True,
  )

  # download the database JAR
  download_database_library_if_needed()

  #falcon el extension
  if params.has_falcon_host:
    Execute(format('{sudo} cp {falcon_home}/oozie/ext/falcon-oozie-el-extension-*.jar {oozie_libext_dir}'),
      not_if  = no_op_test)

    Execute(format('{sudo} chown {oozie_user}:{user_group} {oozie_libext_dir}/falcon-oozie-el-extension-*.jar'),
      not_if  = no_op_test)

  prepare_war(params)

  # Touch/create the marker used by skip_recreate_sharelib above.
  File(hashcode_file,
       mode = 0644,
  )

  if params.stack_version_formatted and check_stack_feature(StackFeature.OOZIE_CREATE_HIVE_TEZ_CONFIGS, params.stack_version_formatted):
    # Create hive-site and tez-site configs for oozie
    Directory(params.hive_conf_dir,
        create_parents = True,
        owner = params.oozie_user,
        group = params.user_group
    )
    if 'hive-site' in params.config['configurations']:
      # Rewrites any credential-provider path in hive-site to a jceks file
      # owned by the oozie user.
      hive_site_config = update_credential_provider_path(params.config['configurations']['hive-site'],
                                                         'hive-site',
                                                         os.path.join(params.hive_conf_dir, 'hive-site.jceks'),
                                                         params.oozie_user,
                                                         params.user_group
                                                         )
      XmlConfig("hive-site.xml",
        conf_dir=params.hive_conf_dir,
        configurations=hive_site_config,
        configuration_attributes=params.config['configurationAttributes']['hive-site'],
        owner=params.oozie_user,
        group=params.user_group,
        mode=0644
    )
    if 'tez-site' in params.config['configurations']:
      XmlConfig( "tez-site.xml",
        conf_dir = params.hive_conf_dir,
        configurations = params.config['configurations']['tez-site'],
        configuration_attributes=params.config['configurationAttributes']['tez-site'],
        owner = params.oozie_user,
        group = params.user_group,
        mode = 0664
    )

    # If Atlas is also installed, need to generate Atlas Hive hook (hive-atlas-application.properties file) in directory
    # {stack_root}/{current_version}/atlas/hook/hive/
    # Because this is a .properties file instead of an xml file, it will not be read automatically by Oozie.
    # However, should still save the file on this host so that can upload it to the Oozie Sharelib in DFS.
    if has_atlas_in_cluster():
      atlas_hook_filepath = os.path.join(params.hive_conf_dir, params.atlas_hook_filename)
      Logger.info("Has atlas in cluster, will save Atlas Hive hook into location %s" % str(atlas_hook_filepath))
      setup_atlas_hook(SERVICE.HIVE, params.hive_atlas_application_properties, atlas_hook_filepath, params.oozie_user, params.user_group)

  Directory(params.oozie_server_dir,
    owner = params.oozie_user,
    group = params.user_group,
    recursive_ownership = True,
  )
  if params.security_enabled:
    File(os.path.join(params.conf_dir, 'zkmigrator_jaas.conf'),
         owner=params.oozie_user,
         group=params.user_group,
         content=Template("zkmigrator_jaas.conf.j2")
         )
Code example #5
0
def setup_conf_dir(
    name=None
):  # 'master' or 'tserver' or 'monitor' or 'gc' or 'tracer' or 'client'
    """Create the Accumulo conf directory tree and all configuration files
    for the given component, then run component-specific initialization
    ('master' initializes the instance; 'tracer' grants trace permissions).

    :param name: Component name ('master', 'tserver', 'monitor', 'gc',
        'tracer', or 'client'); None behaves like a non-client server.
    """
    import params

    # create the conf directory
    Directory(params.conf_dir,
              mode=0755,
              owner=params.accumulo_user,
              group=params.user_group,
              create_parents=True)

    if name == 'client':
        dest_conf_dir = params.conf_dir

        # create a site file for client processes
        # Secrets are stripped from the client copy of accumulo-site.xml.
        configs = {}
        configs.update(params.config['configurations']['accumulo-site'])
        if "instance.secret" in configs:
            configs.pop("instance.secret")
        if "trace.token.property.password" in configs:
            configs.pop("trace.token.property.password")
        XmlConfig(
            "accumulo-site.xml",
            conf_dir=dest_conf_dir,
            configurations=configs,
            configuration_attributes=params.config['configuration_attributes']
            ['accumulo-site'],
            owner=params.accumulo_user,
            group=params.user_group,
            mode=0644)

        # create env file
        File(format("{dest_conf_dir}/accumulo-env.sh"),
             mode=0644,
             group=params.user_group,
             owner=params.accumulo_user,
             content=InlineTemplate(params.env_sh_template))
    else:
        dest_conf_dir = params.server_conf_dir
        # create server conf directory (0700: contains secrets)
        Directory(params.server_conf_dir,
                  mode=0700,
                  owner=params.accumulo_user,
                  group=params.user_group,
                  create_parents=True)
        # create a site file for server processes
        # Server copy keeps instance.secret and the trace token password.
        configs = {}
        configs.update(params.config['configurations']['accumulo-site'])
        configs["instance.secret"] = str(
            params.config['configurations']['accumulo-env']['instance_secret'])
        configs["trace.token.property.password"] = str(params.trace_password)
        XmlConfig(
            "accumulo-site.xml",
            conf_dir=dest_conf_dir,
            configurations=configs,
            configuration_attributes=params.config['configuration_attributes']
            ['accumulo-site'],
            owner=params.accumulo_user,
            group=params.user_group,
            mode=0600)

        # create pid dir
        Directory(
            params.pid_dir,
            owner=params.accumulo_user,
            group=params.user_group,
            create_parents=True,
            cd_access="a",
            mode=0755,
        )

        # create log dir
        Directory(
            params.log_dir,
            owner=params.accumulo_user,
            group=params.user_group,
            create_parents=True,
            cd_access="a",
            mode=0755,
        )

        # create env file
        File(format("{dest_conf_dir}/accumulo-env.sh"),
             mode=0644,
             group=params.user_group,
             owner=params.accumulo_user,
             content=InlineTemplate(params.server_env_sh_template))

        if params.security_enabled:
            accumulo_TemplateConfig("accumulo_jaas.conf", dest_conf_dir)

    # create client.conf file
    # Start from any explicit 'client' configuration, then copy selected
    # properties over from accumulo-site.
    configs = {}
    if 'client' in params.config['configurations']:
        configs.update(params.config['configurations']['client'])
    configs["instance.name"] = params.instance_name
    configs["instance.zookeeper.host"] = params.config['configurations'][
        'accumulo-site']['instance.zookeeper.host']
    copy_site_property(configs, 'instance.rpc.sasl.enabled')
    copy_site_property(configs, 'rpc.sasl.qop')
    copy_site_property(configs, 'rpc.useJsse')
    copy_site_property(configs, 'instance.rpc.ssl.clientAuth')
    copy_site_property(configs, 'instance.rpc.ssl.enabled')
    copy_site_property(configs, 'instance.zookeeper.timeout')
    copy_site_property(configs, 'trace.span.receivers')
    copy_site_property(configs, 'trace.zookeeper.path')
    # NOTE: iteritems() — this module is Python 2 code.
    for key, value in params.config['configurations'][
            'accumulo-site'].iteritems():
        if key.startswith("trace.span.receiver."):
            configs[key] = value
    PropertiesFile(format("{dest_conf_dir}/client.conf"),
                   properties=configs,
                   owner=params.accumulo_user,
                   group=params.user_group)

    # create log4j.properties files
    if (params.log4j_props != None):
        File(format("{dest_conf_dir}/log4j.properties"),
             mode=0644,
             group=params.user_group,
             owner=params.accumulo_user,
             content=params.log4j_props)
    else:
        # NOTE(review): falls back to hbase_user as owner when no log4j
        # props are configured — looks inconsistent with accumulo_user
        # used everywhere else; confirm this is intentional.
        File(format("{dest_conf_dir}/log4j.properties"),
             mode=0644,
             group=params.user_group,
             owner=params.hbase_user)

    # create logging configuration files
    accumulo_TemplateConfig("auditLog.xml", dest_conf_dir)
    accumulo_TemplateConfig("generic_logger.xml", dest_conf_dir)
    accumulo_TemplateConfig("monitor_logger.xml", dest_conf_dir)
    accumulo_StaticFile("accumulo-metrics.xml", dest_conf_dir)

    # create host files
    accumulo_TemplateConfig("tracers", dest_conf_dir)
    accumulo_TemplateConfig("gc", dest_conf_dir)
    accumulo_TemplateConfig("monitor", dest_conf_dir)
    accumulo_TemplateConfig("slaves", dest_conf_dir)
    accumulo_TemplateConfig("masters", dest_conf_dir)

    # metrics configuration
    if params.has_metric_collector:
        accumulo_TemplateConfig("hadoop-metrics2-accumulo.properties",
                                dest_conf_dir)

    # other server setup
    if name == 'master':
        # HDFS home and parent dirs for the accumulo user, then run
        # `accumulo init` unless the instance volumes already exist
        # (the fs -stat not_if guard).
        params.HdfsResource(format("/user/{params.accumulo_user}"),
                            type="directory",
                            action="create_on_execute",
                            owner=params.accumulo_user,
                            mode=0700)
        params.HdfsResource(format("{params.parent_dir}"),
                            type="directory",
                            action="create_on_execute",
                            owner=params.accumulo_user,
                            mode=0700)
        params.HdfsResource(None, action="execute")
        if params.security_enabled and params.has_secure_user_auth:
            Execute(format("{params.kinit_cmd} "
                           "{params.daemon_script} init "
                           "--user {params.accumulo_principal_name} "
                           "--instance-name {params.instance_name} "
                           "--clear-instance-name "
                           ">{params.log_dir}/accumulo-init.out "
                           "2>{params.log_dir}/accumulo-init.err"),
                    not_if=as_user(
                        format("{params.kinit_cmd} "
                               "{params.hadoop_bin_dir}/hadoop --config "
                               "{params.hadoop_conf_dir} fs -stat "
                               "{params.instance_volumes}"),
                        params.accumulo_user),
                    logoutput=True,
                    user=params.accumulo_user)
        else:
            # Non-secure init: feed the root password to `accumulo init`
            # via a temp file that is always deleted afterwards.
            passfile = format("{params.exec_tmp_dir}/pass")
            try:
                File(passfile,
                     mode=0600,
                     group=params.user_group,
                     owner=params.accumulo_user,
                     content=InlineTemplate('{{root_password}}\n'
                                            '{{root_password}}\n\n'))
                Execute(format("cat {passfile} | {params.daemon_script} init "
                               "--instance-name {params.instance_name} "
                               "--clear-instance-name "
                               ">{params.log_dir}/accumulo-init.out "
                               "2>{params.log_dir}/accumulo-init.err"),
                        not_if=as_user(
                            format("{params.kinit_cmd} "
                                   "{params.hadoop_bin_dir}/hadoop --config "
                                   "{params.hadoop_conf_dir} fs -stat "
                                   "{params.instance_volumes}"),
                            params.accumulo_user),
                        logoutput=True,
                        user=params.accumulo_user)
            finally:
                File(passfile, action="delete")

    if name == 'tracer':
        # Create the smoke-test and trace users, then grant the trace user
        # permissions on the 'trace' table via a shell command file.
        if params.security_enabled and params.has_secure_user_auth:
            Execute(format("{params.kinit_cmd} "
                           "{params.daemon_script} init --reset-security "
                           "--user {params.accumulo_principal_name} "
                           "--password NA "
                           ">{params.log_dir}/accumulo-reset.out "
                           "2>{params.log_dir}/accumulo-reset.err"),
                    not_if=as_user(
                        format("{params.kinit_cmd} "
                               "{params.daemon_script} shell -e "
                               "\"userpermissions -u "
                               "{params.accumulo_principal_name}\" | "
                               "grep System.CREATE_TABLE"),
                        params.accumulo_user),
                    user=params.accumulo_user)
            create_user(params.smokeuser_principal, params.smoke_test_password)
        else:
            # do not try to reset security in nonsecure mode, for now
            # Execute( format("{params.daemon_script} init --reset-security "
            #                 "--user root "
            #                 ">{params.log_dir}/accumulo-reset.out "
            #                 "2>{params.log_dir}/accumulo-reset.err"),
            #          not_if=as_user(format("cat {rpassfile} | "
            #                                "{params.daemon_script} shell -e "
            #                                "\"userpermissions -u root\" | "
            #                                "grep System.CREATE_TABLE"),
            #                         params.accumulo_user),
            #          user=params.accumulo_user)
            create_user(params.smoke_test_user, params.smoke_test_password)
        create_user(params.trace_user, params.trace_password)
        rpassfile = format("{params.exec_tmp_dir}/pass0")
        cmdfile = format("{params.exec_tmp_dir}/resetcmds")
        try:
            File(cmdfile,
                 mode=0600,
                 group=params.user_group,
                 owner=params.accumulo_user,
                 content=InlineTemplate(
                     'grant -t trace -u {{trace_user}} Table.ALTER_TABLE\n'
                     'grant -t trace -u {{trace_user}} Table.READ\n'
                     'grant -t trace -u {{trace_user}} Table.WRITE\n\n'))
            if params.security_enabled and params.has_secure_user_auth:
                # only_if: the trace table exists; not_if: grants already
                # applied (trace user can READ the trace table).
                Execute(format(
                    "{params.kinit_cmd} {params.daemon_script} shell -f "
                    "{cmdfile}"),
                        only_if=as_user(
                            format("{params.kinit_cmd} "
                                   "{params.daemon_script} shell "
                                   "-e \"table trace\""),
                            params.accumulo_user),
                        not_if=as_user(
                            format("{params.kinit_cmd} "
                                   "{params.daemon_script} shell "
                                   "-e \"userpermissions -u "
                                   "{params.trace_user} | "
                                   "grep Table.READ | grep trace"),
                            params.accumulo_user),
                        user=params.accumulo_user)
            else:
                File(rpassfile,
                     mode=0600,
                     group=params.user_group,
                     owner=params.accumulo_user,
                     content=InlineTemplate('{{root_password}}\n\n'))
                Execute(
                    format("cat {rpassfile} | {params.daemon_script} shell -f "
                           "{cmdfile} -u root"),
                    only_if=as_user(
                        format("cat {rpassfile} | "
                               "{params.daemon_script} shell -u root "
                               "-e \"table trace\""), params.accumulo_user),
                    not_if=as_user(
                        format("cat {rpassfile} | "
                               "{params.daemon_script} shell -u root "
                               "-e \"userpermissions -u "
                               "{params.trace_user} | "
                               "grep Table.READ | grep trace"),
                        params.accumulo_user),
                    user=params.accumulo_user)
        finally:
            try_remove(rpassfile)
            try_remove(cmdfile)
Code example #6
0
File: ams.py  Project: wang7x/dfhz_hdp_mpack
def ams(name=None, action=None):
  import params

  if name == 'collector':
    Directory(params.ams_collector_conf_dir,
              owner=params.ams_user,
              group=params.user_group,
              create_parents = True,
              recursive_ownership = True,
    )
    
    Directory(params.ams_checkpoint_dir,
              owner=params.ams_user,
              group=params.user_group,
              cd_access="a",
              create_parents = True,
              recursive_ownership = True
    )

    new_ams_site = {}
    new_ams_site.update(params.config['configurations']['ams-site'])
    if params.clusterHostInfoDict:
      master_components = []
      slave_components = []
      components = dict(params.clusterHostInfoDict).keys()
      known_slave_components = ["nodemanager", "metrics_monitor", "datanode", "hbase_regionserver"]
      for component in components:
        if component and component.endswith("_hosts"):
          component_name = component[:-6]
        elif component and component.endswith("_host"):
          component_name = component[:-5]
        else:
          continue
        if component_name in known_slave_components:
          slave_components.append(component_name)
        else:
          master_components.append(component_name)

      if slave_components:
        new_ams_site['timeline.metrics.initial.configured.slave.components'] = ",".join(slave_components)
      if master_components:
        if 'ambari_server' not in master_components:
          master_components.append('ambari_server')
        new_ams_site['timeline.metrics.initial.configured.master.components'] = ",".join(master_components)

    hbase_total_heapsize_with_trailing_m = params.hbase_heapsize
    hbase_total_heapsize = int(hbase_total_heapsize_with_trailing_m[:-1]) * 1024 * 1024
    new_ams_site['hbase_total_heapsize'] = hbase_total_heapsize

    XmlConfig("ams-site.xml",
              conf_dir=params.ams_collector_conf_dir,
              configurations=new_ams_site,
              configuration_attributes=params.config['configurationAttributes']['ams-site'],
              owner=params.ams_user,
              group=params.user_group
    )

    XmlConfig("ssl-server.xml",
              conf_dir=params.ams_collector_conf_dir,
              configurations=params.config['configurations']['ams-ssl-server'],
              configuration_attributes=params.config['configurationAttributes']['ams-ssl-server'],
              owner=params.ams_user,
              group=params.user_group
    )

    merged_ams_hbase_site = {}
    merged_ams_hbase_site.update(params.config['configurations']['ams-hbase-site'])
    if params.security_enabled:
      merged_ams_hbase_site.update(params.config['configurations']['ams-hbase-security-site'])

    # Add phoenix client side overrides
    merged_ams_hbase_site['phoenix.query.maxGlobalMemoryPercentage'] = str(params.phoenix_max_global_mem_percent)
    merged_ams_hbase_site['phoenix.spool.directory'] = params.phoenix_client_spool_dir

    XmlConfig( "hbase-site.xml",
               conf_dir = params.ams_collector_conf_dir,
               configurations = merged_ams_hbase_site,
               configuration_attributes=params.config['configurationAttributes']['ams-hbase-site'],
               owner = params.ams_user,
               group = params.user_group
    )

    if params.security_enabled:
      TemplateConfig(os.path.join(params.hbase_conf_dir, "ams_collector_jaas.conf"),
                     owner = params.ams_user,
                     template_tag = None)

    if (params.log4j_props != None):
      File(format("{params.ams_collector_conf_dir}/log4j.properties"),
           mode=0644,
           group=params.user_group,
           owner=params.ams_user,
           content=InlineTemplate(params.log4j_props)
      )

    File(format("{ams_collector_conf_dir}/ams-env.sh"),
         owner=params.ams_user,
         content=InlineTemplate(params.ams_env_sh_template)
    )

    Directory(params.ams_collector_log_dir,
              owner=params.ams_user,
              group=params.user_group,
              cd_access="a",
              create_parents = True,
              mode=0755,
    )

    Directory(params.ams_collector_pid_dir,
              owner=params.ams_user,
              group=params.user_group,
              cd_access="a",
              create_parents = True,
              mode=0755,
    )

    # Hack to allow native HBase libs to be included for embedded hbase
    File(os.path.join(params.ams_hbase_home_dir, "bin", "hadoop"),
         owner=params.ams_user,
         mode=0755
    )

    # On some OS this folder could be not exists, so we will create it before pushing there files
    Directory(params.limits_conf_dir,
              create_parents = True,
              owner='root',
              group='root'
    )

    # Setting up security limits
    File(os.path.join(params.limits_conf_dir, 'ams.conf'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("ams.conf.j2")
    )

    # Phoenix spool file dir if not /tmp
    if not os.path.exists(params.phoenix_client_spool_dir):
      Directory(params.phoenix_client_spool_dir,
                owner=params.ams_user,
                mode = 0755,
                group=params.user_group,
                cd_access="a",
                create_parents = True
      )
    pass

    if not params.is_local_fs_rootdir and params.is_ams_distributed:
      # Configuration needed to support NN HA
      XmlConfig("hdfs-site.xml",
            conf_dir=params.ams_collector_conf_dir,
            configurations=params.config['configurations']['hdfs-site'],
            configuration_attributes=params.config['configurationAttributes']['hdfs-site'],
            owner=params.ams_user,
            group=params.user_group,
            mode=0644
      )

      XmlConfig("hdfs-site.xml",
            conf_dir=params.hbase_conf_dir,
            configurations=params.config['configurations']['hdfs-site'],
            configuration_attributes=params.config['configurationAttributes']['hdfs-site'],
            owner=params.ams_user,
            group=params.user_group,
            mode=0644
      )

      # Remove spnego configs from core-site if platform does not have python-kerberos library
      truncated_core_site = {}
      truncated_core_site.update(params.config['configurations']['core-site'])
      if is_spnego_enabled(params):
        truncated_core_site.pop('hadoop.http.authentication.type')
        truncated_core_site.pop('hadoop.http.filter.initializers')

      # if there is the viewFS mount table content, create separate xml config and include in in the core-sites
      # else just create core-sites
      if params.mount_table_content:
        XmlConfig("core-site.xml",
                  conf_dir=params.ams_collector_conf_dir,
                  configurations=truncated_core_site,
                  configuration_attributes=params.config['configurationAttributes']['core-site'],
                  owner=params.ams_user,
                  group=params.user_group,
                  mode=0644,
                  xml_include_file=os.path.join(params.ams_collector_conf_dir, params.xml_inclusion_file_name)
        )

        File(os.path.join(params.ams_collector_conf_dir, params.xml_inclusion_file_name),
             owner=params.ams_user,
             group=params.user_group,
             content=params.mount_table_content,
             mode=0644
        )

        XmlConfig("core-site.xml",
                  conf_dir=params.hbase_conf_dir,
                  configurations=truncated_core_site,
                  configuration_attributes=params.config['configurationAttributes']['core-site'],
                  owner=params.ams_user,
                  group=params.user_group,
                  mode=0644,
                  xml_include_file=os.path.join(params.hbase_conf_dir, params.xml_inclusion_file_name)
        )

        File(os.path.join(params.hbase_conf_dir, params.xml_inclusion_file_name),
             owner=params.ams_user,
             group=params.user_group,
             content=params.mount_table_content,
             mode=0644
        )
      else:
        XmlConfig("core-site.xml",
                  conf_dir=params.ams_collector_conf_dir,
                  configurations=truncated_core_site,
                  configuration_attributes=params.config['configurationAttributes']['core-site'],
                  owner=params.ams_user,
                  group=params.user_group,
                  mode=0644
        )

        XmlConfig("core-site.xml",
                  conf_dir=params.hbase_conf_dir,
                  configurations=truncated_core_site,
                  configuration_attributes=params.config['configurationAttributes']['core-site'],
                  owner=params.ams_user,
                  group=params.user_group,
                  mode=0644
        )

    if params.metric_collector_https_enabled:
      export_ca_certs(params.ams_collector_conf_dir)

    pass

  elif name == 'monitor':

    # TODO Uncomment when SPNEGO support has been added to AMS service check and Grafana.
    if is_spnego_enabled(params) and is_redhat_centos_6_plus():
      try:
        import kerberos
      except ImportError:
        raise ImportError("python-kerberos package need to be installed to run AMS in SPNEGO mode")

    Directory(params.ams_monitor_conf_dir,
              owner=params.ams_user,
              group=params.user_group,
              create_parents = True
    )

    Directory(params.ams_monitor_log_dir,
              owner=params.ams_user,
              group=params.user_group,
              mode=0755,
              create_parents = True
    )

    if params.host_in_memory_aggregation and params.log4j_props is not None:
      File(format("{params.ams_monitor_conf_dir}/log4j.properties"),
           mode=0644,
           group=params.user_group,
           owner=params.ams_user,
           content=InlineTemplate(params.log4j_props)
           )

      XmlConfig("ams-site.xml",
              conf_dir=params.ams_monitor_conf_dir,
              configurations=params.config['configurations']['ams-site'],
              configuration_attributes=params.config['configurationAttributes']['ams-site'],
              owner=params.ams_user,
              group=params.user_group
              )
      XmlConfig("ssl-server.xml",
              conf_dir=params.ams_monitor_conf_dir,
              configurations=params.config['configurations']['ams-ssl-server'],
              configuration_attributes=params.config['configurationAttributes']['ams-ssl-server'],
              owner=params.ams_user,
              group=params.user_group
              )
      pass

    Execute(format("{sudo} chown -R {ams_user}:{user_group} {ams_monitor_log_dir}")
            )

    Directory(params.ams_monitor_pid_dir,
              owner=params.ams_user,
              group=params.user_group,
              cd_access="a",
              mode=0755,
              create_parents = True
    )

    Directory(format("{ams_monitor_dir}/psutil/build"),
              owner=params.ams_user,
              group=params.user_group,
              cd_access="a",
              create_parents = True)

    Execute(format("{sudo} chown -R {ams_user}:{user_group} {ams_monitor_dir}")
    )

    TemplateConfig(
      format("{ams_monitor_conf_dir}/metric_monitor.ini"),
      owner=params.ams_user,
      group=params.user_group,
      template_tag=None
    )

    TemplateConfig(
      format("{ams_monitor_conf_dir}/metric_groups.conf"),
      owner=params.ams_user,
      group=params.user_group,
      template_tag=None
    )

    File(format("{ams_monitor_conf_dir}/ams-env.sh"),
         owner=params.ams_user,
         content=InlineTemplate(params.ams_env_sh_template)
    )

    if params.metric_collector_https_enabled or params.is_aggregation_https_enabled:
      export_ca_certs(params.ams_monitor_conf_dir)

    pass
  elif name == 'grafana':

    ams_grafana_directories = [
                              params.ams_grafana_conf_dir,
                              params.ams_grafana_log_dir,
                              params.ams_grafana_data_dir,
                              params.ams_grafana_pid_dir
                              ]

    for ams_grafana_directory in ams_grafana_directories:
      Directory(ams_grafana_directory,
                owner=params.ams_user,
                group=params.user_group,
                mode=0755,
                create_parents = True,
                recursive_ownership = True
                )

    File(format("{ams_grafana_conf_dir}/ams-grafana-env.sh"),
         owner=params.ams_user,
         group=params.user_group,
         content=InlineTemplate(params.ams_grafana_env_sh_template)
         )

    File(format("{ams_grafana_conf_dir}/ams-grafana.ini"),
         owner=params.ams_user,
         group=params.user_group,
         content=InlineTemplate(params.ams_grafana_ini_template),
         mode=0600
         )

    if action != 'stop':
      for dir in ams_grafana_directories:
        Execute(('chown', '-R', params.ams_user, dir),
                sudo=True
                )

    if params.metric_collector_https_enabled:
      export_ca_certs(params.ams_grafana_conf_dir)

    pass
  generate_logfeeder_input_config('ambari-metrics', Template("input.config-ambari-metrics.json.j2", extra_imports=[default]))
コード例 #7
0
ファイル: hbase.py プロジェクト: tsingfu/bigdata
def hbase(name=None):
    import params

    Directory(params.etc_prefix_dir, mode=0755)

    Directory(params.hbase_conf_dir,
              owner=params.hbase_user,
              group=params.user_group,
              create_parents=True)

    Directory(params.java_io_tmpdir, create_parents=True, mode=0777)

    # If a file location is specified in ioengine parameter,
    # ensure that directory exists. Otherwise create the
    # directory with permissions assigned to hbase:hadoop.
    ioengine_input = params.ioengine_param
    if ioengine_input != None:
        if ioengine_input.startswith("file:/"):
            ioengine_fullpath = ioengine_input[5:]
            ioengine_dir = os.path.dirname(ioengine_fullpath)
            Directory(ioengine_dir,
                      owner=params.hbase_user,
                      group=params.user_group,
                      create_parents=True,
                      mode=0755)

    parent_dir = os.path.dirname(params.tmp_dir)
    # In case if we have several placeholders in path
    while ("${" in parent_dir):
        parent_dir = os.path.dirname(parent_dir)
    if parent_dir != os.path.abspath(os.sep):
        Directory(
            parent_dir,
            create_parents=True,
            cd_access="a",
        )
        Execute(("chmod", "1777", parent_dir), sudo=True)

    XmlConfig(
        "hbase-site.xml",
        conf_dir=params.hbase_conf_dir,
        configurations=params.config['configurations']['hbase-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['hbase-site'],
        owner=params.hbase_user,
        group=params.user_group)

    XmlConfig(
        "core-site.xml",
        conf_dir=params.hbase_conf_dir,
        configurations=params.config['configurations']['core-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['core-site'],
        owner=params.hbase_user,
        group=params.user_group)
    if 'hdfs-site' in params.config['configurations']:
        XmlConfig(
            "hdfs-site.xml",
            conf_dir=params.hbase_conf_dir,
            configurations=params.config['configurations']['hdfs-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['hdfs-site'],
            owner=params.hbase_user,
            group=params.user_group)

    if 'hbase-policy' in params.config['configurations']:
        XmlConfig(
            "hbase-policy.xml",
            conf_dir=params.hbase_conf_dir,
            configurations=params.config['configurations']['hbase-policy'],
            configuration_attributes=params.config['configuration_attributes']
            ['hbase-policy'],
            owner=params.hbase_user,
            group=params.user_group)
    # Manually overriding ownership of file installed by hadoop package
    else:
        File(format("{params.hbase_conf_dir}/hbase-policy.xml"),
             owner=params.hbase_user,
             group=params.user_group)

    File(
        format("{hbase_conf_dir}/hbase-env.sh"),
        owner=params.hbase_user,
        content=InlineTemplate(params.hbase_env_sh_template),
        group=params.user_group,
    )

    # On some OS this folder could be not exists, so we will create it before pushing there files
    Directory(params.limits_conf_dir,
              create_parents=True,
              owner='root',
              group='root')

    File(os.path.join(params.limits_conf_dir, 'hbase.conf'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("hbase.conf.j2"))

    hbase_TemplateConfig(
        params.metric_prop_file_name,
        tag='GANGLIA-MASTER' if name == 'master' else 'GANGLIA-RS')

    hbase_TemplateConfig('regionservers')

    if params.security_enabled:
        hbase_TemplateConfig(format("hbase_{name}_jaas.conf"))

    if name != "client":
        Directory(
            params.pid_dir,
            owner=params.hbase_user,
            create_parents=True,
            cd_access="a",
            mode=0755,
        )

        Directory(
            params.log_dir,
            owner=params.hbase_user,
            create_parents=True,
            cd_access="a",
            mode=0755,
        )

    if (params.log4j_props != None):
        File(format("{params.hbase_conf_dir}/log4j.properties"),
             mode=0644,
             group=params.user_group,
             owner=params.hbase_user,
             content=InlineTemplate(params.log4j_props))
    elif (os.path.exists(format("{params.hbase_conf_dir}/log4j.properties"))):
        File(format("{params.hbase_conf_dir}/log4j.properties"),
             mode=0644,
             group=params.user_group,
             owner=params.hbase_user)
    if name == "master":
        params.HdfsResource(params.hbase_hdfs_root_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.hbase_user)
        params.HdfsResource(params.hbase_staging_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.hbase_user,
                            mode=0711)
        if params.create_hbase_home_directory:
            params.HdfsResource(params.hbase_home_directory,
                                type="directory",
                                action="create_on_execute",
                                owner=params.hbase_user,
                                mode=0755)
        params.HdfsResource(None, action="execute")

    if params.phoenix_enabled:
        Package(params.phoenix_package,
                retry_on_repo_unavailability=params.
                agent_stack_retry_on_unavailability,
                retry_count=params.agent_stack_retry_count)
コード例 #8
0
def oozie_server_specific():
    """Prepare the Oozie server host: directories, sharelib, ext-js,
    optional Falcon/LZO jars, and the (cached) prepare-war step.

    The expensive steps (sharelib untar, prepare-war) are guarded by
    marker files so re-runs are cheap; do not reorder the Execute calls.
    """
    import params

    # Succeeds only when the pid file exists AND the process is alive,
    # i.e. the Oozie server is currently running.
    no_op_test = as_user(format(
        "ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1"
    ),
                         user=params.oozie_user)

    # Remove a stale pid file, but only when the server is NOT running.
    File(params.pid_file, action="delete", not_if=no_op_test)

    oozie_server_directories = [
        format("{oozie_home}/{oozie_tmp_dir}"), params.oozie_pid_dir,
        params.oozie_log_dir, params.oozie_tmp_dir, params.oozie_data_dir,
        params.oozie_lib_dir, params.oozie_webapps_dir, params.oozie_server_dir
    ]
    Directory(
        oozie_server_directories,
        owner=params.oozie_user,
        group=params.user_group,
        mode=0755,
        recursive=True,
        cd_access="a",
    )
    Directory(
        params.oozie_libext_dir,
        recursive=True,
    )

    # Symlink the webapps conf dir to its target location.
    # NOTE(review): 'oozie_webapss_conf_target_dir' looks misspelled
    # ("webapss") — confirm the attribute name against params before renaming.
    ln_conf = ('ln', '-sfn', params.oozie_webapss_conf_target_dir,
               params.oozie_webapps_conf_dir)
    Execute(ln_conf)

    # NOTE: the hashcode is the md5 of the archive *path string*, not of the
    # file contents — it only detects a changed sharelib location, not a
    # changed archive.
    hashcode_file = format("{oozie_home}/.hashcode")
    hashcode = hashlib.md5(
        format('{oozie_home}/oozie-sharelib.tar.gz')).hexdigest()
    skip_recreate_sharelib = format(
        "test -f {hashcode_file} && test -d {oozie_home}/share && [[ `cat {hashcode_file}` == '{hashcode}' ]]"
    )

    untar_sharelib = ('tar', '-xvf',
                      format('{oozie_home}/oozie-sharelib.tar.gz'), '-C',
                      params.oozie_home)

    # Skip the untar when the server is already running or the sharelib
    # marker is up to date.
    Execute(
        untar_sharelib,  # time-expensive
        not_if=format("{no_op_test} || {skip_recreate_sharelib}"),
        sudo=True,
    )

    # Install ext-js and fix ownership of the webapp conf tree, but only
    # when the server is stopped.
    configure_cmds = []
    configure_cmds.append(('cp', params.ext_js_path, params.oozie_libext_dir))
    configure_cmds.append(('chown', format('{oozie_user}:{user_group}'),
                           format('{oozie_libext_dir}/{ext_js_file}')))
    configure_cmds.append(('chown', '-RL', format('{oozie_user}:{user_group}'),
                           params.oozie_webapps_conf_dir))

    Execute(
        configure_cmds,
        not_if=no_op_test,
        sudo=True,
    )

    # download the database JAR
    download_database_library_if_needed()

    #falcon el extension
    if params.has_falcon_host:
        Execute(format(
            '{sudo} cp {falcon_home}/oozie/ext/falcon-oozie-el-extension-*.jar {oozie_libext_dir}'
        ),
                not_if=no_op_test)

        Execute(format(
            '{sudo} chown {oozie_user}:{user_group} {oozie_libext_dir}/falcon-oozie-el-extension-*.jar'
        ),
                not_if=no_op_test)

    if params.lzo_enabled and len(params.all_lzo_packages) > 0:
        Package(params.all_lzo_packages)
        Execute(
            format(
                '{sudo} cp {hadoop_lib_home}/hadoop-lzo*.jar {oozie_lib_dir}'),
            not_if=no_op_test,
        )

    # prepare-war is cached: skipped when the marker file still holds the
    # exact command line that was last executed.
    prepare_war_cmd_file = format("{oozie_home}/.prepare_war_cmd")
    prepare_war_cmd = format(
        "cd {oozie_tmp_dir} && {oozie_setup_sh} prepare-war {oozie_secure}")
    skip_prepare_war_cmd = format(
        "test -f {prepare_war_cmd_file} && [[ `cat {prepare_war_cmd_file}` == '{prepare_war_cmd}' ]]"
    )

    Execute(
        prepare_war_cmd,  # time-expensive
        user=params.oozie_user,
        not_if=format(
            "{no_op_test} || {skip_recreate_sharelib} && {skip_prepare_war_cmd}"
        ))
    # Persist the markers only after the expensive steps succeeded.
    File(
        hashcode_file,
        content=hashcode,
        mode=0644,
    )
    File(
        prepare_war_cmd_file,
        content=prepare_war_cmd,
        mode=0644,
    )

    if params.hdp_stack_version != "" and compare_versions(
            params.hdp_stack_version, '2.2') >= 0:
        # Create hive-site and tez-site configs for oozie
        Directory(params.hive_conf_dir,
                  recursive=True,
                  owner=params.oozie_user,
                  group=params.user_group)
        if 'hive-site' in params.config['configurations']:
            XmlConfig(
                "hive-site.xml",
                conf_dir=params.hive_conf_dir,
                configurations=params.config['configurations']['hive-site'],
                configuration_attributes=params.
                config['configuration_attributes']['hive-site'],
                owner=params.oozie_user,
                group=params.user_group,
                mode=0644)
        if 'tez-site' in params.config['configurations']:
            XmlConfig(
                "tez-site.xml",
                conf_dir=params.hive_conf_dir,
                configurations=params.config['configurations']['tez-site'],
                configuration_attributes=params.
                config['configuration_attributes']['tez-site'],
                owner=params.oozie_user,
                group=params.user_group,
                mode=0664)
    Execute(('chown', '-R', format("{oozie_user}:{user_group}"),
             params.oozie_server_dir),
            sudo=True)
コード例 #9
0
def oozie(is_server=False):
    import params

    if is_server:
        params.HdfsResource(params.oozie_hdfs_user_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.oozie_user,
                            mode=params.oozie_hdfs_user_mode)
        params.HdfsResource(None, action="execute")
    Directory(params.conf_dir,
              recursive=True,
              owner=params.oozie_user,
              group=params.user_group)
    XmlConfig(
        "oozie-site.xml",
        conf_dir=params.conf_dir,
        configurations=params.oozie_site,
        configuration_attributes=params.config['configuration_attributes']
        ['oozie-site'],
        owner=params.oozie_user,
        group=params.user_group,
        mode=0664)
    File(
        format("{conf_dir}/oozie-env.sh"),
        owner=params.oozie_user,
        content=InlineTemplate(params.oozie_env_sh_template),
        group=params.user_group,
    )

    if (params.log4j_props != None):
        File(format("{params.conf_dir}/oozie-log4j.properties"),
             mode=0644,
             group=params.user_group,
             owner=params.oozie_user,
             content=params.log4j_props)
    elif (os.path.exists(format("{params.conf_dir}/oozie-log4j.properties"))):
        File(format("{params.conf_dir}/oozie-log4j.properties"),
             mode=0644,
             group=params.user_group,
             owner=params.oozie_user)

    File(format("{params.conf_dir}/adminusers.txt"),
         mode=0644,
         group=params.user_group,
         owner=params.oozie_user,
         content=Template('adminusers.txt.j2',
                          oozie_admin_users=params.oozie_admin_users))

    if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or \
       params.jdbc_driver_name == "com.microsoft.sqlserver.jdbc.SQLServerDriver" or \
       params.jdbc_driver_name == "org.postgresql.Driver" or \
       params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
        File(
            format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
            content=DownloadSource(
                format("{jdk_location}{check_db_connection_jar_name}")),
        )
    pass

    oozie_ownership()

    if is_server:
        oozie_server_specific()
コード例 #10
0
ファイル: kms.py プロジェクト: prelongs/ambari
def kms():
    import params

    if params.has_ranger_admin:

        File(params.downloaded_connector_path,
             content=DownloadSource(params.driver_source))

        if not os.path.isfile(params.driver_target):
            Execute(('cp', '--remove-destination',
                     params.downloaded_connector_path, params.driver_target),
                    path=["/bin", "/usr/bin/"],
                    sudo=True)

        Directory(os.path.join(params.kms_home, 'ews', 'webapp', 'WEB-INF',
                               'classes', 'lib'),
                  mode=0755,
                  owner=params.kms_user,
                  group=params.kms_group)

        Execute(('cp', format('{kms_home}/ranger-kms-initd'),
                 '/etc/init.d/ranger-kms'),
                not_if=format('ls /etc/init.d/ranger-kms'),
                only_if=format('ls {kms_home}/ranger-kms-initd'),
                sudo=True)

        File('/etc/init.d/ranger-kms', mode=0755)

        Execute(('chown', '-R', format('{kms_user}:{kms_group}'),
                 format('{kms_home}/')),
                sudo=True)

        Execute(('ln', '-sf', format('{kms_home}/ranger-kms'),
                 '/usr/bin/ranger-kms'),
                not_if=format('ls /usr/bin/ranger-kms'),
                only_if=format('ls {kms_home}/ranger-kms'),
                sudo=True)

        File('/usr/bin/ranger-kms', mode=0755)

        Execute(('ln', '-sf', format('{kms_home}/ranger-kms'),
                 '/usr/bin/ranger-kms-services.sh'),
                not_if=format('ls /usr/bin/ranger-kms-services.sh'),
                only_if=format('ls {kms_home}/ranger-kms'),
                sudo=True)

        File('/usr/bin/ranger-kms-services.sh', mode=0755)

        Execute(('ln', '-sf', format('{kms_home}/ranger-kms-initd'),
                 format('{kms_home}/ranger-kms-services.sh')),
                not_if=format('ls {kms_home}/ranger-kms-services.sh'),
                only_if=format('ls {kms_home}/ranger-kms-initd'),
                sudo=True)

        File(format('{kms_home}/ranger-kms-services.sh'), mode=0755)

        Directory(params.kms_log_dir,
                  owner=params.kms_user,
                  group=params.kms_group,
                  mode=0775)

        do_keystore_setup(params.credential_provider_path, params.jdbc_alias,
                          params.db_password)
        do_keystore_setup(params.credential_provider_path,
                          params.masterkey_alias,
                          params.kms_master_key_password)

        XmlConfig(
            "dbks-site.xml",
            conf_dir=params.kms_conf_dir,
            configurations=params.config['configurations']['dbks-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['dbks-site'],
            owner=params.kms_user,
            group=params.kms_group)

        XmlConfig(
            "ranger-kms-site.xml",
            conf_dir=params.kms_conf_dir,
            configurations=params.config['configurations']['ranger-kms-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['ranger-kms-site'],
            owner=params.kms_user,
            group=params.kms_group)

        XmlConfig(
            "kms-site.xml",
            conf_dir=params.kms_conf_dir,
            configurations=params.config['configurations']['kms-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['kms-site'],
            owner=params.kms_user,
            group=params.kms_group)

        File(os.path.join(params.kms_conf_dir, "kms-log4j.properties"),
             owner=params.kms_user,
             group=params.kms_group,
             content=params.kms_log4j)
コード例 #11
0
ファイル: kms.py プロジェクト: prelongs/ambari
def enable_kms_plugin():
    """Register the Ranger KMS plugin with the Ranger admin service.

    Verifies that Ranger admin is reachable, ensures an Ambari admin user
    and a KMS repository exist in Ranger, then writes the plugin-side
    files (ranger-security.xml marker, policy cache directory/file and
    the ranger-kms-* XML configs) and stores the audit/SSL passwords in
    the JCEKS credential file.

    Raises:
        Fail: if Ranger admin is not reachable, or Ambari admin
            credentials could not be obtained.
    """

    import params

    if params.has_ranger_admin:

        ranger_adm_obj = Rangeradmin(url=params.policymgr_mgr_url)
        # Probe the Ranger login page; 'test:test' is a throwaway credential
        # used only to confirm the service answers (any HTTP 200 will do).
        response_code, response_recieved = ranger_adm_obj.check_ranger_login_urllib2(
            params.policymgr_mgr_url + '/login.jsp', 'test:test')
        if response_code is not None and response_code == 200:
            ambari_ranger_admin, ambari_ranger_password = ranger_adm_obj.create_ambari_admin_user(
                params.ambari_ranger_admin, params.ambari_ranger_password,
                params.admin_uname_password)
            ambari_username_password_for_ranger = ambari_ranger_admin + ':' + ambari_ranger_password
        else:
            raise Fail('Ranger service is not started on given host')

        if ambari_ranger_admin != '' and ambari_ranger_password != '':
            # Create the KMS repository in Ranger only if it does not exist yet.
            get_repo_flag = get_repo(params.policymgr_mgr_url,
                                     params.repo_name,
                                     ambari_username_password_for_ranger)
            if not get_repo_flag:
                create_repo(params.policymgr_mgr_url,
                            json.dumps(params.kms_ranger_plugin_repo),
                            ambari_username_password_for_ranger)
        else:
            raise Fail('Ambari admin username and password not available')

        # Timestamp embedded in ranger-security.xml, marking when the plugin
        # was enabled.
        current_datetime = datetime.now()

        File(
            format('{kms_conf_dir}/ranger-security.xml'),
            owner=params.kms_user,
            group=params.kms_group,
            mode=0644,
            content=InlineTemplate(
                format(
                    '<ranger>\n<enabled>{current_datetime}</enabled>\n</ranger>'
                )))

        # Policy cache directory used by the plugin (e.g. for policies
        # fetched from Ranger admin).
        Directory([
            os.path.join('/etc', 'ranger', params.repo_name),
            os.path.join('/etc', 'ranger', params.repo_name, 'policycache')
        ],
                  owner=params.kms_user,
                  group=params.kms_group,
                  mode=0775,
                  recursive=True)

        # Pre-create the (empty) policy cache file with correct ownership.
        File(os.path.join('/etc', 'ranger', params.repo_name, 'policycache',
                          format('kms_{repo_name}.json')),
             owner=params.kms_user,
             group=params.kms_group,
             mode=0644)

        # Plugin-side audit, security and SSL configuration files.
        # NOTE(review): mode 0744 (not 0644) matches the original; confirm
        # the execute bit is intentional for these xml files.
        XmlConfig(
            "ranger-kms-audit.xml",
            conf_dir=params.kms_conf_dir,
            configurations=params.config['configurations']['ranger-kms-audit'],
            configuration_attributes=params.config['configuration_attributes']
            ['ranger-kms-audit'],
            owner=params.kms_user,
            group=params.kms_group,
            mode=0744)

        XmlConfig(
            "ranger-kms-security.xml",
            conf_dir=params.kms_conf_dir,
            configurations=params.config['configurations']
            ['ranger-kms-security'],
            configuration_attributes=params.config['configuration_attributes']
            ['ranger-kms-security'],
            owner=params.kms_user,
            group=params.kms_group,
            mode=0744)

        XmlConfig(
            "ranger-policymgr-ssl.xml",
            conf_dir=params.kms_conf_dir,
            configurations=params.config['configurations']
            ['ranger-kms-policymgr-ssl'],
            configuration_attributes=params.config['configuration_attributes']
            ['ranger-kms-policymgr-ssl'],
            owner=params.kms_user,
            group=params.kms_group,
            mode=0744)

        # Store the secrets in the JCEKS credential file rather than in the
        # xml configs above.
        if params.xa_audit_db_is_enabled:
            cred_setup = params.cred_setup_prefix + (
                '-f', params.credential_file, '-k', 'auditDBCred', '-v',
                params.xa_audit_db_password, '-c', '1')
            Execute(cred_setup,
                    environment={'JAVA_HOME': params.java_home},
                    logoutput=True,
                    sudo=True)

        cred_setup = params.cred_setup_prefix + (
            '-f', params.credential_file, '-k', 'sslKeyStore', '-v',
            params.ssl_keystore_password, '-c', '1')
        Execute(cred_setup,
                environment={'JAVA_HOME': params.java_home},
                logoutput=True,
                sudo=True)

        cred_setup = params.cred_setup_prefix + (
            '-f', params.credential_file, '-k', 'sslTrustStore', '-v',
            params.ssl_truststore_password, '-c', '1')
        Execute(cred_setup,
                environment={'JAVA_HOME': params.java_home},
                logoutput=True,
                sudo=True)

        File(params.credential_file,
             owner=params.kms_user,
             group=params.kms_group)
Code example #12
0
def knox():
    import params

    directories = [params.knox_data_dir, params.knox_logs_dir, params.knox_pid_dir, params.knox_conf_dir, os.path.join(params.knox_conf_dir, "topologies")]
    for directory in directories:
      Directory(directory,
                owner = params.knox_user,
                group = params.knox_group,
                recursive = True
      )

    XmlConfig("gateway-site.xml",
              conf_dir=params.knox_conf_dir,
              configurations=params.config['configurations']['gateway-site'],
              configuration_attributes=params.config['configuration_attributes']['gateway-site'],
              owner=params.knox_user,
              group=params.knox_group,
    )

    File(format("{params.knox_conf_dir}/gateway-log4j.properties"),
         mode=0644,
         group=params.knox_group,
         owner=params.knox_user,
         content=params.gateway_log4j
    )

    File(format("{params.knox_conf_dir}/topologies/default.xml"),
         group=params.knox_group,
         owner=params.knox_user,
         content=InlineTemplate(params.topology_template)
    )
    File(format("{params.knox_conf_dir}/topologies/admin.xml"),
         group=params.knox_group,
         owner=params.knox_user,
         content=InlineTemplate(params.admin_topology_template)
    )
    if params.security_enabled:
      TemplateConfig( format("{knox_conf_dir}/krb5JAASLogin.conf"),
                      owner = params.knox_user,
                      template_tag = None
      )

    dirs_to_chown = tuple(directories)
    cmd = ('chown','-R',format('{knox_user}:{knox_group}')) + dirs_to_chown
    Execute(cmd,
            sudo = True,
    )

    cmd = format('{knox_client_bin} create-master --master {knox_master_secret!p}')
    master_secret_exist = as_user(format('test -f {knox_master_secret_path}'), params.knox_user)

    Execute(cmd,
            user=params.knox_user,
            environment={'JAVA_HOME': params.java_home},
            not_if=master_secret_exist,
    )

    cmd = format('{knox_client_bin} create-cert --hostname {knox_host_name_in_cluster}')
    cert_store_exist = as_user(format('test -f {knox_cert_store_path}'), params.knox_user)

    Execute(cmd,
            user=params.knox_user,
            environment={'JAVA_HOME': params.java_home},
            not_if=cert_store_exist,
    )
Code example #13
0
def setup_tagsync_ssl_configs():
    import params
    Directory(params.security_store_path, cd_access="a", create_parents=True)

    Directory(params.tagsync_etc_path,
              cd_access="a",
              owner=params.unix_user,
              group=params.unix_group,
              mode=0775,
              create_parents=True)

    # remove plain-text password from xml configs
    ranger_tagsync_policymgr_ssl_copy = {}
    ranger_tagsync_policymgr_ssl_copy.update(
        params.config['configurations']['ranger-tagsync-policymgr-ssl'])
    for prop in params.ranger_tagsync_password_properties:
        if prop in ranger_tagsync_policymgr_ssl_copy:
            ranger_tagsync_policymgr_ssl_copy[prop] = "_"

    XmlConfig(
        "ranger-policymgr-ssl.xml",
        conf_dir=params.ranger_tagsync_conf,
        configurations=ranger_tagsync_policymgr_ssl_copy,
        configuration_attributes=params.config['configuration_attributes']
        ['ranger-tagsync-policymgr-ssl'],
        owner=params.unix_user,
        group=params.unix_group,
        mode=0644)

    ranger_credential_helper(params.tagsync_cred_lib, 'sslKeyStore',
                             params.ranger_tagsync_keystore_password,
                             params.ranger_tagsync_credential_file)
    ranger_credential_helper(params.tagsync_cred_lib, 'sslTrustStore',
                             params.ranger_tagsync_truststore_password,
                             params.ranger_tagsync_credential_file)

    File(params.ranger_tagsync_credential_file,
         owner=params.unix_user,
         group=params.unix_group,
         mode=0640)

    # remove plain-text password from xml configs
    atlas_tagsync_ssl_copy = {}
    atlas_tagsync_ssl_copy.update(
        params.config['configurations']['atlas-tagsync-ssl'])
    for prop in params.ranger_tagsync_password_properties:
        if prop in atlas_tagsync_ssl_copy:
            atlas_tagsync_ssl_copy[prop] = "_"

    XmlConfig(
        "atlas-tagsync-ssl.xml",
        conf_dir=params.ranger_tagsync_conf,
        configurations=atlas_tagsync_ssl_copy,
        configuration_attributes=params.config['configuration_attributes']
        ['atlas-tagsync-ssl'],
        owner=params.unix_user,
        group=params.unix_group,
        mode=0644)

    ranger_credential_helper(params.tagsync_cred_lib, 'sslKeyStore',
                             params.atlas_tagsync_keystore_password,
                             params.atlas_tagsync_credential_file)
    ranger_credential_helper(params.tagsync_cred_lib, 'sslTrustStore',
                             params.atlas_tagsync_truststore_password,
                             params.atlas_tagsync_credential_file)

    File(params.atlas_tagsync_credential_file,
         owner=params.unix_user,
         group=params.unix_group,
         mode=0640)
    Logger.info("Configuring tagsync-ssl configurations done successfully.")
Code example #14
0
def setup_tagsync(upgrade_type=None):
    import params

    ranger_tagsync_home = params.ranger_tagsync_home
    ranger_home = params.ranger_home
    ranger_tagsync_conf = params.ranger_tagsync_conf

    Directory(format("{ranger_tagsync_conf}"),
              owner=params.unix_user,
              group=params.unix_group,
              create_parents=True)

    Directory(
        params.ranger_pid_dir,
        mode=0755,
        create_parents=True,
        owner=params.unix_user,
        group=params.user_group,
        cd_access="a",
    )

    if params.stack_supports_pid:
        File(
            format('{ranger_tagsync_conf}/ranger-tagsync-env-piddir.sh'),
            content=format(
                "export TAGSYNC_PID_DIR_PATH={ranger_pid_dir}\nexport UNIX_TAGSYNC_USER={unix_user}"
            ),
            owner=params.unix_user,
            group=params.unix_group,
            mode=0755)

    Directory(params.tagsync_log_dir,
              create_parents=True,
              owner=params.unix_user,
              group=params.unix_group,
              cd_access="a",
              mode=0755)

    File(format('{ranger_tagsync_conf}/ranger-tagsync-env-logdir.sh'),
         content=format("export RANGER_TAGSYNC_LOG_DIR={tagsync_log_dir}"),
         owner=params.unix_user,
         group=params.unix_group,
         mode=0755)

    XmlConfig(
        "ranger-tagsync-site.xml",
        conf_dir=ranger_tagsync_conf,
        configurations=params.config['configurations']['ranger-tagsync-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['ranger-tagsync-site'],
        owner=params.unix_user,
        group=params.unix_group,
        mode=0644)
    if params.stack_supports_ranger_tagsync_ssl_xml_support:
        Logger.info(
            "Stack supports tagsync-ssl configurations, performing the same.")
        setup_tagsync_ssl_configs()
    else:
        Logger.info(
            "Stack doesnt support tagsync-ssl configurations, skipping the same."
        )

    PropertiesFile(
        format('{ranger_tagsync_conf}/atlas-application.properties'),
        properties=params.tagsync_application_properties,
        mode=0755,
        owner=params.unix_user,
        group=params.unix_group)

    File(format('{ranger_tagsync_conf}/log4j.properties'),
         owner=params.unix_user,
         group=params.unix_group,
         content=InlineTemplate(params.tagsync_log4j),
         mode=0644)

    File(
        params.tagsync_services_file,
        mode=0755,
    )

    Execute(('ln', '-sf', format('{tagsync_services_file}'),
             '/usr/bin/ranger-tagsync'),
            not_if=format("ls /usr/bin/ranger-tagsync"),
            only_if=format("ls {tagsync_services_file}"),
            sudo=True)

    create_core_site_xml(ranger_tagsync_conf)
Code example #15
0
def setup_spark(env, type, upgrade_type=None, action=None):
    import params

    Directory([params.spark_pid_dir, params.spark_log_dir],
              owner=params.spark_user,
              group=params.user_group,
              mode=0775,
              create_parents=True)
    if type == 'server' and action == 'config':
        params.HdfsResource(params.spark_hdfs_user_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.spark_user,
                            mode=0775)
        params.HdfsResource(None, action="execute")

    PropertiesFile(
        format("{spark_conf}/spark-defaults.conf"),
        properties=params.config['configurations']['spark2-defaults'],
        key_value_delimiter=" ",
        owner=params.spark_user,
        group=params.spark_group,
        mode=0644)

    # create spark-env.sh in etc/conf dir
    File(
        os.path.join(params.spark_conf, 'spark-env.sh'),
        owner=params.spark_user,
        group=params.spark_group,
        content=InlineTemplate(params.spark_env_sh),
        mode=0644,
    )

    #create log4j.properties in etc/conf dir
    File(
        os.path.join(params.spark_conf, 'log4j.properties'),
        owner=params.spark_user,
        group=params.spark_group,
        content=params.spark_log4j_properties,
        mode=0644,
    )

    #create metrics.properties in etc/conf dir
    File(os.path.join(params.spark_conf, 'metrics.properties'),
         owner=params.spark_user,
         group=params.spark_group,
         content=InlineTemplate(params.spark_metrics_properties),
         mode=0644)

    if params.is_hive_installed:
        XmlConfig("hive-site.xml",
                  conf_dir=params.spark_conf,
                  configurations=params.spark_hive_properties,
                  owner=params.spark_user,
                  group=params.spark_group,
                  mode=0644)

    if params.has_spark_thriftserver:
        PropertiesFile(params.spark_thrift_server_conf_file,
                       properties=params.config['configurations']
                       ['spark2-thrift-sparkconf'],
                       owner=params.hive_user,
                       group=params.user_group,
                       key_value_delimiter=" ",
                       mode=0644)

    effective_version = params.version if upgrade_type is not None else params.stack_version_formatted
    if effective_version:
        effective_version = format_stack_version(effective_version)

    if params.spark_thrift_fairscheduler_content and effective_version and check_stack_feature(
            StackFeature.SPARK_16PLUS, effective_version):
        # create spark-thrift-fairscheduler.xml
        File(os.path.join(params.spark_conf, "spark-thrift-fairscheduler.xml"),
             owner=params.spark_user,
             group=params.spark_group,
             mode=0755,
             content=InlineTemplate(params.spark_thrift_fairscheduler_content))
Code example #16
0
def knox():
    import params
    Directory([params.knox_data_dir, params.knox_logs_dir, params.knox_pid_dir, params.knox_conf_dir, os.path.join(params.knox_conf_dir, "topologies")],
              owner = params.knox_user,
              group = params.knox_group,
              create_parents = True,
              cd_access = "a",
              mode = 0755,
              recursive_ownership = True,
    )

    XmlConfig("gateway-site.xml",
              conf_dir=params.knox_conf_dir,
              configurations=params.config['configurations']['gateway-site'],
              configuration_attributes=params.config['configuration_attributes']['gateway-site'],
              owner=params.knox_user,
              group=params.knox_group,
    )

    File(format("{params.knox_conf_dir}/gateway-log4j.properties"),
         mode=0644,
         group=params.knox_group,
         owner=params.knox_user,
         content=params.gateway_log4j
    )

    File(format("{params.knox_conf_dir}/topologies/default.xml"),
         group=params.knox_group,
         owner=params.knox_user,
         content=InlineTemplate(params.topology_template)
    )
    File(format("{params.knox_conf_dir}/topologies/admin.xml"),
         group=params.knox_group,
         owner=params.knox_user,
         content=InlineTemplate(params.admin_topology_template)
    )

    if params.version_formatted and check_stack_feature(StackFeature.KNOX_SSO_TOPOLOGY, params.version_formatted):
        File(os.path.join(params.knox_conf_dir, "topologies", "knoxsso.xml"),
            group=params.knox_group,
            owner=params.knox_user,
            content=InlineTemplate(params.knoxsso_topology_template)
        )


    if params.security_enabled:
      TemplateConfig( format("{knox_conf_dir}/krb5JAASLogin.conf"),
                      owner = params.knox_user,
                      template_tag = None
      )

    cmd = format('{knox_client_bin} create-master --master {knox_master_secret!p}')
    master_secret_exist = as_user(format('test -f {knox_master_secret_path}'), params.knox_user)

    Execute(cmd,
            user=params.knox_user,
            environment={'JAVA_HOME': params.java_home},
            not_if=master_secret_exist,
    )

    cmd = format('{knox_client_bin} create-cert --hostname {knox_host_name_in_cluster}')
    cert_store_exist = as_user(format('test -f {knox_cert_store_path}'), params.knox_user)

    Execute(cmd,
            user=params.knox_user,
            environment={'JAVA_HOME': params.java_home},
            not_if=cert_store_exist,
    )
Code example #17
0
File: kms.py  Project: soener/ambari
def kms():
  """Initial Ranger KMS setup.

  Downloads the JDBC driver jar, writes kms-acls.xml / kms-site.xml /
  kms-log4j.properties, registers the KMS repository in Ranger admin,
  writes install.properties and finally runs the KMS setup.sh script.

  Raises:
    Fail: if Ranger admin is unreachable, or Ambari admin credentials
      are not available.
  """
  import params

  if params.has_ranger_admin:

    # Fetch the JDBC driver for the configured database.
    File(params.downloaded_custom_connector,
      content = DownloadSource(params.driver_curl_source)
    )

    Directory(params.java_share_dir,
      mode=0755
    )

    # Copy the driver into place only if it is not already there.
    if not os.path.isfile(params.driver_curl_target):
      Execute(('cp', '--remove-destination', params.downloaded_custom_connector, params.driver_curl_target),
              path=["/bin", "/usr/bin/"],
              sudo=True)

    XmlConfig("kms-acls.xml",
      conf_dir=params.kms_config_dir,
      configurations=params.config['configurations']['kms-acls'],
      configuration_attributes=params.config['configuration_attributes']['kms-acls'],
      owner=params.kms_user,
      group=params.kms_group
    )

    XmlConfig("kms-site.xml",
      conf_dir=params.kms_config_dir,
      configurations=params.config['configurations']['kms-site'],
      configuration_attributes=params.config['configuration_attributes']['kms-site'],
      owner=params.kms_user,
      group=params.kms_group
    )

    File(os.path.join(params.kms_config_dir, "kms-log4j.properties"),
      owner=params.kms_user,
      group=params.kms_group,
      content=params.kms_log4j
    )

    repo_data = kms_repo_properties()

    # Probe the Ranger login page; 'test:test' is a throwaway credential
    # used only to confirm the service answers (any HTTP 200 will do).
    ranger_adm_obj = Rangeradmin(url=params.policymgr_mgr_url)
    response_code, response_recieved = ranger_adm_obj.check_ranger_login_urllib2(params.policymgr_mgr_url + '/login.jsp', 'test:test')
    if response_code is not None and response_code == 200:
      ambari_ranger_admin, ambari_ranger_password = ranger_adm_obj.create_ambari_admin_user(params.ambari_ranger_admin, params.ambari_ranger_password, params.admin_uname_password)
      ambari_username_password_for_ranger = ambari_ranger_admin + ':' + ambari_ranger_password
    else:
      raise Fail('Ranger service is not started on given host')

    # Create the KMS repository in Ranger only if it does not exist yet.
    if ambari_ranger_admin != '' and ambari_ranger_password != '':
      get_repo_flag = get_repo(params.policymgr_mgr_url, params.repo_name, ambari_username_password_for_ranger)
      if not get_repo_flag:
        create_repo(params.policymgr_mgr_url, repo_data, ambari_username_password_for_ranger)
    else:
      raise Fail('Ambari admin username and password not available')

    # install.properties drives the KMS setup.sh script run below.
    file_path = format('{kms_home}/install.properties')
    ranger_kms_dict = ranger_kms_properties()
    write_properties_to_file(file_path, ranger_kms_dict)

    env_dict = {'JAVA_HOME': params.java_home, 'RANGER_HOME': params.kms_home}
    setup_sh = format("cd {kms_home} && ") + as_sudo([format('{kms_home}/setup.sh')])
    Execute(setup_sh, environment=env_dict, logoutput=True)
Code example #18
0
File: oozie.py  Project: Flipkart/ambari
def oozie_server_specific():
    """Server-side Oozie setup: directories, sharelib, war, jdbc and configs.

    Creates the server directories, unpacks the oozie sharelib (skipped
    when the server runs or the sharelib hash is unchanged), installs the
    JDBC driver when MySQL/Oracle is configured, refreshes the falcon EL
    extension jar, prepares the oozie war and writes hive-site.xml for
    oozie.  Most expensive steps are guarded by not_if checks so the
    function is safe to re-run.
    """
    import params

    # True (command exits 0) when the oozie server process is running;
    # used to skip steps that must not touch a live server.
    no_op_test = as_user(format(
        "ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1"
    ),
                         user=params.oozie_user)
    # Remove a stale pid file when no process is behind it.
    File(params.pid_file, action="delete", not_if=no_op_test)

    oozie_server_directories = [
        format("{oozie_home}/{oozie_tmp_dir}"), params.oozie_pid_dir,
        params.oozie_log_dir, params.oozie_tmp_dir, params.oozie_data_dir,
        params.oozie_lib_dir, params.oozie_webapps_dir,
        params.oozie_webapps_conf_dir, params.oozie_server_dir
    ]
    Directory(
        oozie_server_directories,
        owner=params.oozie_user,
        group=params.user_group,
        mode=0755,
        create_parents=True,
        cd_access="a",
    )

    Directory(
        params.oozie_libext_dir,
        create_parents=True,
    )

    # The hashcode file records the md5 of the sharelib tarball; when it
    # matches and the share dir exists, re-extracting is skipped.
    hashcode_file = format("{oozie_home}/.hashcode")
    hashcode = hashlib.md5(
        format('{oozie_home}/oozie-sharelib.tar.gz')).hexdigest()
    skip_recreate_sharelib = format(
        "test -f {hashcode_file} && test -d {oozie_home}/share && [[ `cat {hashcode_file}` == '{hashcode}' ]]"
    )

    untar_sharelib = ('tar', '-xvf',
                      format('{oozie_home}/oozie-sharelib.tar.gz'), '-C',
                      params.oozie_home)

    Execute(
        untar_sharelib,  # time-expensive
        not_if=format("{no_op_test} || {skip_recreate_sharelib}"),
        sudo=True,
    )
    configure_cmds = []
    #configure_cmds.append(('tar','-xvf',format('{oozie_home}/oozie-sharelib.tar.gz'),'-C',params.oozie_home))
    #configure_cmds.append(('cp', params.ext_js_path, params.oozie_libext_dir))
    #configure_cmds.append(('chown', format('{oozie_user}:{user_group}'), format('{oozie_libext_dir}/{ext_js_file}')))
    configure_cmds.append(('chown', '-RL', format('{oozie_user}:{user_group}'),
                           params.oozie_webapps_conf_dir))

    Execute(
        configure_cmds,
        not_if=no_op_test,
        sudo=True,
    )

    # Install the JDBC driver jar only for MySQL / Oracle metastores.
    if params.jdbc_driver_name=="com.mysql.jdbc.Driver" or \
       params.jdbc_driver_name=="oracle.jdbc.driver.OracleDriver":
        File(
            params.downloaded_custom_connector,
            content=DownloadSource(params.driver_curl_source),
        )

        Execute(
            ('cp', '--remove-destination', params.downloaded_custom_connector,
             params.target),
            #creates=params.target, TODO: uncomment after ranger_hive_plugin will not provide jdbc
            path=["/bin", "/usr/bin/"],
            sudo=True)

        File(params.target, owner=params.oozie_user, group=params.user_group)

    #falcon el extension
    if params.has_falcon_host:
        # Always drop the old jar, then pull the current one from HDFS
        # (kinit first on secure clusters).
        Execute(
            format('rm -rf {oozie_libext_dir}/falcon-oozie-el-extension.jar'),
        )
        if params.security_enabled:
            Execute(
                format(
                    '/usr/bin/kinit -kt /etc/security/keytabs/hdfs.headless.keytab {hdfs_principal_name}'
                ))
        Execute(
            format(
                'hadoop fs -get /user/falcon/temp/falcon-oozie-el-extension.jar {oozie_libext_dir}'
            ),
            not_if=no_op_test,
        )
        Execute(
            format(
                '{sudo} chown {oozie_user}:{user_group} {oozie_libext_dir}/falcon-oozie-el-extension.jar'
            ),
            not_if=no_op_test,
        )

    # prepare-war is skipped when the recorded command matches the one we
    # would run (and the sharelib did not change).
    prepare_war_cmd_file = format("{oozie_home}/.prepare_war_cmd")
    prepare_war_cmd = format(
        "cd {oozie_tmp_dir} && {oozie_setup_sh} prepare-war {oozie_secure}")
    skip_prepare_war_cmd = format(
        "test -f {prepare_war_cmd_file} && [[ `cat {prepare_war_cmd_file}` == '{prepare_war_cmd}' ]]"
    )

    Execute(
        prepare_war_cmd,  # time-expensive
        user=params.oozie_user,
        not_if=format(
            "{no_op_test} || {skip_recreate_sharelib} && {skip_prepare_war_cmd}"
        ))
    # Persist the markers consulted by the skip checks above.
    File(
        hashcode_file,
        content=hashcode,
        mode=0644,
    )
    File(
        prepare_war_cmd_file,
        content=prepare_war_cmd,
        mode=0644,
    )

    # Create hive-site and tez-site configs for oozie
    Directory(params.hive_conf_dir,
              create_parents=True,
              owner=params.oozie_user,
              group=params.user_group)
    if 'hive-site' in params.config['configurations']:
        XmlConfig(
            "hive-site.xml",
            conf_dir=params.hive_conf_dir,
            configurations=params.config['configurations']['hive-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['hive-site'],
            owner=params.oozie_user,
            group=params.user_group,
            mode=0640)
    '''if 'tez-site' in params.config['configurations']:
      XmlConfig( "tez-site.xml",
        conf_dir = params.hive_conf_dir,
        configurations = params.config['configurations']['tez-site'],
        configuration_attributes=params.config['configuration_attributes']['tez-site'],
        owner = params.oozie_user,
        group = params.user_group,
        mode = 0664
  )'''
    Execute(('chown', '-R', format("{oozie_user}:{user_group}"),
             params.oozie_server_dir),
            sudo=True)
Code example #19
0
File: ams.py  Project: wang7x/dfhz_hdp_mpack
def ams(name=None):
  """Configure Ambari Metrics components on a Windows host.

  :param name: 'collector' or 'monitor'; selects which component's
      directories, configs and Windows service registration to set up.

  For the collector this writes ams-site/hbase-site (plus hdfs/core-site
  when the root dir is not on the local FS), registers the Windows
  services and creates sink-jar symlinks; for the monitor it writes the
  metric_monitor/metric_groups configs and registers its service.
  """
  import params
  if name == 'collector':
    # First-time setup registers the collector Windows service.
    if not check_windows_service_exists(params.ams_collector_win_service_name):
      Execute(format("cmd /C cd {ams_collector_home_dir} & ambari-metrics-collector.cmd setup"))

    Directory(params.ams_collector_conf_dir,
              owner=params.ams_user,
              create_parents = True
    )

    Directory(params.ams_checkpoint_dir,
              owner=params.ams_user,
              create_parents = True
    )

    XmlConfig("ams-site.xml",
              conf_dir=params.ams_collector_conf_dir,
              configurations=params.config['configurations']['ams-site'],
              configuration_attributes=params.config['configurationAttributes']['ams-site'],
              owner=params.ams_user,
    )

    # hbase-site is ams-hbase-site overlaid with the security properties
    # when kerberos is enabled.
    merged_ams_hbase_site = {}
    merged_ams_hbase_site.update(params.config['configurations']['ams-hbase-site'])
    if params.security_enabled:
      merged_ams_hbase_site.update(params.config['configurations']['ams-hbase-security-site'])

    XmlConfig( "hbase-site.xml",
               conf_dir = params.ams_collector_conf_dir,
               configurations = merged_ams_hbase_site,
               configuration_attributes=params.config['configurationAttributes']['ams-hbase-site'],
               owner = params.ams_user,
    )

    if (params.log4j_props != None):
      File(os.path.join(params.ams_collector_conf_dir, "log4j.properties"),
           owner=params.ams_user,
           content=params.log4j_props
      )

    File(os.path.join(params.ams_collector_conf_dir, "ams-env.cmd"),
         owner=params.ams_user,
         content=InlineTemplate(params.ams_env_sh_template)
    )

    # Run the collector service under the ams user account.
    ServiceConfig(params.ams_collector_win_service_name,
                  action="change_user",
                  username = params.ams_user,
                  password = Script.get_password(params.ams_user))

    if not params.is_local_fs_rootdir:
      # Configuration needed to support NN HA
      XmlConfig("hdfs-site.xml",
            conf_dir=params.ams_collector_conf_dir,
            configurations=params.config['configurations']['hdfs-site'],
            configuration_attributes=params.config['configurationAttributes']['hdfs-site'],
            owner=params.ams_user,
            group=params.user_group,
            mode=0644
      )

      XmlConfig("hdfs-site.xml",
            conf_dir=params.hbase_conf_dir,
            configurations=params.config['configurations']['hdfs-site'],
            configuration_attributes=params.config['configurationAttributes']['hdfs-site'],
            owner=params.ams_user,
            group=params.user_group,
            mode=0644
      )

      XmlConfig("core-site.xml",
                conf_dir=params.ams_collector_conf_dir,
                configurations=params.config['configurations']['core-site'],
                configuration_attributes=params.config['configurationAttributes']['core-site'],
                owner=params.ams_user,
                group=params.user_group,
                mode=0644
      )

      XmlConfig("core-site.xml",
                conf_dir=params.hbase_conf_dir,
                configurations=params.config['configurations']['core-site'],
                configuration_attributes=params.config['configurationAttributes']['core-site'],
                owner=params.ams_user,
                group=params.user_group,
                mode=0644
      )

    else:
      # Local-FS root dir: run the embedded HBase service as the ams user.
      ServiceConfig(params.ams_embedded_hbase_win_service_name,
                    action="change_user",
                    username = params.ams_user,
                    password = Script.get_password(params.ams_user))
      # creating symbolic links on ams jars to make them available to services
      links_pairs = [
        ("%COLLECTOR_HOME%\\hbase\\lib\\ambari-metrics-hadoop-sink-with-common.jar",
         "%SINK_HOME%\\hadoop-sink\\ambari-metrics-hadoop-sink-with-common-*.jar"),
        ]
      for link_pair in links_pairs:
        link, target = link_pair
        real_link = os.path.expandvars(link)
        target = compress_backslashes(glob.glob(os.path.expandvars(target))[0])
        if not os.path.exists(real_link):
          #TODO check the symlink destination too. Broken in Python 2.x on Windows.
          Execute('cmd /c mklink "{0}" "{1}"'.format(real_link, target))
    pass

  elif name == 'monitor':
    # First-time setup registers the monitor Windows service.
    if not check_windows_service_exists(params.ams_monitor_win_service_name):
      Execute(format("cmd /C cd {ams_monitor_home_dir} & ambari-metrics-monitor.cmd setup"))

    # creating symbolic links on ams jars to make them available to services
    links_pairs = [
      ("%HADOOP_HOME%\\share\\hadoop\\common\\lib\\ambari-metrics-hadoop-sink-with-common.jar",
       "%SINK_HOME%\\hadoop-sink\\ambari-metrics-hadoop-sink-with-common-*.jar"),
      ("%HBASE_HOME%\\lib\\ambari-metrics-hadoop-sink-with-common.jar",
       "%SINK_HOME%\\hadoop-sink\\ambari-metrics-hadoop-sink-with-common-*.jar"),
    ]
    for link_pair in links_pairs:
      link, target = link_pair
      real_link = os.path.expandvars(link)
      target = compress_backslashes(glob.glob(os.path.expandvars(target))[0])
      if not os.path.exists(real_link):
        #TODO check the symlink destination too. Broken in Python 2.x on Windows.
        Execute('cmd /c mklink "{0}" "{1}"'.format(real_link, target))

    Directory(params.ams_monitor_conf_dir,
              owner=params.ams_user,
              create_parents = True
    )

    # Extra configs are only written when in-host aggregation is enabled.
    if params.host_in_memory_aggregation:
      if params.log4j_props is not None:
        File(os.path.join(params.ams_monitor_conf_dir, "log4j.properties"),
             owner=params.ams_user,
             content=params.log4j_props
             )
        pass

      XmlConfig("ams-site.xml",
                conf_dir=params.ams_monitor_conf_dir,
                configurations=params.config['configurations']['ams-site'],
                configuration_attributes=params.config['configurationAttributes']['ams-site'],
                owner=params.ams_user,
                group=params.user_group
                )

      XmlConfig("ssl-server.xml",
              conf_dir=params.ams_monitor_conf_dir,
              configurations=params.config['configurations']['ams-ssl-server'],
              configuration_attributes=params.config['configurationAttributes']['ams-ssl-server'],
              owner=params.ams_user,
              group=params.user_group
              )
      pass

    TemplateConfig(
      os.path.join(params.ams_monitor_conf_dir, "metric_monitor.ini"),
      owner=params.ams_user,
      template_tag=None
    )

    TemplateConfig(
      os.path.join(params.ams_monitor_conf_dir, "metric_groups.conf"),
      owner=params.ams_user,
      template_tag=None
    )

    # Run the monitor service under the ams user account.
    ServiceConfig(params.ams_monitor_win_service_name,
                  action="change_user",
                  username = params.ams_user,
                  password = Script.get_password(params.ams_user))
Code example #20
0
File: oozie.py  Project: Flipkart/ambari
def oozie(is_server=False):
    import params

    if is_server:
        params.HdfsResource(params.oozie_hdfs_user_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.oozie_user,
                            mode=params.oozie_hdfs_user_mode)
        params.HdfsResource(None, action="execute")

    Directory(params.conf_dir,
              create_parents=True,
              owner=params.oozie_user,
              group=params.user_group)
    XmlConfig(
        "oozie-site.xml",
        conf_dir=params.conf_dir,
        configurations=params.oozie_site,
        configuration_attributes=params.config['configuration_attributes']
        ['oozie-site'],
        owner=params.oozie_user,
        group=params.user_group,
        mode=0660)
    File(
        format("{conf_dir}/oozie-env.sh"),
        owner=params.oozie_user,
        content=InlineTemplate(params.oozie_env_sh_template),
        group=params.user_group,
    )

    if (params.log4j_props != None):
        File(format("{params.conf_dir}/oozie-log4j.properties"),
             mode=0644,
             group=params.user_group,
             owner=params.oozie_user,
             content=params.log4j_props)
    elif (os.path.exists(format("{params.conf_dir}/oozie-log4j.properties"))):
        File(format("{params.conf_dir}/oozie-log4j.properties"),
             mode=0644,
             group=params.user_group,
             owner=params.oozie_user)

    File(format("{params.conf_dir}/adminusers.txt"),
         mode=0644,
         group=params.user_group,
         owner=params.oozie_user,
         content=Template('adminusers.txt.j2', oozie_user=params.oozie_user))

    environment = {"no_proxy": format("{ambari_server_hostname}")}

    if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or \
       params.jdbc_driver_name == "org.postgresql.Driver" or \
       params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
        File(
            format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
            content=DownloadSource(
                format("{jdk_location}{check_db_connection_jar_name}")),
        )
    pass

    oozie_ownership()

    if params.lzo_enabled:
        install_lzo_if_needed()
        Execute(
            format(
                '{sudo} cp {hadoop_lib_home}/hadoop-lzo*.jar {oozie_lib_dir}'),
            not_if=no_op_test,
        )

    if is_server:
        oozie_server_specific()
コード例 #21
0
ファイル: oozie.py プロジェクト: Liujinan001/ambari-2.7.5
def oozie(is_server=False, upgrade_type=None):
  import params

  if is_server:
    params.HdfsResource(params.oozie_hdfs_user_dir,
                         type="directory",
                         action="create_on_execute",
                         owner=params.oozie_user,
                         mode=params.oozie_hdfs_user_mode
    )
    params.HdfsResource(None, action="execute")

    generate_logfeeder_input_config('oozie', Template("input.config-oozie.json.j2", extra_imports=[default]))
  Directory(params.conf_dir,
             create_parents = True,
             owner = params.oozie_user,
             group = params.user_group
  )

  params.oozie_site = update_credential_provider_path(params.oozie_site,
                                                      'oozie-site',
                                                      os.path.join(params.conf_dir, 'oozie-site.jceks'),
                                                      params.oozie_user,
                                                      params.user_group,
                                                      use_local_jceks=True
                                                      )

  XmlConfig("oozie-site.xml",
    conf_dir = params.conf_dir,
    configurations = params.oozie_site,
    configuration_attributes=params.config['configurationAttributes']['oozie-site'],
    owner = params.oozie_user,
    group = params.user_group,
    mode = 0664
  )
  File(format("{conf_dir}/oozie-env.sh"),
    owner=params.oozie_user,
    content=InlineTemplate(params.oozie_env_sh_template),
    group=params.user_group,
  )

  # On some OS this folder could be not exists, so we will create it before pushing there files
  Directory(params.limits_conf_dir,
            create_parents=True,
            owner='root',
            group='root'
  )

  File(os.path.join(params.limits_conf_dir, 'oozie.conf'),
       owner='root',
       group='root',
       mode=0644,
       content=Template("oozie.conf.j2")
  )

  if (params.log4j_props != None):
    File(format("{params.conf_dir}/oozie-log4j.properties"),
      mode=0644,
      group=params.user_group,
      owner=params.oozie_user,
      content=InlineTemplate(params.log4j_props)
    )
  elif (os.path.exists(format("{params.conf_dir}/oozie-log4j.properties"))):
    File(format("{params.conf_dir}/oozie-log4j.properties"),
      mode=0644,
      group=params.user_group,
      owner=params.oozie_user
    )

  if params.stack_version_formatted and check_stack_feature(StackFeature.OOZIE_ADMIN_USER, params.stack_version_formatted):
    File(format("{params.conf_dir}/adminusers.txt"),
      mode=0644,
      group=params.user_group,
      owner=params.oozie_user,
      content=Template('adminusers.txt.j2', oozie_admin_users=params.oozie_admin_users)
    )
  else:
    File ( format("{params.conf_dir}/adminusers.txt"),
           owner = params.oozie_user,
           group = params.user_group
    )

  if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or \
     params.jdbc_driver_name == "com.microsoft.sqlserver.jdbc.SQLServerDriver" or \
     params.jdbc_driver_name == "org.postgresql.Driver" or \
     params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
    File(format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
      content = DownloadSource(format("{jdk_location}/{check_db_connection_jar_name}")),
    )
  pass

  oozie_ownership()
  
  if params.lzo_enabled:
    install_lzo_if_needed()
    Execute(format('{sudo} cp {hadoop_lib_home}/hadoop-lzo*.jar {oozie_lib_dir}'),
    )
  
  if is_server:
    oozie_server_specific(upgrade_type)
コード例 #22
0
ファイル: kms.py プロジェクト: pravin-dsilva/HDP2.5-ambari
def kms(upgrade_type=None):
    import params

    if params.has_ranger_admin:

        Directory(params.kms_conf_dir,
                  owner=params.kms_user,
                  group=params.kms_group,
                  create_parents=True)

        copy_jdbc_connector()

        File(
            format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
            content=DownloadSource(
                format("{jdk_location}{check_db_connection_jar_name}")),
            mode=0644,
        )

        cp = format("{check_db_connection_jar}")
        if params.db_flavor.lower() == 'sqla':
            cp = cp + os.pathsep + format(
                "{kms_home}/ews/webapp/lib/sajdbc4.jar")
        else:
            path_to_jdbc = format("{kms_home}/ews/webapp/lib/{jdbc_jar_name}")
            if not os.path.isfile(path_to_jdbc):
                path_to_jdbc = format("{kms_home}/ews/webapp/lib/") + \
                               params.default_connectors_map[params.db_flavor.lower()] if params.db_flavor.lower() in params.default_connectors_map else None
                if not os.path.isfile(path_to_jdbc):
                    path_to_jdbc = format("{kms_home}/ews/webapp/lib/") + "*"
                    error_message = "Error! Sorry, but we can't find jdbc driver with default name " + params.default_connectors_map[params.db_flavor] + \
                          " in ranger kms lib dir. So, db connection check can fail. Please run 'ambari-server setup --jdbc-db={db_name} --jdbc-driver={path_to_jdbc} on server host.'"
                    Logger.error(error_message)

            cp = cp + os.pathsep + path_to_jdbc

        db_connection_check_command = format(
            "{java_home}/bin/java -cp {cp} org.apache.ambari.server.DBConnectionVerification '{ranger_kms_jdbc_connection_url}' {db_user} {db_password!p} {ranger_kms_jdbc_driver}"
        )

        env_dict = {}
        if params.db_flavor.lower() == 'sqla':
            env_dict = {'LD_LIBRARY_PATH': params.ld_library_path}

        Execute(db_connection_check_command,
                path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
                tries=5,
                try_sleep=10,
                environment=env_dict)

        if params.xa_audit_db_is_enabled and params.driver_source is not None and not params.driver_source.endswith(
                "/None"):
            if params.xa_previous_jdbc_jar and os.path.isfile(
                    params.xa_previous_jdbc_jar):
                File(params.xa_previous_jdbc_jar, action='delete')

            File(params.downloaded_connector_path,
                 content=DownloadSource(params.driver_source),
                 mode=0644)

            Execute(('cp', '--remove-destination',
                     params.downloaded_connector_path, params.driver_target),
                    path=["/bin", "/usr/bin/"],
                    sudo=True)

            File(params.driver_target, mode=0644)

        Directory(os.path.join(params.kms_home, 'ews', 'webapp', 'WEB-INF',
                               'classes', 'lib'),
                  mode=0755,
                  owner=params.kms_user,
                  group=params.kms_group)

        Execute(('cp', format('{kms_home}/ranger-kms-initd'),
                 '/etc/init.d/ranger-kms'),
                not_if=format('ls /etc/init.d/ranger-kms'),
                only_if=format('ls {kms_home}/ranger-kms-initd'),
                sudo=True)

        File('/etc/init.d/ranger-kms', mode=0755)

        Directory(
            format('{kms_home}/'),
            owner=params.kms_user,
            group=params.kms_group,
            recursive_ownership=True,
        )

        Directory(params.ranger_kms_pid_dir,
                  mode=0755,
                  owner=params.kms_user,
                  group=params.user_group,
                  cd_access="a",
                  create_parents=True)

        Directory(params.kms_log_dir,
                  owner=params.kms_user,
                  group=params.kms_group,
                  cd_access='a',
                  create_parents=True,
                  mode=0755)

        File(format('{kms_conf_dir}/ranger-kms-env-logdir.sh'),
             content=format("export RANGER_KMS_LOG_DIR={kms_log_dir}"),
             owner=params.kms_user,
             group=params.kms_group,
             mode=0755)

        Execute(('ln', '-sf', format('{kms_home}/ranger-kms'),
                 '/usr/bin/ranger-kms'),
                not_if=format('ls /usr/bin/ranger-kms'),
                only_if=format('ls {kms_home}/ranger-kms'),
                sudo=True)

        File('/usr/bin/ranger-kms', mode=0755)

        Execute(('ln', '-sf', format('{kms_home}/ranger-kms'),
                 '/usr/bin/ranger-kms-services.sh'),
                not_if=format('ls /usr/bin/ranger-kms-services.sh'),
                only_if=format('ls {kms_home}/ranger-kms'),
                sudo=True)

        File('/usr/bin/ranger-kms-services.sh', mode=0755)

        Execute(('ln', '-sf', format('{kms_home}/ranger-kms-initd'),
                 format('{kms_home}/ranger-kms-services.sh')),
                not_if=format('ls {kms_home}/ranger-kms-services.sh'),
                only_if=format('ls {kms_home}/ranger-kms-initd'),
                sudo=True)

        File(format('{kms_home}/ranger-kms-services.sh'), mode=0755)

        Directory(params.kms_log_dir,
                  owner=params.kms_user,
                  group=params.kms_group,
                  mode=0775)

        do_keystore_setup(params.credential_provider_path, params.jdbc_alias,
                          params.db_password)
        do_keystore_setup(params.credential_provider_path,
                          params.masterkey_alias,
                          params.kms_master_key_password)
        if params.stack_support_kms_hsm and params.enable_kms_hsm:
            do_keystore_setup(params.credential_provider_path,
                              params.hms_partition_alias,
                              unicode(params.hms_partition_passwd))

        XmlConfig(
            "dbks-site.xml",
            conf_dir=params.kms_conf_dir,
            configurations=params.config['configurations']['dbks-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['dbks-site'],
            owner=params.kms_user,
            group=params.kms_group,
            mode=0644)

        XmlConfig(
            "ranger-kms-site.xml",
            conf_dir=params.kms_conf_dir,
            configurations=params.config['configurations']['ranger-kms-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['ranger-kms-site'],
            owner=params.kms_user,
            group=params.kms_group,
            mode=0644)

        XmlConfig(
            "kms-site.xml",
            conf_dir=params.kms_conf_dir,
            configurations=params.config['configurations']['kms-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['kms-site'],
            owner=params.kms_user,
            group=params.kms_group,
            mode=0644)

        File(os.path.join(params.kms_conf_dir, "kms-log4j.properties"),
             owner=params.kms_user,
             group=params.kms_group,
             content=params.kms_log4j,
             mode=0644)
        if params.security_enabled:
            # core-site.xml linking required by setup for HDFS encryption
            XmlConfig(
                "core-site.xml",
                conf_dir=params.kms_conf_dir,
                configurations=params.config['configurations']['core-site'],
                configuration_attributes=params.
                config['configuration_attributes']['core-site'],
                owner=params.kms_user,
                group=params.kms_group,
                mode=0644)
コード例 #23
0
ファイル: hbase.py プロジェクト: Liujinan001/ambari-2.7.5
def hbase(
        name=None  # 'master' or 'regionserver' or 'client'
    ,
        action=None):
    """
    Lay down configuration for the HBase instance embedded in Ambari Metrics
    (AMS): conf/tmp/log/pid directories, the merged ams-hbase-site config,
    jaas files when kerberized and, for the master role, the HBase root and
    staging directories.

    :param name: component role -- 'master', 'regionserver' or 'client'
    :param action: current command; when 'stop', creation of the HDFS root
                   dirs is skipped because HDFS is probably down
    """
    import params

    # Conf dir must be owned recursively by the hbase user.
    Directory(
        params.hbase_conf_dir,
        owner=params.hbase_user,
        group=params.user_group,
        create_parents=True,
        recursive_ownership=True,
    )

    Directory(
        params.hbase_tmp_dir,
        owner=params.hbase_user,
        cd_access="a",
        create_parents=True,
        recursive_ownership=True,
    )

    Directory(os.path.join(params.local_dir, "jars"),
              owner=params.hbase_user,
              group=params.user_group,
              cd_access="a",
              mode=0775,
              create_parents=True)

    # Optional dedicated WAL directory.
    if params.hbase_wal_dir:
        Directory(
            params.hbase_wal_dir,
            owner=params.hbase_user,
            group=params.user_group,
            cd_access="a",
            create_parents=True,
            recursive_ownership=True,
        )

    # ams-hbase-security-site entries override the base ams-hbase-site when
    # kerberos is enabled.
    merged_ams_hbase_site = {}
    merged_ams_hbase_site.update(
        params.config['configurations']['ams-hbase-site'])
    if params.security_enabled:
        merged_ams_hbase_site.update(
            params.config['configurations']['ams-hbase-security-site'])

    # In non-distributed mode, delete the HDFS client configs -- presumably so
    # the embedded HBase does not pick up the cluster filesystem; confirm.
    if not params.is_hbase_distributed:
        File(format("{hbase_conf_dir}/core-site.xml"),
             action='delete',
             owner=params.hbase_user)

        File(format("{hbase_conf_dir}/hdfs-site.xml"),
             action='delete',
             owner=params.hbase_user)

    XmlConfig("hbase-site.xml",
              conf_dir=params.hbase_conf_dir,
              configurations=merged_ams_hbase_site,
              configuration_attributes=params.config['configurationAttributes']
              ['ams-hbase-site'],
              owner=params.hbase_user,
              group=params.user_group)

    # Phoenix spool file dir if not /tmp
    if not os.path.exists(params.phoenix_server_spool_dir):
        Directory(params.phoenix_server_spool_dir,
                  owner=params.ams_user,
                  mode=0755,
                  group=params.user_group,
                  cd_access="a",
                  create_parents=True)
    pass

    if 'ams-hbase-policy' in params.config['configurations']:
        XmlConfig(
            "hbase-policy.xml",
            conf_dir=params.hbase_conf_dir,
            configurations=params.config['configurations']['ams-hbase-policy'],
            configuration_attributes=params.config['configurationAttributes']
            ['ams-hbase-policy'],
            owner=params.hbase_user,
            group=params.user_group)
    # Manually overriding ownership of file installed by hadoop package
    else:
        File(format("{params.hbase_conf_dir}/hbase-policy.xml"),
             owner=params.hbase_user,
             group=params.user_group)

    File(format("{hbase_conf_dir}/hbase-env.sh"),
         owner=params.hbase_user,
         content=InlineTemplate(params.hbase_env_sh_template))

    # Metrics properties
    File(os.path.join(params.hbase_conf_dir,
                      "hadoop-metrics2-hbase.properties"),
         owner=params.hbase_user,
         group=params.user_group,
         content=Template("hadoop-metrics2-hbase.properties.j2"))

    # hbase_TemplateConfig( params.metric_prop_file_name,
    #   tag = 'GANGLIA-MASTER' if name == 'master' else 'GANGLIA-RS'
    # )

    hbase_TemplateConfig('regionservers', user=params.hbase_user)

    # Kerberos jaas configs for the current role, the client and zookeeper.
    if params.security_enabled:
        hbase_TemplateConfig(format("hbase_{name}_jaas.conf"),
                             user=params.hbase_user)
        hbase_TemplateConfig(format("hbase_client_jaas.conf"),
                             user=params.hbase_user)
        hbase_TemplateConfig(format("ams_zookeeper_jaas.conf"),
                             user=params.hbase_user)

    # Daemons (master/regionserver) need pid and log dirs; clients do not.
    if name != "client":
        Directory(
            params.hbase_pid_dir,
            owner=params.hbase_user,
            create_parents=True,
            cd_access="a",
            mode=0755,
        )

        Directory(
            params.hbase_log_dir,
            owner=params.hbase_user,
            create_parents=True,
            cd_access="a",
            mode=0755,
        )

    if name == "master":

        if not params.is_local_fs_rootdir:
            # If executing Stop All, HDFS is probably down
            if action != 'stop' and not params.skip_create_hbase_root_dir:

                params.HdfsResource(params.hbase_root_dir,
                                    type="directory",
                                    action="create_on_execute",
                                    owner=params.hbase_user,
                                    mode=0775,
                                    dfs_type=params.dfs_type)

                params.HdfsResource(params.hbase_staging_dir,
                                    type="directory",
                                    action="create_on_execute",
                                    owner=params.hbase_user,
                                    mode=0711,
                                    dfs_type=params.dfs_type)

                params.HdfsResource(None, action="execute")

            if params.is_hbase_distributed:
                #Workaround for status commands not aware of operating mode
                File(format("{params.hbase_pid_dir}/distributed_mode"),
                     action="create",
                     mode=0644,
                     owner=params.hbase_user)

            pass

        else:

            local_root_dir = params.hbase_root_dir
            #cut protocol name
            if local_root_dir.startswith("file://"):
                local_root_dir = local_root_dir[7:]
                #otherwise assume dir name is provided as is

            Directory(local_root_dir,
                      owner=params.hbase_user,
                      cd_access="a",
                      create_parents=True,
                      recursive_ownership=True)

            # Local-FS mode: remove the distributed-mode marker file.
            File(format("{params.hbase_pid_dir}/distributed_mode"),
                 action="delete",
                 owner=params.hbase_user)

    if params.hbase_log4j_props is not None:
        File(format("{params.hbase_conf_dir}/log4j.properties"),
             mode=0644,
             group=params.user_group,
             owner=params.hbase_user,
             content=InlineTemplate(params.hbase_log4j_props))
    elif os.path.exists(format("{params.hbase_conf_dir}/log4j.properties")):
        # No props supplied: only fix ownership/permissions of an existing file.
        File(format("{params.hbase_conf_dir}/log4j.properties"),
             mode=0644,
             group=params.user_group,
             owner=params.hbase_user)

    # Register the AMS logs with the logfeeder input config.
    generate_logfeeder_input_config(
        'ambari-metrics',
        Template("input.config-ambari-metrics.json.j2",
                 extra_imports=[default]))
コード例 #24
0
ファイル: kms.py プロジェクト: pravin-dsilva/HDP2.5-ambari
def enable_kms_plugin():
    """
    Register the Ranger KMS service/repo with the Ranger admin and lay down
    the plugin's audit/security/SSL configs, policy cache, and credential
    stores.
    """

    import params

    if params.has_ranger_admin:

        ranger_flag = False

        # Verify (or create) the Ranger service; with kerberos, authenticate
        # with the KMS principal when available, otherwise the SPNEGO one.
        if params.stack_supports_ranger_kerberos and params.security_enabled:
            if not is_empty(params.rangerkms_principal
                            ) and params.rangerkms_principal != '':
                ranger_flag = check_ranger_service_support_kerberos(
                    params.kms_user, params.rangerkms_keytab,
                    params.rangerkms_principal)
            else:
                # NOTE(review): 'spengo_keytab' spelling differs from
                # 'spnego_principal'; assumed to match the attribute name
                # defined in params -- verify before renaming.
                ranger_flag = check_ranger_service_support_kerberos(
                    params.kms_user, params.spengo_keytab,
                    params.spnego_principal)
        else:
            ranger_flag = check_ranger_service()

        if not ranger_flag:
            Logger.error('Error in Get/Create service for Ranger Kms.')

        # Timestamp recorded in ranger-security.xml marks when the plugin
        # was enabled.
        current_datetime = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

        File(format('{kms_conf_dir}/ranger-security.xml'),
             owner=params.kms_user,
             group=params.kms_group,
             mode=0644,
             content=format(
                 '<ranger>\n<enabled>{current_datetime}</enabled>\n</ranger>'))

        # Policy cache directories for the repo.
        Directory([
            os.path.join('/etc', 'ranger', params.repo_name),
            os.path.join('/etc', 'ranger', params.repo_name, 'policycache')
        ],
                  owner=params.kms_user,
                  group=params.kms_group,
                  mode=0775,
                  create_parents=True)

        File(os.path.join('/etc', 'ranger', params.repo_name, 'policycache',
                          format('kms_{repo_name}.json')),
             owner=params.kms_user,
             group=params.kms_group,
             mode=0644)

        XmlConfig(
            "ranger-kms-audit.xml",
            conf_dir=params.kms_conf_dir,
            configurations=params.config['configurations']['ranger-kms-audit'],
            configuration_attributes=params.config['configuration_attributes']
            ['ranger-kms-audit'],
            owner=params.kms_user,
            group=params.kms_group,
            mode=0744)

        XmlConfig(
            "ranger-kms-security.xml",
            conf_dir=params.kms_conf_dir,
            configurations=params.config['configurations']
            ['ranger-kms-security'],
            configuration_attributes=params.config['configuration_attributes']
            ['ranger-kms-security'],
            owner=params.kms_user,
            group=params.kms_group,
            mode=0744)

        XmlConfig(
            "ranger-policymgr-ssl.xml",
            conf_dir=params.kms_conf_dir,
            configurations=params.config['configurations']
            ['ranger-kms-policymgr-ssl'],
            configuration_attributes=params.config['configuration_attributes']
            ['ranger-kms-policymgr-ssl'],
            owner=params.kms_user,
            group=params.kms_group,
            mode=0744)

        # Store the audit-DB password (when audit DB is enabled) plus the SSL
        # keystore/truststore passwords in the credential file.
        if params.xa_audit_db_is_enabled:
            cred_setup = params.cred_setup_prefix + (
                '-f', params.credential_file, '-k', 'auditDBCred', '-v',
                PasswordString(params.xa_audit_db_password), '-c', '1')
            Execute(cred_setup,
                    environment={'JAVA_HOME': params.java_home},
                    logoutput=True,
                    sudo=True)

        cred_setup = params.cred_setup_prefix + (
            '-f', params.credential_file, '-k', 'sslKeyStore', '-v',
            PasswordString(params.ssl_keystore_password), '-c', '1')
        Execute(cred_setup,
                environment={'JAVA_HOME': params.java_home},
                logoutput=True,
                sudo=True)

        cred_setup = params.cred_setup_prefix + (
            '-f', params.credential_file, '-k', 'sslTrustStore', '-v',
            PasswordString(params.ssl_truststore_password), '-c', '1')
        Execute(cred_setup,
                environment={'JAVA_HOME': params.java_home},
                logoutput=True,
                sudo=True)

        # Credential file must stay readable only by the kms user/group.
        File(params.credential_file,
             owner=params.kms_user,
             group=params.kms_group,
             mode=0640)
コード例 #25
0
ファイル: hive.py プロジェクト: glenraynor/ambari
def hive(name=None):
    import params

    if name == 'hiveserver2':
        # copy tarball to HDFS feature not supported
        if not (params.stack_version_formatted_major
                and check_stack_feature(StackFeature.COPY_TARBALL_TO_HDFS,
                                        params.stack_version_formatted_major)):
            params.HdfsResource(params.webhcat_apps_dir,
                                type="directory",
                                action="create_on_execute",
                                owner=params.webhcat_user,
                                mode=0755)

        # Create webhcat dirs.
        if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
            params.HdfsResource(params.hcat_hdfs_user_dir,
                                type="directory",
                                action="create_on_execute",
                                owner=params.hcat_user,
                                mode=params.hcat_hdfs_user_mode)

        params.HdfsResource(params.webhcat_hdfs_user_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.webhcat_user,
                            mode=params.webhcat_hdfs_user_mode)

        # ****** Begin Copy Tarballs ******
        # *********************************
        #  if copy tarball to HDFS feature  supported copy mapreduce.tar.gz and tez.tar.gz to HDFS
        if params.stack_version_formatted_major and check_stack_feature(
                StackFeature.COPY_TARBALL_TO_HDFS,
                params.stack_version_formatted_major):
            copy_to_hdfs("mapreduce",
                         params.user_group,
                         params.hdfs_user,
                         host_sys_prepped=params.host_sys_prepped)
            copy_to_hdfs("tez",
                         params.user_group,
                         params.hdfs_user,
                         host_sys_prepped=params.host_sys_prepped)

        # Always copy pig.tar.gz and hive.tar.gz using the appropriate mode.
        # This can use a different source and dest location to account
        copy_to_hdfs("pig",
                     params.user_group,
                     params.hdfs_user,
                     file_mode=params.tarballs_mode,
                     custom_source_file=params.pig_tar_source,
                     custom_dest_file=params.pig_tar_dest_file,
                     host_sys_prepped=params.host_sys_prepped)
        copy_to_hdfs("hive",
                     params.user_group,
                     params.hdfs_user,
                     file_mode=params.tarballs_mode,
                     custom_source_file=params.hive_tar_source,
                     custom_dest_file=params.hive_tar_dest_file,
                     host_sys_prepped=params.host_sys_prepped)

        wildcard_tarballs = ["sqoop", "hadoop_streaming"]
        for tarball_name in wildcard_tarballs:
            source_file_pattern = eval("params." + tarball_name +
                                       "_tar_source")
            dest_dir = eval("params." + tarball_name + "_tar_dest_dir")

            if source_file_pattern is None or dest_dir is None:
                continue

            source_files = glob.glob(
                source_file_pattern) if "*" in source_file_pattern else [
                    source_file_pattern
                ]
            for source_file in source_files:
                src_filename = os.path.basename(source_file)
                dest_file = os.path.join(dest_dir, src_filename)

                copy_to_hdfs(tarball_name,
                             params.user_group,
                             params.hdfs_user,
                             file_mode=params.tarballs_mode,
                             custom_source_file=source_file,
                             custom_dest_file=dest_file,
                             host_sys_prepped=params.host_sys_prepped)
        # ******* End Copy Tarballs *******
        # *********************************

        # if warehouse directory is in DFS
        if not params.whs_dir_protocol or params.whs_dir_protocol == urlparse(
                params.default_fs).scheme:
            # Create Hive Metastore Warehouse Dir
            params.HdfsResource(params.hive_apps_whs_dir,
                                type="directory",
                                action="create_on_execute",
                                owner=params.hive_user,
                                mode=0777)
        else:
            Logger.info(
                format(
                    "Not creating warehouse directory '{hive_apps_whs_dir}', as the location is not in DFS."
                ))

        # Create Hive User Dir
        params.HdfsResource(params.hive_hdfs_user_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.hive_user,
                            mode=params.hive_hdfs_user_mode)

        if not is_empty(params.hive_exec_scratchdir) and not urlparse(
                params.hive_exec_scratchdir).path.startswith("/tmp"):
            params.HdfsResource(
                params.hive_exec_scratchdir,
                type="directory",
                action="create_on_execute",
                owner=params.hive_user,
                group=params.hdfs_user,
                mode=0777
            )  # Hive expects this dir to be writeable by everyone as it is used as a temp dir

        params.HdfsResource(None, action="execute")

    Directory(params.hive_etc_dir_prefix, mode=0755)

    # We should change configurations for client as well as for server.
    # The reason is that stale-configs are service-level, not component.
    Logger.info("Directories to fill with configs: %s" %
                str(params.hive_conf_dirs_list))
    for conf_dir in params.hive_conf_dirs_list:
        fill_conf_dir(conf_dir)

    XmlConfig(
        "hive-site.xml",
        conf_dir=params.hive_config_dir,
        configurations=params.hive_site_config,
        configuration_attributes=params.config['configuration_attributes']
        ['hive-site'],
        owner=params.hive_user,
        group=params.user_group,
        mode=0644)

    setup_atlas_hive()

    if name == 'hiveserver2':
        XmlConfig(
            "hiveserver2-site.xml",
            conf_dir=params.hive_server_conf_dir,
            configurations=params.config['configurations']['hiveserver2-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['hiveserver2-site'],
            owner=params.hive_user,
            group=params.user_group,
            mode=0644)

    if params.hive_metastore_site_supported and name == 'metastore':
        XmlConfig(
            "hivemetastore-site.xml",
            conf_dir=params.hive_server_conf_dir,
            configurations=params.config['configurations']
            ['hivemetastore-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['hivemetastore-site'],
            owner=params.hive_user,
            group=params.user_group,
            mode=0644)

    File(format("{hive_config_dir}/hive-env.sh"),
         owner=params.hive_user,
         group=params.user_group,
         content=InlineTemplate(params.hive_env_sh_template))

    # On some OS this folder could be not exists, so we will create it before pushing there files
    Directory(params.limits_conf_dir,
              create_parents=True,
              owner='root',
              group='root')

    File(os.path.join(params.limits_conf_dir, 'hive.conf'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("hive.conf.j2"))

    if name == 'metastore' or name == 'hiveserver2':
        if params.hive_jdbc_target is not None and not os.path.exists(
                params.hive_jdbc_target):
            jdbc_connector(params.hive_jdbc_target,
                           params.hive_previous_jdbc_jar)
        if params.hive2_jdbc_target is not None and not os.path.exists(
                params.hive2_jdbc_target):
            jdbc_connector(params.hive2_jdbc_target,
                           params.hive2_previous_jdbc_jar)

    File(
        format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
        content=DownloadSource(
            format("{jdk_location}{check_db_connection_jar_name}")),
        mode=0644,
    )

    if name == 'metastore':
        File(os.path.join(params.hive_server_conf_dir,
                          "hadoop-metrics2-hivemetastore.properties"),
             owner=params.hive_user,
             group=params.user_group,
             content=Template("hadoop-metrics2-hivemetastore.properties.j2"))

        File(params.start_metastore_path,
             mode=0755,
             content=StaticFile('startMetastore.sh'))
        if params.init_metastore_schema:
            create_schema_cmd = format(
                "export HIVE_CONF_DIR={hive_server_conf_dir} ; "
                "{hive_schematool_bin}/schematool -initSchema "
                "-dbType {hive_metastore_db_type} "
                "-userName {hive_metastore_user_name} "
                "-passWord {hive_metastore_user_passwd!p} -verbose")

            check_schema_created_cmd = as_user(
                format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
                       "{hive_schematool_bin}/schematool -info "
                       "-dbType {hive_metastore_db_type} "
                       "-userName {hive_metastore_user_name} "
                       "-passWord {hive_metastore_user_passwd!p} -verbose"),
                params.hive_user)

            # HACK: in cases with quoted passwords and as_user (which does the quoting as well) !p won't work for hiding passwords.
            # Fixing it with the hack below:
            quoted_hive_metastore_user_passwd = quote_bash_args(
                quote_bash_args(params.hive_metastore_user_passwd))
            if quoted_hive_metastore_user_passwd[0] == "'" and quoted_hive_metastore_user_passwd[-1] == "'" \
                or quoted_hive_metastore_user_passwd[0] == '"' and quoted_hive_metastore_user_passwd[-1] == '"':
                quoted_hive_metastore_user_passwd = quoted_hive_metastore_user_passwd[
                    1:-1]
            Logger.sensitive_strings[repr(check_schema_created_cmd)] = repr(
                check_schema_created_cmd.replace(
                    format("-passWord {quoted_hive_metastore_user_passwd}"),
                    "-passWord " + utils.PASSWORDS_HIDE_STRING))

            Execute(create_schema_cmd,
                    not_if=check_schema_created_cmd,
                    user=params.hive_user)
    elif name == 'hiveserver2':
        File(params.start_hiveserver2_path,
             mode=0755,
             content=Template(format('{start_hiveserver2_script}')))

        File(os.path.join(params.hive_server_conf_dir,
                          "hadoop-metrics2-hiveserver2.properties"),
             owner=params.hive_user,
             group=params.user_group,
             content=Template("hadoop-metrics2-hiveserver2.properties.j2"))

    if name != "client":
        Directory(params.hive_pid_dir,
                  create_parents=True,
                  cd_access='a',
                  owner=params.hive_user,
                  group=params.user_group,
                  mode=0755)
        Directory(params.hive_log_dir,
                  create_parents=True,
                  cd_access='a',
                  owner=params.hive_user,
                  group=params.user_group,
                  mode=0755)
        Directory(params.hive_var_lib,
                  create_parents=True,
                  cd_access='a',
                  owner=params.hive_user,
                  group=params.user_group,
                  mode=0755)
コード例 #26
0
def yarn(name=None, config_dir=None):
    """Create local/HDFS directories and write Hadoop/YARN/MapReduce configs.

  Performs per-component host setup: HDFS directories (historyserver only),
  local pid/log directories, the *-site.xml files, env scripts,
  container-executor permissions, and secure-mode task-controller files.
  Resource declarations execute in order, so statement order matters.

  :param name: Component name, apptimelineserver, nodemanager, resourcemanager, or None (defaults for client)
  :param config_dir: Which config directory to write configs to, which could be different during rolling upgrade.
  """
    import params

    # Default to the cluster-wide hadoop conf dir unless the caller
    # (e.g. a rolling upgrade) points us at a versioned conf dir.
    if config_dir is None:
        config_dir = params.hadoop_conf_dir

    if name == "historyserver":
        if params.yarn_log_aggregation_enabled:
            # Aggregated NM logs land here; 0777 so every application user
            # can write its own log files.
            params.HdfsResource(params.yarn_nm_app_log_dir,
                                action="create_on_execute",
                                type="directory",
                                owner=params.yarn_user,
                                group=params.user_group,
                                mode=0777,
                                recursive_chmod=True)

        # create the /tmp folder with proper permissions if it doesn't exist yet
        if params.entity_file_history_directory.startswith('/tmp'):
            params.HdfsResource(
                params.hdfs_tmp_dir,
                action="create_on_execute",
                type="directory",
                owner=params.hdfs_user,
                mode=0777,
            )

        params.HdfsResource(params.entity_file_history_directory,
                            action="create_on_execute",
                            type="directory",
                            owner=params.yarn_user,
                            group=params.user_group)
        params.HdfsResource("/mapred",
                            type="directory",
                            action="create_on_execute",
                            owner=params.mapred_user)
        params.HdfsResource("/mapred/system",
                            type="directory",
                            action="create_on_execute",
                            owner=params.hdfs_user)
        params.HdfsResource(params.mapreduce_jobhistory_done_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.mapred_user,
                            group=params.user_group,
                            change_permissions_for_parents=True,
                            mode=0777)
        # Flush all HDFS operations queued above in a single batch.
        params.HdfsResource(None, action="execute")
        # Local leveldb state store used by the JobHistoryServer for recovery.
        Directory(
            params.jhs_leveldb_state_store_dir,
            owner=params.mapred_user,
            group=params.user_group,
            create_parents=True,
            cd_access="a",
            recursive_ownership=True,
        )

    #<editor-fold desc="Node Manager Section">
    if name == "nodemanager":

        # First start after enabling/disabling security
        if params.toggle_nm_security:
            # Local/log dirs from the previous security mode carry stale
            # ownership; delete them so they are recreated correctly below.
            Directory(params.nm_local_dirs_list + params.nm_log_dirs_list,
                      action='delete')

            # If yarn.nodemanager.recovery.dir exists, remove this dir
            if params.yarn_nodemanager_recovery_dir:
                Directory(InlineTemplate(
                    params.yarn_nodemanager_recovery_dir).get_content(),
                          action='delete')

            # Setting NM marker file
            if params.security_enabled:
                Directory(params.nm_security_marker_dir)
                File(
                    params.nm_security_marker,
                    content=
                    "Marker file to track first start after enabling/disabling security. "
                    "During first start yarn local, log dirs are removed and recreated"
                )
            elif not params.security_enabled:
                File(params.nm_security_marker, action="delete")

        if not params.security_enabled or params.toggle_nm_security:
            # handle_mounted_dirs ensures that we don't create dirs which are temporary unavailable (unmounted), and intended to reside on a different mount.
            nm_log_dir_to_mount_file_content = handle_mounted_dirs(
                create_log_dir, params.nm_log_dirs,
                params.nm_log_dir_to_mount_file, params)
            # create a history file used by handle_mounted_dirs
            File(params.nm_log_dir_to_mount_file,
                 owner=params.hdfs_user,
                 group=params.user_group,
                 mode=0644,
                 content=nm_log_dir_to_mount_file_content)
            nm_local_dir_to_mount_file_content = handle_mounted_dirs(
                create_local_dir, params.nm_local_dirs,
                params.nm_local_dir_to_mount_file, params)
            File(params.nm_local_dir_to_mount_file,
                 owner=params.hdfs_user,
                 group=params.user_group,
                 mode=0644,
                 content=nm_local_dir_to_mount_file_content)
    #</editor-fold>

    # NM recovery dir is a config value that may itself be a template;
    # render it before creating the directory.
    if params.yarn_nodemanager_recovery_dir:
        Directory(
            InlineTemplate(params.yarn_nodemanager_recovery_dir).get_content(),
            owner=params.yarn_user,
            group=params.user_group,
            create_parents=True,
            mode=0755,
            cd_access='a',
        )

    # Local pid/log directories for the YARN and MapReduce daemons.
    Directory(
        [params.yarn_pid_dir_prefix, params.yarn_pid_dir, params.yarn_log_dir],
        owner=params.yarn_user,
        group=params.user_group,
        create_parents=True,
        cd_access='a',
    )

    Directory(
        [
            params.mapred_pid_dir_prefix, params.mapred_pid_dir,
            params.mapred_log_dir_prefix, params.mapred_log_dir
        ],
        owner=params.mapred_user,
        group=params.user_group,
        create_parents=True,
        cd_access='a',
    )
    # ignore_failures: the log dir prefix may live on a mount we cannot
    # chown; do not abort setup over it.
    Directory(
        [params.yarn_log_dir_prefix],
        owner=params.yarn_user,
        group=params.user_group,
        create_parents=True,
        ignore_failures=True,
        cd_access='a',
    )

    XmlConfig(
        "core-site.xml",
        conf_dir=config_dir,
        configurations=params.config['configurations']['core-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['core-site'],
        owner=params.hdfs_user,
        group=params.user_group,
        mode=0644)

    # During RU, Core Masters and Slaves need hdfs-site.xml
    # TODO, instead of specifying individual configs, which is susceptible to breaking when new configs are added,
    # RU should rely on all available in <stack-root>/<version>/hadoop/conf
    if 'hdfs-site' in params.config['configurations']:
        XmlConfig(
            "hdfs-site.xml",
            conf_dir=config_dir,
            configurations=params.config['configurations']['hdfs-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['hdfs-site'],
            owner=params.hdfs_user,
            group=params.user_group,
            mode=0644)

    XmlConfig(
        "mapred-site.xml",
        conf_dir=config_dir,
        configurations=params.config['configurations']['mapred-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['mapred-site'],
        owner=params.yarn_user,
        group=params.user_group,
        mode=0644)

    XmlConfig(
        "yarn-site.xml",
        conf_dir=config_dir,
        configurations=params.config['configurations']['yarn-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['yarn-site'],
        owner=params.yarn_user,
        group=params.user_group,
        mode=0644)

    XmlConfig(
        "capacity-scheduler.xml",
        conf_dir=config_dir,
        configurations=params.config['configurations']['capacity-scheduler'],
        configuration_attributes=params.config['configuration_attributes']
        ['capacity-scheduler'],
        owner=params.yarn_user,
        group=params.user_group,
        mode=0644)

    if name == 'resourcemanager':
        Directory(
            params.rm_nodes_exclude_dir,
            mode=0755,
            create_parents=True,
            cd_access='a',
        )
        File(params.rm_nodes_exclude_path,
             owner=params.yarn_user,
             group=params.user_group)
        File(params.yarn_job_summary_log,
             owner=params.yarn_user,
             group=params.user_group)
        # Create the node-labels dir when labels are explicitly enabled, or
        # when the enable flag is unset but a labels dir is configured.
        if not is_empty(
                params.node_label_enable
        ) and params.node_label_enable or is_empty(
                params.node_label_enable) and params.node_labels_dir:
            params.HdfsResource(params.node_labels_dir,
                                type="directory",
                                action="create_on_execute",
                                change_permissions_for_parents=True,
                                owner=params.yarn_user,
                                group=params.user_group,
                                mode=0700)
            params.HdfsResource(None, action="execute")

    elif name == 'apptimelineserver':
        # Local leveldb store for the Application Timeline Server.
        Directory(
            params.ats_leveldb_dir,
            owner=params.yarn_user,
            group=params.user_group,
            create_parents=True,
            cd_access="a",
        )

        # if stack support application timeline-service state store property (timeline_state_store stack feature)
        if params.stack_supports_timeline_state_store:
            Directory(
                params.ats_leveldb_state_store_dir,
                owner=params.yarn_user,
                group=params.user_group,
                create_parents=True,
                cd_access="a",
            )
        # app timeline server 1.5 directories
        if not is_empty(params.entity_groupfs_store_dir):
            parent_path = os.path.dirname(params.entity_groupfs_store_dir)
            params.HdfsResource(parent_path,
                                type="directory",
                                action="create_on_execute",
                                change_permissions_for_parents=True,
                                owner=params.yarn_user,
                                group=params.user_group,
                                mode=0755)
            params.HdfsResource(params.entity_groupfs_store_dir,
                                type="directory",
                                action="create_on_execute",
                                owner=params.yarn_user,
                                group=params.user_group,
                                mode=params.entity_groupfs_store_dir_mode)
        if not is_empty(params.entity_groupfs_active_dir):
            parent_path = os.path.dirname(params.entity_groupfs_active_dir)
            params.HdfsResource(parent_path,
                                type="directory",
                                action="create_on_execute",
                                change_permissions_for_parents=True,
                                owner=params.yarn_user,
                                group=params.user_group,
                                mode=0755)
            params.HdfsResource(params.entity_groupfs_active_dir,
                                type="directory",
                                action="create_on_execute",
                                owner=params.yarn_user,
                                group=params.user_group,
                                mode=params.entity_groupfs_active_dir_mode)
        params.HdfsResource(None, action="execute")

    # ulimit configuration for the YARN and MapReduce daemons.
    File(format("{limits_conf_dir}/yarn.conf"),
         mode=0644,
         content=Template('yarn.conf.j2'))

    File(format("{limits_conf_dir}/mapreduce.conf"),
         mode=0644,
         content=Template('mapreduce.conf.j2'))

    File(os.path.join(config_dir, "yarn-env.sh"),
         owner=params.yarn_user,
         group=params.user_group,
         mode=0755,
         content=InlineTemplate(params.yarn_env_sh_template))

    # container-executor must carry the mode/group chosen for the current
    # security setting (setuid in secure mode).
    container_executor = format("{yarn_container_bin}/container-executor")
    File(container_executor,
         group=params.yarn_executor_container_group,
         mode=params.container_executor_mode)

    File(os.path.join(config_dir, "container-executor.cfg"),
         group=params.user_group,
         mode=0644,
         content=Template('container-executor.cfg.j2'))

    Directory(params.cgroups_dir,
              group=params.user_group,
              create_parents=True,
              mode=0755,
              cd_access="a")

    # taskcontroller.cfg ownership/mode depends on security mode: root-owned
    # and world-readable when secure, hdfs-owned with default mode otherwise.
    if params.security_enabled:
        tc_mode = 0644
        tc_owner = "root"
    else:
        tc_mode = None
        tc_owner = params.hdfs_user

    File(os.path.join(config_dir, "mapred-env.sh"),
         owner=tc_owner,
         mode=0755,
         content=InlineTemplate(params.mapred_env_sh_template))

    if params.security_enabled:
        # setuid+setgid (06050) so the task-controller can switch users.
        File(os.path.join(params.hadoop_bin, "task-controller"),
             owner="root",
             group=params.mapred_tt_group,
             mode=06050)
        File(os.path.join(config_dir, 'taskcontroller.cfg'),
             owner=tc_owner,
             mode=tc_mode,
             group=params.mapred_tt_group,
             content=Template("taskcontroller.cfg.j2"))
        File(os.path.join(config_dir, 'yarn_jaas.conf'),
             owner=params.yarn_user,
             group=params.user_group,
             content=Template("yarn_jaas.conf.j2"))
    else:
        File(os.path.join(config_dir, 'taskcontroller.cfg'),
             owner=tc_owner,
             content=Template("taskcontroller.cfg.j2"))

    # NOTE(review): mapred-site.xml was already rendered above owned by
    # yarn_user; this second write re-renders it owned by mapred_user and
    # without an explicit mode -- confirm the duplication is intentional.
    if "mapred-site" in params.config['configurations']:
        XmlConfig(
            "mapred-site.xml",
            conf_dir=config_dir,
            configurations=params.config['configurations']['mapred-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['mapred-site'],
            owner=params.mapred_user,
            group=params.user_group)

    # NOTE(review): capacity-scheduler.xml is likewise written twice (above
    # as yarn_user, here as hdfs_user) -- the later write wins.
    if "capacity-scheduler" in params.config['configurations']:
        XmlConfig(
            "capacity-scheduler.xml",
            conf_dir=config_dir,
            configurations=params.config['configurations']
            ['capacity-scheduler'],
            configuration_attributes=params.config['configuration_attributes']
            ['capacity-scheduler'],
            owner=params.hdfs_user,
            group=params.user_group)
    if "ssl-client" in params.config['configurations']:
        XmlConfig(
            "ssl-client.xml",
            conf_dir=config_dir,
            configurations=params.config['configurations']['ssl-client'],
            configuration_attributes=params.config['configuration_attributes']
            ['ssl-client'],
            owner=params.hdfs_user,
            group=params.user_group)

        Directory(
            params.hadoop_conf_secure_dir,
            create_parents=True,
            owner='root',
            group=params.user_group,
            cd_access='a',
        )

        # Second copy of ssl-client.xml in the secure conf dir.
        XmlConfig(
            "ssl-client.xml",
            conf_dir=params.hadoop_conf_secure_dir,
            configurations=params.config['configurations']['ssl-client'],
            configuration_attributes=params.config['configuration_attributes']
            ['ssl-client'],
            owner=params.hdfs_user,
            group=params.user_group)

    if "ssl-server" in params.config['configurations']:
        XmlConfig(
            "ssl-server.xml",
            conf_dir=config_dir,
            configurations=params.config['configurations']['ssl-server'],
            configuration_attributes=params.config['configuration_attributes']
            ['ssl-server'],
            owner=params.hdfs_user,
            group=params.user_group)
    # Fix ownership of optional/example config files if they already exist;
    # content is left untouched.
    if os.path.exists(os.path.join(config_dir, 'fair-scheduler.xml')):
        File(os.path.join(config_dir, 'fair-scheduler.xml'),
             owner=params.mapred_user,
             group=params.user_group)

    if os.path.exists(os.path.join(config_dir, 'ssl-client.xml.example')):
        File(os.path.join(config_dir, 'ssl-client.xml.example'),
             owner=params.mapred_user,
             group=params.user_group)

    if os.path.exists(os.path.join(config_dir, 'ssl-server.xml.example')):
        File(os.path.join(config_dir, 'ssl-server.xml.example'),
             owner=params.mapred_user,
             group=params.user_group)
コード例 #27
0
ファイル: hive.py プロジェクト: glenraynor/ambari
def hive(name=None):
    """Configure Hive on a Windows host.

    Writes hive-site.xml, re-points the Windows service logon account at the
    Hive user, initialises the metastore schema when requested, and prepares
    the Tez application dir for HiveServer2.

    :param name: Component being configured ("hiveserver2", "metastore",
        or None for a plain client).
    """
    import params

    # Render hive-site.xml into the Hive conf dir.
    XmlConfig(
        "hive-site.xml",
        conf_dir=params.hive_conf_dir,
        configurations=params.config['configurations']['hive-site'],
        owner=params.hive_user,
        configuration_attributes=params.config['configuration_attributes']
        ['hive-site'])

    if name in ("hiveserver2", "metastore"):
        # The installation package registers the Windows service under a
        # default account; switch it to the Hive user and its password.
        win_service = params.service_map[name]
        ServiceConfig(win_service,
                      action="change_user",
                      username=params.hive_user,
                      password=Script.get_password(params.hive_user))
        # Ensure the warehouse directory exists in HDFS.
        Execute(format("cmd /c hadoop fs -mkdir -p {hive_warehouse_dir}"),
                logoutput=True,
                user=params.hadoop_user)

    if name == 'metastore' and params.init_metastore_schema:
        # Probe the existing schema first; only initialise when the probe
        # fails (schematool -info exits non-zero).
        schema_info_cmd = format(
            'cmd /c "{hive_bin}\\hive.cmd --service schematool -info '
            '-dbType {hive_metastore_db_type} '
            '-userName {hive_metastore_user_name} '
            '-passWord {hive_metastore_user_passwd!p}'
            '&set EXITCODE=%ERRORLEVEL%&exit /B %EXITCODE%"',  # cmd quirk: propagate the child's exit code manually
            hive_bin=params.hive_bin,
            hive_metastore_db_type=params.hive_metastore_db_type,
            hive_metastore_user_name=params.hive_metastore_user_name,
            hive_metastore_user_passwd=params.hive_metastore_user_passwd)
        try:
            Execute(schema_info_cmd)
        except Fail:
            schema_init_cmd = format(
                'cmd /c {hive_bin}\\hive.cmd --service schematool -initSchema '
                '-dbType {hive_metastore_db_type} '
                '-userName {hive_metastore_user_name} '
                '-passWord {hive_metastore_user_passwd!p}',
                hive_bin=params.hive_bin,
                hive_metastore_db_type=params.hive_metastore_db_type,
                hive_metastore_user_name=params.hive_metastore_user_name,
                hive_metastore_user_passwd=params.hive_metastore_user_passwd)
            Execute(schema_init_cmd,
                    user=params.hive_user,
                    logoutput=True)

    if name == "hiveserver2" and params.hive_execution_engine == "tez":
        # Init the Tez application dir in HDFS via the bundled cmd script
        # that ships next to this service script.
        this_script = __file__.replace('/', os.sep)
        tez_setup_cmd = os.path.normpath(
            os.path.join(os.path.dirname(this_script), "..", "files",
                         "hiveTezSetup.cmd"))
        Execute("cmd /c " + tez_setup_cmd,
                logoutput=True,
                user=params.hadoop_user)
コード例 #28
0
def setup_spark(env, type, upgrade_type=None, action=None):
    """Lay down Spark2 directories and configuration files on this host.

    :param env: Execution environment passed by the caller (not read here).
    :param type: Component type; 'server' gets extra dirs/HDFS setup,
        'client' triggers the SAC jar check.
    :param upgrade_type: Non-None during a stack upgrade; selects which
        stack version to use for feature checks.
    :param action: Only 'config' (with type == 'server') performs the
        server-side directory/HDFS creation.
    """
    import params

    # ensure that matching LZO libraries are installed for Spark
    lzo_utils.install_lzo_if_needed()

    # Local pid/log dirs for the Spark daemons.
    Directory([params.spark_pid_dir, params.spark_log_dir],
              owner=params.spark_user,
              group=params.user_group,
              mode=0775,
              create_parents=True)
    if type == 'server' and action == 'config':
        Directory(params.spark2_lib_dir,
                  owner=params.spark_user,
                  group=params.user_group,
                  create_parents=True,
                  mode=0775)

        Directory(params.spark_history_store_path,
                  owner=params.spark_user,
                  group=params.user_group,
                  create_parents=True,
                  mode=0775)

        # Spark user's home dir in HDFS.
        params.HdfsResource(params.spark_hdfs_user_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.spark_user,
                            mode=0775)

        # Only create the warehouse dir when it lives on the default FS (or
        # no explicit protocol was configured for it).
        if not params.whs_dir_protocol or params.whs_dir_protocol == urlparse(
                params.default_fs).scheme:
            # Create Spark Warehouse Dir
            params.HdfsResource(params.spark_warehouse_dir,
                                type="directory",
                                action="create_on_execute",
                                owner=params.spark_user,
                                mode=0777)

        # Flush the queued HDFS operations in one batch.
        params.HdfsResource(None, action="execute")

        generate_logfeeder_input_config(
            'spark2',
            Template("input.config-spark2.json.j2", extra_imports=[default]))

    # Copy so the pops/overrides below don't mutate the shared config dict.
    spark2_defaults = dict(params.config['configurations']['spark2-defaults'])

    if params.security_enabled:
        # NOTE(review): pop() without a default raises KeyError if these
        # spnego keys are missing from spark2-defaults -- confirm they are
        # always present when security is enabled.
        spark2_defaults.pop("history.server.spnego.kerberos.principal")
        spark2_defaults.pop("history.server.spnego.keytab.file")
        # Substitute this host's FQDN into the Kerberos principal.
        spark2_defaults['spark.history.kerberos.principal'] = spark2_defaults[
            'spark.history.kerberos.principal'].replace(
                '_HOST',
                socket.getfqdn().lower())

    PropertiesFile(format("{spark_conf}/spark-defaults.conf"),
                   properties=spark2_defaults,
                   key_value_delimiter=" ",
                   owner=params.spark_user,
                   group=params.spark_group,
                   mode=0644)

    # create spark-env.sh in etc/conf dir
    File(
        os.path.join(params.spark_conf, 'spark-env.sh'),
        owner=params.spark_user,
        group=params.spark_group,
        content=InlineTemplate(params.spark_env_sh),
        mode=0644,
    )

    #create log4j.properties in etc/conf dir
    File(
        os.path.join(params.spark_conf, 'log4j.properties'),
        owner=params.spark_user,
        group=params.spark_group,
        content=params.spark_log4j_properties,
        mode=0644,
    )

    #create metrics.properties in etc/conf dir
    File(os.path.join(params.spark_conf, 'metrics.properties'),
         owner=params.spark_user,
         group=params.spark_group,
         content=InlineTemplate(params.spark_metrics_properties),
         mode=0644)

    # Spark needs its own copy of hive-site.xml to talk to the metastore.
    if params.is_hive_installed:
        XmlConfig("hive-site.xml",
                  conf_dir=params.spark_conf,
                  configurations=params.spark_hive_properties,
                  owner=params.spark_user,
                  group=params.spark_group,
                  mode=0644)

    create_atlas_configs()

    if params.has_spark_thriftserver:
        # Copy before mutating, same reason as spark2_defaults above.
        spark2_thrift_sparkconf = dict(
            params.config['configurations']['spark2-thrift-sparkconf'])

        if params.security_enabled and 'spark.yarn.principal' in spark2_thrift_sparkconf:
            spark2_thrift_sparkconf[
                'spark.yarn.principal'] = spark2_thrift_sparkconf[
                    'spark.yarn.principal'].replace('_HOST',
                                                    socket.getfqdn().lower())

        PropertiesFile(params.spark_thrift_server_conf_file,
                       properties=spark2_thrift_sparkconf,
                       owner=params.hive_user,
                       group=params.user_group,
                       key_value_delimiter=" ",
                       mode=0644)

    # During an upgrade the target version drives the feature check;
    # otherwise use the currently formatted stack version.
    effective_version = params.version if upgrade_type is not None else params.stack_version_formatted
    if effective_version:
        effective_version = format_stack_version(effective_version)

    if params.spark_thrift_fairscheduler_content and effective_version and check_stack_feature(
            StackFeature.SPARK_16PLUS, effective_version):
        # create spark-thrift-fairscheduler.xml
        File(os.path.join(params.spark_conf, "spark-thrift-fairscheduler.xml"),
             owner=params.spark_user,
             group=params.spark_group,
             mode=0755,
             content=InlineTemplate(params.spark_thrift_fairscheduler_content))

    if type == "client":
        check_sac_jar()
コード例 #29
0
def setup_ranger_admin(upgrade_type=None):
    """Configure the Ranger Admin component on this host.

    Creates the conf/pid/log directories, stages the JDBC connector, verifies
    database connectivity, links the packaged conf directory, writes the env
    scripts and (optionally) PAM files, and renders ranger-admin-site.xml with
    plain-text passwords masked.

    :param upgrade_type: type of stack upgrade in progress, or None; when
        None it is resolved from /commandParams/upgrade_type.
    """
    import params

    # Resolve the upgrade type from the command parameters when the caller
    # did not pass one explicitly.
    if upgrade_type is None:
        upgrade_type = Script.get_upgrade_type(
            default("/commandParams/upgrade_type", ""))

    ranger_home = params.ranger_home
    ranger_conf = params.ranger_conf

    # Ensure the Ranger conf directory exists before anything is written into it.
    Directory(ranger_conf,
              owner=params.unix_user,
              group=params.unix_group,
              create_parents=True)

    copy_jdbc_connector()

    # Download the DB-connection-check jar from the Ambari server's JDK
    # resource location onto the agent host.
    File(
        format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
        content=DownloadSource(
            format("{jdk_location}{check_db_connection_jar_name}")),
        mode=0644,
    )

    # Build the classpath for the DB connectivity check. SQL Anywhere
    # ('sqla') ships its own driver jar; all other flavors use the
    # previously downloaded driver.
    cp = format("{check_db_connection_jar}")
    if params.db_flavor.lower() == 'sqla':
        cp = cp + os.pathsep + format("{ranger_home}/ews/lib/sajdbc4.jar")
    else:
        cp = cp + os.pathsep + format("{driver_curl_target}")
    cp = cp + os.pathsep + format("{ranger_home}/ews/lib/*")

    # NOTE(review): the !p conversion presumably masks the password when the
    # command is logged — confirm against the resource_management format docs.
    db_connection_check_command = format(
        "{java_home}/bin/java -cp {cp} org.apache.ambari.server.DBConnectionVerification '{ranger_jdbc_connection_url}' {ranger_db_user} {ranger_db_password!p} {ranger_jdbc_driver}"
    )

    # SQL Anywhere needs its native libraries on LD_LIBRARY_PATH for the check.
    env_dict = {}
    if params.db_flavor.lower() == 'sqla':
        env_dict = {'LD_LIBRARY_PATH': params.ld_lib_path}

    # Fail early (after retries) if the Ranger database is unreachable.
    Execute(db_connection_check_command,
            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
            tries=5,
            try_sleep=10,
            environment=env_dict)

    # Link {ranger_home}/conf to the webapp's packaged conf directory, but
    # only when the target exists and the link has not been made yet.
    Execute(
        ('ln', '-sf', format('{ranger_home}/ews/webapp/WEB-INF/classes/conf'),
         format('{ranger_home}/conf')),
        not_if=format("ls {ranger_home}/conf"),
        only_if=format("ls {ranger_home}/ews/webapp/WEB-INF/classes/conf"),
        sudo=True)

    # During an upgrade, refresh the default/admin config templates from the
    # newly installed conf.dist directory.
    if upgrade_type is not None:
        src_file = format(
            '{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/ranger-admin-default-site.xml'
        )
        dst_file = format('{ranger_home}/conf/ranger-admin-default-site.xml')
        Execute(('cp', '-f', src_file, dst_file), sudo=True)

        src_file = format(
            '{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/security-applicationContext.xml'
        )
        dst_file = format('{ranger_home}/conf/security-applicationContext.xml')

        Execute(('cp', '-f', src_file, dst_file), sudo=True)

    # Recursively hand the whole install tree to the Ranger unix user/group.
    Directory(
        format('{ranger_home}/'),
        owner=params.unix_user,
        group=params.unix_group,
        recursive_ownership=True,
    )

    # NOTE(review): this directory uses params.user_group while the rest of
    # the function uses params.unix_group — confirm that is intentional.
    Directory(params.ranger_pid_dir,
              mode=0755,
              owner=params.unix_user,
              group=params.user_group,
              cd_access="a",
              create_parents=True)

    # Env script telling the Ranger start scripts where to keep the pid file
    # (only on stacks that support a configurable pid dir).
    if params.stack_supports_pid:
        File(
            format('{ranger_conf}/ranger-admin-env-piddir.sh'),
            content=format(
                "export RANGER_PID_DIR_PATH={ranger_pid_dir}\nexport RANGER_USER={unix_user}"
            ),
            owner=params.unix_user,
            group=params.unix_group,
            mode=0755)

    Directory(params.admin_log_dir,
              owner=params.unix_user,
              group=params.unix_group,
              create_parents=True,
              cd_access='a',
              mode=0755)

    # Env script pointing the Ranger start scripts at the log directory.
    File(format('{ranger_conf}/ranger-admin-env-logdir.sh'),
         content=format("export RANGER_ADMIN_LOG_DIR={admin_log_dir}"),
         owner=params.unix_user,
         group=params.unix_group,
         mode=0755)

    # Ensure ranger-admin-default-site.xml exists with correct ownership;
    # if missing, restore it from the packaged conf.dist template first.
    if os.path.isfile(params.ranger_admin_default_file):
        File(params.ranger_admin_default_file,
             owner=params.unix_user,
             group=params.unix_group)
    else:
        Logger.warning(
            'Required file {0} does not exist, copying the file to {1} path'.
            format(params.ranger_admin_default_file, ranger_conf))
        src_file = format(
            '{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/ranger-admin-default-site.xml'
        )
        dst_file = format('{ranger_home}/conf/ranger-admin-default-site.xml')
        Execute(('cp', '-f', src_file, dst_file), sudo=True)
        File(params.ranger_admin_default_file,
             owner=params.unix_user,
             group=params.unix_group)

    # Same recovery pattern for security-applicationContext.xml.
    if os.path.isfile(params.security_app_context_file):
        File(params.security_app_context_file,
             owner=params.unix_user,
             group=params.unix_group)
    else:
        Logger.warning(
            'Required file {0} does not exist, copying the file to {1} path'.
            format(params.security_app_context_file, ranger_conf))
        src_file = format(
            '{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/security-applicationContext.xml'
        )
        dst_file = format('{ranger_home}/conf/security-applicationContext.xml')
        Execute(('cp', '-f', src_file, dst_file), sudo=True)
        File(params.security_app_context_file,
             owner=params.unix_user,
             group=params.unix_group)

    # On upgrade (when the stack supports versioned configs), recreate the
    # /usr/bin/ranger-admin symlink so it points at the new version's script.
    if upgrade_type is not None and params.stack_supports_config_versioning:
        if os.path.islink('/usr/bin/ranger-admin'):
            Link('/usr/bin/ranger-admin', action="delete")

        Link('/usr/bin/ranger-admin',
             to=format('{ranger_home}/ews/ranger-admin-services.sh'))

    # When PAM authentication is configured, install the ranger-admin and
    # ranger-remote PAM service files (skipping any that already exist).
    if default(
            "/configurations/ranger-admin-site/ranger.authentication.method",
            "") == 'PAM':
        d = '/etc/pam.d'
        if os.path.isdir(d):
            if os.path.isfile(os.path.join(d, 'ranger-admin')):
                Logger.info('ranger-admin PAM file already exists.')
            else:
                File(format('{d}/ranger-admin'),
                     content=Template('ranger_admin_pam.j2'),
                     owner=params.unix_user,
                     group=params.unix_group,
                     mode=0644)
            if os.path.isfile(os.path.join(d, 'ranger-remote')):
                Logger.info('ranger-remote PAM file already exists.')
            else:
                File(format('{d}/ranger-remote'),
                     content=Template('ranger_remote_pam.j2'),
                     owner=params.unix_user,
                     group=params.unix_group,
                     mode=0644)
        else:
            Logger.error(
                "Unable to use PAM authentication, /etc/pam.d/ directory does not exist."
            )

    # Fallback symlink creation for non-upgrade runs (no-op if it exists).
    Execute(('ln', '-sf', format('{ranger_home}/ews/ranger-admin-services.sh'),
             '/usr/bin/ranger-admin'),
            not_if=format("ls /usr/bin/ranger-admin"),
            only_if=format("ls {ranger_home}/ews/ranger-admin-services.sh"),
            sudo=True)

    # remove plain-text password from xml configs

    # Work on a copy so the in-memory command configuration is not mutated.
    ranger_admin_site_copy = {}
    ranger_admin_site_copy.update(
        params.config['configurations']['ranger-admin-site'])
    for prop in params.ranger_admin_password_properties:
        if prop in ranger_admin_site_copy:
            ranger_admin_site_copy[prop] = "_"

    XmlConfig(
        "ranger-admin-site.xml",
        conf_dir=ranger_conf,
        configurations=ranger_admin_site_copy,
        configuration_attributes=params.config['configuration_attributes']
        ['ranger-admin-site'],
        owner=params.unix_user,
        group=params.unix_group,
        mode=0644)

    # Private directory for JAAS/keystore material (owner-only access).
    Directory(
        os.path.join(ranger_conf, 'ranger_jaas'),
        mode=0700,
        owner=params.unix_user,
        group=params.unix_group,
    )

    if params.stack_supports_ranger_log4j:
        File(format('{ranger_home}/ews/webapp/WEB-INF/log4j.properties'),
             owner=params.unix_user,
             group=params.unix_group,
             content=InlineTemplate(params.admin_log4j),
             mode=0644)

    do_keystore_setup(upgrade_type=upgrade_type)

    create_core_site_xml(ranger_conf)

    # With Kerberos enabled, Ranger needs the HA client configs of the
    # services whose plugins are enabled (HBase / HDFS) in its own conf dir.
    if params.stack_supports_ranger_kerberos and params.security_enabled:
        if params.is_hbase_ha_enabled and params.ranger_hbase_plugin_enabled:
            XmlConfig(
                "hbase-site.xml",
                conf_dir=ranger_conf,
                configurations=params.config['configurations']['hbase-site'],
                configuration_attributes=params.
                config['configuration_attributes']['hbase-site'],
                owner=params.unix_user,
                group=params.unix_group,
                mode=0644)

        if params.is_namenode_ha_enabled and params.ranger_hdfs_plugin_enabled:
            XmlConfig(
                "hdfs-site.xml",
                conf_dir=ranger_conf,
                configurations=params.config['configurations']['hdfs-site'],
                configuration_attributes=params.
                config['configuration_attributes']['hdfs-site'],
                owner=params.unix_user,
                group=params.unix_group,
                mode=0644)
Code example #30
0
File: oozie.py  Project: mbigelow/ambari
def oozie_server_specific():
    import params

    File(
        params.pid_file,
        action="delete",
        not_if=
        "ls {pid_file} >/dev/null 2>&1 && !(ps `cat {pid_file}` >/dev/null 2>&1)"
    )

    oozie_server_directories = [
        format("{oozie_home}/{oozie_tmp_dir}"), params.oozie_pid_dir,
        params.oozie_log_dir, params.oozie_tmp_dir, params.oozie_data_dir,
        params.oozie_lib_dir, params.oozie_webapps_dir,
        params.oozie_webapps_conf_dir, params.oozie_server_dir
    ]
    Directory(
        oozie_server_directories,
        owner=params.oozie_user,
        group=params.user_group,
        mode=0755,
        recursive=True,
        cd_access="a",
    )

    Directory(
        params.oozie_libext_dir,
        recursive=True,
    )

    no_op_test = format(
        "ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1"
    )
    if not params.host_sys_prepped:
        configure_cmds = []
        configure_cmds.append(
            ('tar', '-xvf', format('{oozie_home}/oozie-sharelib.tar.gz'), '-C',
             params.oozie_home))
        configure_cmds.append(
            ('cp', params.ext_js_path, params.oozie_libext_dir))
        configure_cmds.append(('chown', format('{oozie_user}:{user_group}'),
                               format('{oozie_libext_dir}/{ext_js_file}')))
        configure_cmds.append(
            ('chown', '-RL', format('{oozie_user}:{user_group}'),
             params.oozie_webapps_conf_dir))

        Execute(
            configure_cmds,
            not_if=no_op_test,
            sudo=True,
        )

    if params.jdbc_driver_name=="com.mysql.jdbc.Driver" or \
       params.jdbc_driver_name == "com.microsoft.sqlserver.jdbc.SQLServerDriver" or \
       params.jdbc_driver_name=="oracle.jdbc.driver.OracleDriver":
        File(
            params.downloaded_custom_connector,
            content=DownloadSource(params.driver_curl_source),
        )

        Execute(
            ('cp', '--remove-destination', params.downloaded_custom_connector,
             params.target),
            #creates=params.target, TODO: uncomment after ranger_hive_plugin will not provide jdbc
            path=["/bin", "/usr/bin/"],
            sudo=True)

        File(params.target, owner=params.oozie_user, group=params.user_group)

    #falcon el extension
    if params.has_falcon_host:
        Execute(
            format(
                '{sudo} cp {falcon_home}/oozie/ext/falcon-oozie-el-extension-*.jar {oozie_libext_dir}'
            ),
            not_if=no_op_test,
        )
        Execute(
            format(
                '{sudo} chown {oozie_user}:{user_group} {oozie_libext_dir}/falcon-oozie-el-extension-*.jar'
            ),
            not_if=no_op_test,
        )
    if params.lzo_enabled and len(params.all_lzo_packages) > 0:
        Package(params.all_lzo_packages)
        Execute(
            format(
                '{sudo} cp {hadoop_lib_home}/hadoop-lzo*.jar {oozie_lib_dir}'),
            not_if=no_op_test,
        )

    Execute(format(
        "cd {oozie_tmp_dir} && {oozie_setup_sh} prepare-war {oozie_secure}"),
            user=params.oozie_user,
            not_if=no_op_test)

    if params.hdp_stack_version != "" and compare_versions(
            params.hdp_stack_version, '2.2') >= 0:
        # Create hive-site and tez-site configs for oozie
        Directory(params.hive_conf_dir,
                  recursive=True,
                  owner=params.oozie_user,
                  group=params.user_group)
        if 'hive-site' in params.config['configurations']:
            XmlConfig(
                "hive-site.xml",
                conf_dir=params.hive_conf_dir,
                configurations=params.config['configurations']['hive-site'],
                configuration_attributes=params.
                config['configuration_attributes']['hive-site'],
                owner=params.oozie_user,
                group=params.user_group,
                mode=0644)
        if 'tez-site' in params.config['configurations']:
            XmlConfig(
                "tez-site.xml",
                conf_dir=params.hive_conf_dir,
                configurations=params.config['configurations']['tez-site'],
                configuration_attributes=params.
                config['configuration_attributes']['tez-site'],
                owner=params.oozie_user,
                group=params.user_group,
                mode=0664)
    Execute(('chown', '-R', format("{oozie_user}:{user_group}"),
             params.oozie_server_dir),
            sudo=True)