Code example #1
File: repository.py  Project: fanzhidongyzby/ambari
 def action_create(self):
     with Environment.get_instance_copy() as env:
         repo_file_name = self.resource.repo_file_name
         # repos_dirs is defined at module level in repository.py and maps
         # os_family to the repo directory (e.g. redhat -> /etc/yum.repos.d)
         repo_dir = repos_dirs[env.system.os_family]
         repo_template = self.resource.repo_template
         new_content = Template(
             repo_template,
             repo_id=self.resource.repo_id,
             repo_file_name=self.resource.repo_file_name,
             base_url=self.resource.base_url,
             mirror_list=self.resource.mirror_list,
         )
         repo_file_path = format("{repo_dir}/{repo_file_name}.repo")
         if self.resource.append_to_file and os.path.isfile(repo_file_path):
             with open(repo_file_path, "a") as repo_file:
                 repo_file.write("\n" + new_content.get_content())
         else:
             File(repo_file_path, content=new_content)
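
A note on format("{repo_dir}/{repo_file_name}.repo") above: resource_management's format() interpolates placeholders from the caller's scope rather than from explicit arguments. A minimal stand-in (simplified, not the real implementation) behaves like this:

import inspect

def format(fmt):
    # Simplified stand-in for resource_management's format():
    # resolve {name} placeholders from the caller's local variables.
    caller_locals = inspect.currentframe().f_back.f_locals
    return fmt.format(**caller_locals)

repo_dir = "/etc/yum.repos.d"
repo_file_name = "ambari"
print(format("{repo_dir}/{repo_file_name}.repo"))  # /etc/yum.repos.d/ambari.repo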
Code example #2
File: TestContentSources.py  Project: LXiong/slider
  def test_template_loader_arguments(self, exists_mock, getmtime_mock, open_mock):
    """
    Testing template loader additional arguments in template and absolute file-path
    """
    exists_mock.return_value = True
    getmtime_mock.return_value = 10
    file_mock = MagicMock(name = 'file_mock')
    file_mock.__enter__.return_value = file_mock
    file_mock.read.return_value = '{{test_arg1}} template content'
    open_mock.return_value = file_mock

    with Environment("/base") as env:
      template = Template("/absolute/path/test.j2", [], test_arg1 = "test")
      content = template.get_content()
    self.assertEqual(open_mock.call_count, 1)

    self.assertEqual(u'test template content\n', content)
    open_mock.assert_called_with('/absolute/path/test.j2', 'rb')
    self.assertEqual(getmtime_mock.call_count, 1)
    getmtime_mock.assert_called_with('/absolute/path/test.j2')
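
The test stubs out the filesystem entirely: exists/getmtime are patched so the template loader believes the file is present, and open returns a MagicMock yielding the template text. A self-contained sketch of the same open-mocking pattern (Python 3 unittest.mock; path and content are illustrative):

from unittest.mock import MagicMock, patch

file_mock = MagicMock(name="file_mock")
file_mock.__enter__.return_value = file_mock
file_mock.read.return_value = "{{test_arg1}} template content"

with patch("builtins.open", return_value=file_mock) as open_mock:
    with open("/absolute/path/test.j2", "rb") as f:
        assert f.read() == "{{test_arg1}} template content"
open_mock.assert_called_with("/absolute/path/test.j2", "rb")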
Code example #3
File: hdfs_namenode.py  Project: runningt/ambari
def namenode(action=None,
             hdfs_binary=None,
             do_format=True,
             upgrade_type=None,
             upgrade_suspended=False,
             env=None):

    if action is None:
        raise Fail('"action" parameter is required for function namenode().')

    if action in ["start", "stop"] and hdfs_binary is None:
        raise Fail(
            '"hdfs_binary" parameter is required for function namenode().')

    if action == "configure":
        import params
        # we need this directory to be present before any action (HA manual
        # steps for an additional NameNode)
        create_name_dirs(params.dfs_name_dir)

        # set up failover / secure ZooKeeper ACLs; this feature is supported from HDP 2.6 onwards
        set_up_zkfc_security(params)
    elif action == "start":
        Logger.info("Called service {0} with upgrade_type: {1}".format(
            action, str(upgrade_type)))
        setup_ranger_hdfs(upgrade_type=upgrade_type)
        import params

        File(params.exclude_file_path,
             content=Template("exclude_hosts_list.j2"),
             owner=params.hdfs_user,
             group=params.user_group)

        if do_format and not params.hdfs_namenode_format_disabled:
            format_namenode()

        if params.dfs_ha_enabled and \
          params.dfs_ha_namenode_standby is not None and \
          params.hostname == params.dfs_ha_namenode_standby:
            # if the current host is the standby NameNode in an HA deployment
            # run the bootstrap command, to start the NameNode in standby mode
            # this requires that the active NameNode is already up and running,
            # so this execute should be re-tried upon failure, up to a timeout
            success = bootstrap_standby_namenode(params)
            if not success:
                raise Fail("Could not bootstrap standby namenode")

        if upgrade_type == constants.UPGRADE_TYPE_ROLLING and params.dfs_ha_enabled:
            # Most likely, ZKFC is up since RU will initiate the failover command. However, if that failed, it would have tried
            # to kill ZKFC manually, so we need to start it if not already running.
            safe_zkfc_op(action, env)

        options = ""
        if upgrade_type == constants.UPGRADE_TYPE_ROLLING:
            if params.upgrade_direction == Direction.UPGRADE:
                options = "-rollingUpgrade started"
            elif params.upgrade_direction == Direction.DOWNGRADE:
                options = "-rollingUpgrade downgrade"
        elif upgrade_type == constants.UPGRADE_TYPE_NON_ROLLING:
            is_previous_image_dir = is_previous_fs_image()
            Logger.info("Previous file system image dir present is {0}".format(
                str(is_previous_image_dir)))

            if params.upgrade_direction == Direction.UPGRADE:
                options = "-rollingUpgrade started"
            elif params.upgrade_direction == Direction.DOWNGRADE:
                options = "-rollingUpgrade downgrade"
        elif upgrade_type == constants.UPGRADE_TYPE_HOST_ORDERED:
            # nothing special to do for HOU - should be very close to a normal restart
            pass
        elif upgrade_type is None and upgrade_suspended is True:
            # the rollingUpgrade flag must be passed in during a suspended upgrade when starting NN
            if os.path.exists(
                    namenode_upgrade.get_upgrade_in_progress_marker()):
                options = "-rollingUpgrade started"
            else:
                Logger.info(
                    "The NameNode upgrade marker file {0} does not exist, yet an upgrade is currently suspended. "
                    "Assuming that the upgrade of NameNode has not occurred yet."
                    .format(namenode_upgrade.get_upgrade_in_progress_marker()))

        Logger.info("Options for start command are: {0}".format(options))

        service(action="start",
                name="namenode",
                user=params.hdfs_user,
                options=options,
                create_pid_dir=True,
                create_log_dir=True)

        if params.security_enabled:
            Execute(format(
                "{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"
            ),
                    user=params.hdfs_user)

        # ___Scenario___________|_Expected safemode state__|_Wait for safemode OFF____|
        # no-HA                 | ON -> OFF                | Yes                      |
        # HA and active         | ON -> OFF                | Yes                      |
        # HA and standby        | no change                | No                       |
        # RU with HA on active  | ON -> OFF                | Yes                      |
        # RU with HA on standby | ON -> OFF                | Yes                      |
        # EU with HA on active  | ON -> OFF                | No                       |
        # EU with HA on standby | ON -> OFF                | No                       |
        # EU non-HA             | ON -> OFF                | No                       |

        # because we do things like create directories after starting NN,
        # the vast majority of the time this should be True - it should only
        # be False if this is HA and we are the Standby NN
        ensure_safemode_off = True

        # True if this is the only NameNode (non-HA) or if its the Active one in HA
        is_active_namenode = True

        if params.dfs_ha_enabled:
            Logger.info(
                "Waiting for the NameNode to broadcast whether it is Active or Standby..."
            )

            if is_this_namenode_active() is False:
                # we are the STANDBY NN
                is_active_namenode = False

                # we are the STANDBY NN and this restart is not part of an upgrade
                if upgrade_type is None:
                    ensure_safemode_off = False

        # During an Express Upgrade, NameNode will not leave SafeMode until the DataNodes are started,
        # so always disable the Safemode check
        if upgrade_type == constants.UPGRADE_TYPE_NON_ROLLING:
            ensure_safemode_off = False

        # some informative logging separate from the above logic to keep things a little cleaner
        if ensure_safemode_off:
            Logger.info(
                "Waiting for this NameNode to leave Safemode due to the following conditions: HA: {0}, isActive: {1}, upgradeType: {2}"
                .format(params.dfs_ha_enabled, is_active_namenode,
                        upgrade_type))
        else:
            Logger.info(
                "Skipping Safemode check due to the following conditions: HA: {0}, isActive: {1}, upgradeType: {2}"
                .format(params.dfs_ha_enabled, is_active_namenode,
                        upgrade_type))

        # wait for Safemode to end
        if ensure_safemode_off:
            if params.rolling_restart and params.rolling_restart_safemode_exit_timeout:
                calculated_retries = int(
                    params.rolling_restart_safemode_exit_timeout) / 30
                wait_for_safemode_off(hdfs_binary,
                                      afterwait_sleep=30,
                                      retries=calculated_retries,
                                      sleep_seconds=30)
            else:
                wait_for_safemode_off(hdfs_binary)

        # Always run this on the "Active" NN unless Safemode has been ignored
        # in the case where safemode was ignored (like during an express upgrade), then
        # NN will be in SafeMode and cannot have directories created
        if is_active_namenode and ensure_safemode_off:
            create_hdfs_directories()
            create_ranger_audit_hdfs_directories()
        else:
            Logger.info(
                "Skipping creation of HDFS directories since this is either not the Active NameNode or we did not wait for Safemode to finish."
            )

    elif action == "stop":
        import params
        service(action="stop", name="namenode", user=params.hdfs_user)
    elif action == "status":
        import status_params
        check_process_status(status_params.namenode_pid_file)
    elif action == "decommission":
        decommission()
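
One detail worth calling out: calculated_retries relies on Python 2 integer division, so a 180-second timeout with 30-second sleeps yields 6 retries. The same computation in Python 3 needs //, as in this sketch:

def retries_for_timeout(timeout_seconds, sleep_seconds=30):
    # Translate a safemode-exit timeout into a retry count, matching
    # the int(timeout) / 30 expression in the Python 2 code above.
    return int(timeout_seconds) // sleep_seconds

assert retries_for_timeout(180) == 6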
Code example #4
File: clickhouse.py  Project: sjqzhang/bigdata
def clickhouse(upgrade_type=None):
    import params
    ensure_base_directories()

    # full ClickHouse server configuration; each helper returns a mutable dict
    clickhouse_config_template = mutable_config_dict(params.clickhouse_config_json_template)
    clickhouse_metrika_template = mutable_config_dict(params.clickhouse_metrika_json_template)
    
    effective_version = params.stack_version_formatted if upgrade_type is None else format_stack_version(params.version)
    Logger.info(format("Effective stack version: {effective_version}"))

    # interserver_http_host is set explicitly only when an effective stack version is known.
    if effective_version is not None and effective_version != "":
       clickhouse_server_host = clickhouse_config_template['interserver_http_host'] = params.hostname
       Logger.info(format("clickhouse interserver_http_host: {clickhouse_server_host}"))
    else:
       listeners = clickhouse_config_template['interserver_http_host'].replace("localhost", params.hostname)
       Logger.info(format("clickhouse interserver_http_host: {listeners}"))

    # convert the config dicts to ClickHouse XML
    import clickhouse_utils

    clickhouse_config = clickhouse_utils.clickhouseConfigToXML(clickhouse_config_template)
    clickhouse_metrika = clickhouse_utils.clickhouseMetrikaToXML(
        params.tcp_port, params.user_admin, params.user_admin_password,
        params.clickhouse_hosts, params.zookeeper_hosts, params.remote_servers,
        params.hostname, params.zookeeper_server, clickhouse_metrika_template)
    
    Directory(params.clickhouse_log_dir,
              mode=0755,
              cd_access='a',
              owner=params.clickhouse_user,
              group=params.clickhouse_group,
              create_parents = True,
              recursive_ownership = True,
    )

    Directory(params.conf_dir,
              mode=0755,
              cd_access='a',
              owner=params.clickhouse_user,
              group=params.clickhouse_group,
              create_parents = True,
              recursive_ownership = True,
    )

    File(format("{conf_dir}/config.xml"),
                      owner=params.clickhouse_user,
                      group=params.clickhouse_group,
                      content=InlineTemplate(clickhouse_config)
    )

    File(format("{conf_dir}/metrika.xml"),
          owner=params.clickhouse_user,
          group=params.clickhouse_group,
          content=InlineTemplate(clickhouse_metrika)
     )

    File(format("{conf_dir}/users.xml"),
          owner=params.clickhouse_user,
          group=params.clickhouse_group,
          content=Template("clickhouse-users.xml.j2")
     )

    # On some OSes this folder may not exist, so create it before placing files there
    Directory(params.limits_conf_dir,
              create_parents = True,
              owner='root',
              group='root'
    )

    File(os.path.join(params.limits_conf_dir, 'clickhouse.conf'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("clickhouse.conf.j2")
    )

    File(os.path.join(params.bindir, 'clickhouse-manager.sh'),
         owner='root',
         group='root',
         mode=0755,
         content=Template("clickhouse-manager.sh.j2")
    )
    
    File(os.path.join(params.crondir, 'clickhouse-server'),
         owner='root',
         group='root',
         mode=0755,
         content=Template("clickhouse-server-cron.j2")
    )
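
mutable_config_dict is not shown in this excerpt; a plausible sketch (an assumption, not the project's actual implementation) is a deep copy of the read-only configuration mapping, so the template dicts can be assigned into:

import copy

def mutable_config_dict(config):
    # Deep-copy the (read-only) configuration mapping so callers can
    # mutate it, as clickhouse() does with interserver_http_host.
    return {key: copy.deepcopy(value) for key, value in config.items()}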
Code example #5
def Template(name, **kwargs):
    with RMFTestCase.env:
        from resource_management.core.source import Template
        return Template(name, **kwargs)
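
The wrapper exists because Template sources can only be constructed inside an active Environment; it re-enters the shared test environment before delegating. Hypothetical usage from a test body (the template name mirrors the flume example below):

content = Template("log4j.properties.j2", agent_name="a1").get_content()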
Code example #6
File: flume.py  Project: tsingfu/bigdata
def flume(action=None):
    import params

    if action == 'config':
        # remove previously defined meta files
        for n in find_expected_agent_names(params.flume_conf_dir):
            File(
                os.path.join(params.flume_conf_dir, n, 'ambari-meta.json'),
                action="delete",
            )
        if params.security_enabled:
            File(format("{conf_dir}/flume_jaas.conf"),
                 owner=params.flume_user,
                 content=InlineTemplate(params.flume_jaas_conf_template))

        Directory(
            params.flume_run_dir,
            group=params.user_group,
            owner=params.flume_user,
        )

        Directory(
            params.flume_conf_dir,
            create_parents=True,
            owner=params.flume_user,
        )
        Directory(
            params.flume_log_dir,
            group=params.user_group,
            owner=params.flume_user,
            create_parents=True,
            cd_access="a",
            mode=0755,
        )

        flume_agents = {}
        if params.flume_conf_content is not None:
            flume_agents = build_flume_topology(params.flume_conf_content)

        for agent in flume_agents.keys():
            flume_agent_conf_dir = os.path.join(params.flume_conf_dir, agent)
            flume_agent_conf_file = os.path.join(flume_agent_conf_dir,
                                                 'flume.conf')
            flume_agent_meta_file = os.path.join(flume_agent_conf_dir,
                                                 'ambari-meta.json')
            flume_agent_log4j_file = os.path.join(flume_agent_conf_dir,
                                                  'log4j.properties')
            flume_agent_env_file = os.path.join(flume_agent_conf_dir,
                                                'flume-env.sh')

            Directory(
                flume_agent_conf_dir,
                owner=params.flume_user,
            )

            PropertiesFile(flume_agent_conf_file,
                           properties=flume_agents[agent],
                           owner=params.flume_user,
                           mode=0644)

            File(flume_agent_log4j_file,
                 content=Template('log4j.properties.j2', agent_name=agent),
                 owner=params.flume_user,
                 mode=0644)

            File(flume_agent_meta_file,
                 content=json.dumps(ambari_meta(agent, flume_agents[agent])),
                 owner=params.flume_user,
                 mode=0644)

            File(flume_agent_env_file,
                 owner=params.flume_user,
                 content=InlineTemplate(params.flume_env_sh_template))

            if params.has_metric_collector:
                File(os.path.join(flume_agent_conf_dir,
                                  "flume-metrics2.properties"),
                     owner=params.flume_user,
                     content=Template("flume-metrics2.properties.j2"))

    elif action == 'start':
        # desired state for service should be STARTED
        if len(params.flume_command_targets) == 0:
            _set_desired_state('STARTED')

        # It is important to run this command as a background process.

        flume_base = as_user(format(
            "{flume_bin} agent --name {{0}} --conf {{1}} --conf-file {{2}} {{3}} > {flume_log_dir}/{{4}}.out 2>&1"
        ),
                             params.flume_user,
                             env={'JAVA_HOME': params.java_home}) + " &"

        for agent in cmd_target_names():
            flume_agent_conf_dir = params.flume_conf_dir + os.sep + agent
            flume_agent_conf_file = flume_agent_conf_dir + os.sep + "flume.conf"
            flume_agent_pid_file = params.flume_run_dir + os.sep + agent + ".pid"

            if not os.path.isfile(flume_agent_conf_file):
                continue

            if not is_flume_process_live(flume_agent_pid_file):
                # TODO someday make the ganglia ports configurable
                extra_args = ''
                if params.ganglia_server_host is not None:
                    extra_args = '-Dflume.monitoring.type=ganglia -Dflume.monitoring.hosts={0}:{1}'
                    extra_args = extra_args.format(params.ganglia_server_host,
                                                   '8655')
                if params.has_metric_collector:
                    extra_args = '-Dflume.monitoring.type=org.apache.hadoop.metrics2.sink.flume.FlumeTimelineMetricsSink ' \
                                 '-Dflume.monitoring.node={0}:{1}'
                    # TODO check if this is used.
                    extra_args = extra_args.format(
                        params.metric_collector_host,
                        params.metric_collector_port)

                flume_cmd = flume_base.format(agent, flume_agent_conf_dir,
                                              flume_agent_conf_file,
                                              extra_args, agent)

                Execute(flume_cmd,
                        wait_for_finish=False,
                        environment={'JAVA_HOME': params.java_home})
                # sometimes startup spawns a couple of threads - so only the first line may count
                pid_cmd = as_sudo(('pgrep', '-o', '-u', params.flume_user, '-f', format('^{java_home}.*{agent}.*'))) + \
                          " | " + as_sudo(('tee', flume_agent_pid_file)) + "  && test ${PIPESTATUS[0]} -eq 0"

                try:
                    Execute(pid_cmd, logoutput=True, tries=20, try_sleep=10)
                except:
                    show_logs(params.flume_log_dir, params.flume_user)
                    raise

        pass
    elif action == 'stop':
        # desired state for service should be INSTALLED
        if len(params.flume_command_targets) == 0:
            _set_desired_state('INSTALLED')

        pid_files = glob.glob(params.flume_run_dir + os.sep + "*.pid")

        if 0 == len(pid_files):
            return

        agent_names = cmd_target_names()

        for agent in agent_names:
            pid_file = format("{flume_run_dir}/{agent}.pid")

            if is_flume_process_live(pid_file):
                pid = shell.checked_call(("cat", pid_file),
                                         sudo=True)[1].strip()
                Execute(("kill", "-15", pid),
                        sudo=True)  # kill command has to be a tuple
                if not await_flume_process_termination(pid_file, try_count=30):
                    Execute(("kill", "-9", pid), sudo=True)

            if not await_flume_process_termination(pid_file, try_count=10):
                show_logs(params.flume_log_dir, params.flume_user)
                raise Fail("Can't stop flume agent: {0}".format(agent))

            File(pid_file, action='delete')
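
Both the start and stop branches hinge on is_flume_process_live, which is not shown. A hedged sketch of such a PID-file liveness check (an assumption about the real helper's behavior):

import os

def is_process_live(pid_file):
    # Read the PID and probe it with signal 0, which checks existence
    # and permissions without actually delivering a signal.
    try:
        with open(pid_file) as f:
            pid = int(f.read().strip())
        os.kill(pid, 0)
        return True
    except (OSError, IOError, ValueError):
        return False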
Code example #7
    def configure(self, env):
        import params
        Directory([
            params.superset_pid_dir, params.superset_log_dir,
            params.superset_config_dir, params.superset_home_dir
        ],
                  mode=0755,
                  cd_access='a',
                  owner=params.superset_user,
                  group=params.user_group,
                  create_parents=True,
                  recursive_ownership=True)

        File(format("{params.superset_config_dir}/superset-env.sh"),
             mode=0755,
             owner=params.superset_user,
             group=params.user_group,
             content=InlineTemplate(params.superset_env_sh_template))

        File(os.path.join(params.superset_bin_dir, 'superset.sh'),
             owner=params.superset_user,
             group=params.user_group,
             mode=0755,
             content=Template("superset.sh"))
        superset_config = mutable_config_dict(
            params.config["configurations"]["superset"])
        if 'AUTH_TYPE' in superset_config and superset_config[
                'AUTH_TYPE'] in params.AUTH_NAME_TO_AUTH_ID_MAP:
            superset_config['AUTH_TYPE'] = params.AUTH_NAME_TO_AUTH_ID_MAP[
                superset_config['AUTH_TYPE']]

        if params.superset_db_uri:
            superset_config["SQLALCHEMY_DATABASE_URI"] = params.superset_db_uri

        CeleryConfig = '''
class CeleryConfig(object):
    BROKER_URL = 'redis://localhost:6379/0'
    CELERY_IMPORTS = ('superset.sql_lab', )
    CELERY_RESULT_BACKEND = 'redis://localhost:6379/0'
    CELERY_ANNOTATIONS = {'tasks.add': {'rate_limit': '10/s'}}        
        '''
        superset_config['CELERY_CONFIG'] = 'CeleryConfig'
        PropertiesFile("superset_config.py",
                       dir=params.superset_config_dir,
                       properties=quote_string_values(
                           superset_config, params.non_quoted_configs,
                           CeleryConfig),
                       owner=params.superset_user,
                       group=params.user_group)

        # Initialize DB and create admin user.
        Execute(format(
            "source {params.superset_config_dir}/superset-env.sh ; {params.superset_bin_dir}/superset db upgrade"
        ),
                user=params.superset_user)
        Execute(format(
            "source {params.superset_config_dir}/superset-env.sh ; {params.superset_bin_dir}/fabmanager create-admin --app superset --username '{params.superset_admin_user}' --password '{params.superset_admin_password!p}' --firstname '{params.superset_admin_firstname}' --lastname '{params.superset_admin_lastname}' --email '{params.superset_admin_email}'"
        ),
                user=params.superset_user)
        Execute(format(
            "source {params.superset_config_dir}/superset-env.sh ; {params.superset_bin_dir}/superset init"
        ),
                user=params.superset_user)

        # Configure Druid Cluster in superset DB
        if len(params.druid_coordinator_hosts) > 0:
            Execute(format(
                "source {params.superset_config_dir}/superset-env.sh ; {params.superset_bin_dir}/superset configure_druid_cluster --name druid-ambari --coordinator-host {params.druid_coordinator_host} --coordinator-port {params.druid_coordinator_port} --broker-host {params.druid_router_host} --broker-port {params.druid_router_port} --coordinator-endpoint druid/coordinator/v1 --broker-endpoint druid/v2"
            ),
                    user=params.superset_user)
            generate_logfeeder_input_config(
                'superset',
                Template("input.config-superset.json.j2",
                         extra_imports=[default]))
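
The PropertiesFile call depends on quote_string_values, not shown here: string settings must land in superset_config.py as quoted Python literals, while exempt keys (such as the injected CeleryConfig reference) stay verbatim. A plausible sketch of that contract, purely an assumption:

def quote_string_values(config, non_quoted_keys):
    # Quote string values so the generated superset_config.py is valid
    # Python; leave exempt keys (e.g. CELERY_CONFIG) unquoted.
    quoted = {}
    for key, value in config.items():
        if key in non_quoted_keys or not isinstance(value, str):
            quoted[key] = value
        else:
            quoted[key] = "'{0}'".format(value)
    return quoted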
Code example #8
def setup_ranger_admin(upgrade_type=None):
    import params

    if upgrade_type is None:
        upgrade_type = Script.get_upgrade_type(
            default("/commandParams/upgrade_type", ""))

    ranger_home = params.ranger_home
    ranger_conf = params.ranger_conf

    Directory(ranger_conf,
              owner=params.unix_user,
              group=params.unix_group,
              create_parents=True)

    copy_jdbc_connector(ranger_home)

    File(
        format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
        content=DownloadSource(
            format("{jdk_location}/{check_db_connection_jar_name}")),
        mode=0644,
    )

    cp = format("{check_db_connection_jar}")
    if params.db_flavor.lower() == 'sqla':
        cp = cp + os.pathsep + format("{ranger_home}/ews/lib/sajdbc4.jar")
    else:
        cp = cp + os.pathsep + format("{driver_curl_target}")
    cp = cp + os.pathsep + format("{ranger_home}/ews/lib/*")

    db_connection_check_command = format(
        "{java_home}/bin/java -cp {cp} org.apache.ambari.server.DBConnectionVerification '{ranger_jdbc_connection_url}' {ranger_db_user} {ranger_db_password!p} {ranger_jdbc_driver}"
    )

    env_dict = {}
    if params.db_flavor.lower() == 'sqla':
        env_dict = {'LD_LIBRARY_PATH': params.ld_lib_path}

    Execute(db_connection_check_command,
            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
            tries=5,
            try_sleep=10,
            environment=env_dict)

    Execute(
        ('ln', '-sf', format('{ranger_home}/ews/webapp/WEB-INF/classes/conf'),
         format('{ranger_home}/conf')),
        not_if=format("ls {ranger_home}/conf"),
        only_if=format("ls {ranger_home}/ews/webapp/WEB-INF/classes/conf"),
        sudo=True)

    if upgrade_type is not None:
        src_file = format(
            '{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/ranger-admin-default-site.xml'
        )
        dst_file = format('{ranger_home}/conf/ranger-admin-default-site.xml')
        Execute(('cp', '-f', src_file, dst_file), sudo=True)

        src_file = format(
            '{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/security-applicationContext.xml'
        )
        dst_file = format('{ranger_home}/conf/security-applicationContext.xml')

        Execute(('cp', '-f', src_file, dst_file), sudo=True)

    Directory(
        format('{ranger_home}/'),
        owner=params.unix_user,
        group=params.unix_group,
        recursive_ownership=True,
    )

    Directory(params.ranger_pid_dir,
              mode=0755,
              owner=params.unix_user,
              group=params.user_group,
              cd_access="a",
              create_parents=True)

    if params.stack_supports_pid:
        File(
            format('{ranger_conf}/ranger-admin-env-piddir.sh'),
            content=format(
                "export RANGER_PID_DIR_PATH={ranger_pid_dir}\nexport RANGER_USER={unix_user}"
            ),
            owner=params.unix_user,
            group=params.unix_group,
            mode=0755)

    Directory(params.admin_log_dir,
              owner=params.unix_user,
              group=params.unix_group,
              create_parents=True,
              cd_access='a',
              mode=0755)

    File(format('{ranger_conf}/ranger-admin-env-logdir.sh'),
         content=format("export RANGER_ADMIN_LOG_DIR={admin_log_dir}"),
         owner=params.unix_user,
         group=params.unix_group,
         mode=0755)

    if os.path.isfile(params.ranger_admin_default_file):
        File(params.ranger_admin_default_file,
             owner=params.unix_user,
             group=params.unix_group)
    else:
        Logger.warning(
            'Required file {0} does not exist, copying the file to {1} path'.
            format(params.ranger_admin_default_file, ranger_conf))
        src_file = format(
            '{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/ranger-admin-default-site.xml'
        )
        dst_file = format('{ranger_home}/conf/ranger-admin-default-site.xml')
        Execute(('cp', '-f', src_file, dst_file), sudo=True)
        File(params.ranger_admin_default_file,
             owner=params.unix_user,
             group=params.unix_group)

    if os.path.isfile(params.security_app_context_file):
        File(params.security_app_context_file,
             owner=params.unix_user,
             group=params.unix_group)
    else:
        Logger.warning(
            'Required file {0} does not exist, copying the file to {1} path'.
            format(params.security_app_context_file, ranger_conf))
        src_file = format(
            '{ranger_home}/ews/webapp/WEB-INF/classes/conf.dist/security-applicationContext.xml'
        )
        dst_file = format('{ranger_home}/conf/security-applicationContext.xml')
        Execute(('cp', '-f', src_file, dst_file), sudo=True)
        File(params.security_app_context_file,
             owner=params.unix_user,
             group=params.unix_group)

    if upgrade_type is not None and params.stack_supports_config_versioning:
        if os.path.islink('/usr/bin/ranger-admin'):
            Link('/usr/bin/ranger-admin', action="delete")

        Link('/usr/bin/ranger-admin',
             to=format('{ranger_home}/ews/ranger-admin-services.sh'))

    if default(
            "/configurations/ranger-admin-site/ranger.authentication.method",
            "") == 'PAM':
        d = '/etc/pam.d'
        if os.path.isdir(d):
            if os.path.isfile(os.path.join(d, 'ranger-admin')):
                Logger.info('ranger-admin PAM file already exists.')
            else:
                File(format('{d}/ranger-admin'),
                     content=Template('ranger_admin_pam.j2'),
                     owner=params.unix_user,
                     group=params.unix_group,
                     mode=0644)
            if os.path.isfile(os.path.join(d, 'ranger-remote')):
                Logger.info('ranger-remote PAM file already exists.')
            else:
                File(format('{d}/ranger-remote'),
                     content=Template('ranger_remote_pam.j2'),
                     owner=params.unix_user,
                     group=params.unix_group,
                     mode=0644)
        else:
            Logger.error(
                "Unable to use PAM authentication, /etc/pam.d/ directory does not exist."
            )

    Execute(('ln', '-sf', format('{ranger_home}/ews/ranger-admin-services.sh'),
             '/usr/bin/ranger-admin'),
            not_if=format("ls /usr/bin/ranger-admin"),
            only_if=format("ls {ranger_home}/ews/ranger-admin-services.sh"),
            sudo=True)

    # remove plain-text password from xml configs

    ranger_admin_site_copy = {}
    ranger_admin_site_copy.update(
        params.config['configurations']['ranger-admin-site'])
    for prop in params.ranger_admin_password_properties:
        if prop in ranger_admin_site_copy:
            ranger_admin_site_copy[prop] = "_"

    XmlConfig("ranger-admin-site.xml",
              conf_dir=ranger_conf,
              configurations=ranger_admin_site_copy,
              configuration_attributes=params.config['configurationAttributes']
              ['ranger-admin-site'],
              owner=params.unix_user,
              group=params.unix_group,
              mode=0644)

    Directory(
        os.path.join(ranger_conf, 'ranger_jaas'),
        mode=0700,
        owner=params.unix_user,
        group=params.unix_group,
    )

    if params.stack_supports_ranger_log4j:
        File(format('{ranger_home}/ews/webapp/WEB-INF/log4j.properties'),
             owner=params.unix_user,
             group=params.unix_group,
             content=InlineTemplate(params.admin_log4j),
             mode=0644)

    do_keystore_setup(upgrade_type=upgrade_type)

    create_core_site_xml(ranger_conf)

    if params.stack_supports_ranger_kerberos and params.security_enabled:
        if params.is_hbase_ha_enabled and params.ranger_hbase_plugin_enabled:
            XmlConfig(
                "hbase-site.xml",
                conf_dir=ranger_conf,
                configurations=params.config['configurations']['hbase-site'],
                configuration_attributes=params.
                config['configurationAttributes']['hbase-site'],
                owner=params.unix_user,
                group=params.unix_group,
                mode=0644)

        if params.is_namenode_ha_enabled and params.ranger_hdfs_plugin_enabled:
            XmlConfig(
                "hdfs-site.xml",
                conf_dir=ranger_conf,
                configurations=params.config['configurations']['hdfs-site'],
                configuration_attributes=params.
                config['configurationAttributes']['hdfs-site'],
                owner=params.unix_user,
                group=params.unix_group,
                mode=0644)
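
The "remove plain-text password" block is a recurring pattern in these scripts: copy the configuration, blank out the sensitive keys, then write only the sanitized copy to disk. As a generic sketch:

def mask_passwords(config, password_props, placeholder="_"):
    # Return a shallow copy with each sensitive property replaced,
    # mirroring the ranger_admin_site_copy loop above.
    masked = dict(config)
    for prop in password_props:
        if prop in masked:
            masked[prop] = placeholder
    return masked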
Code example #9
File: hive.py  Project: prelongs/ambari
def hive(name=None):
    import params

    if name == 'hiveserver2':
        # HDP 2.1.* or lower
        if params.hdp_stack_version_major != "" and compare_versions(
                params.hdp_stack_version_major, "2.2.0.0") < 0:
            params.HdfsResource(params.webhcat_apps_dir,
                                type="directory",
                                action="create_on_execute",
                                owner=params.webhcat_user,
                                mode=0755)

        # Create webhcat dirs.
        if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
            params.HdfsResource(params.hcat_hdfs_user_dir,
                                type="directory",
                                action="create_on_execute",
                                owner=params.hcat_user,
                                mode=params.hcat_hdfs_user_mode)

        params.HdfsResource(params.webhcat_hdfs_user_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.webhcat_user,
                            mode=params.webhcat_hdfs_user_mode)

        # ****** Begin Copy Tarballs ******
        # *********************************
        # HDP 2.2 or higher, copy mapreduce.tar.gz to HDFS
        if params.hdp_stack_version_major != "" and compare_versions(
                params.hdp_stack_version_major, '2.2') >= 0:
            copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user)
            copy_to_hdfs("tez", params.user_group, params.hdfs_user)

        # Always copy pig.tar.gz and hive.tar.gz using the appropriate mode.
        # This can use a different source and dest location to account for both HDP 2.1 and 2.2
        copy_to_hdfs("pig",
                     params.user_group,
                     params.hdfs_user,
                     file_mode=params.tarballs_mode,
                     custom_source_file=params.pig_tar_source,
                     custom_dest_file=params.pig_tar_dest_file)
        copy_to_hdfs("hive",
                     params.user_group,
                     params.hdfs_user,
                     file_mode=params.tarballs_mode,
                     custom_source_file=params.hive_tar_source,
                     custom_dest_file=params.hive_tar_dest_file)

        wildcard_tarballs = ["sqoop", "hadoop_streaming"]
        for tarball_name in wildcard_tarballs:
            source_file_pattern = eval("params." + tarball_name +
                                       "_tar_source")
            dest_dir = eval("params." + tarball_name + "_tar_dest_dir")

            if source_file_pattern is None or dest_dir is None:
                continue

            source_files = glob.glob(
                source_file_pattern) if "*" in source_file_pattern else [
                    source_file_pattern
                ]
            for source_file in source_files:
                src_filename = os.path.basename(source_file)
                dest_file = os.path.join(dest_dir, src_filename)

                copy_to_hdfs(tarball_name,
                             params.user_group,
                             params.hdfs_user,
                             file_mode=params.tarballs_mode,
                             custom_source_file=source_file,
                             custom_dest_file=dest_file)
        # ******* End Copy Tarballs *******
        # *********************************

        # Create Hive Metastore Warehouse Dir
        params.HdfsResource(params.hive_apps_whs_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.hive_user,
                            mode=0777)

        # Create Hive User Dir
        params.HdfsResource(params.hive_hdfs_user_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.hive_user,
                            mode=params.hive_hdfs_user_mode)

        if not is_empty(params.hive_exec_scratchdir) and not urlparse(
                params.hive_exec_scratchdir).path.startswith("/tmp"):
            params.HdfsResource(
                params.hive_exec_scratchdir,
                type="directory",
                action="create_on_execute",
                owner=params.hive_user,
                group=params.hdfs_user,
                mode=0777
            )  # Hive expects this dir to be writeable by everyone as it is used as a temp dir

        params.HdfsResource(None, action="execute")

    Directory(params.hive_etc_dir_prefix, mode=0755)

    # We should change configurations for the client as well as for the server.
    # The reason is that stale-configs are service-level, not component-level.
    for conf_dir in params.hive_conf_dirs_list:
        fill_conf_dir(conf_dir)

    XmlConfig(
        "hive-site.xml",
        conf_dir=params.hive_config_dir,
        configurations=params.hive_site_config,
        configuration_attributes=params.config['configuration_attributes']
        ['hive-site'],
        owner=params.hive_user,
        group=params.user_group,
        mode=0644)

    setup_atlas_hive()

    if params.hive_specific_configs_supported and name == 'hiveserver2':
        XmlConfig(
            "hiveserver2-site.xml",
            conf_dir=params.hive_server_conf_dir,
            configurations=params.config['configurations']['hiveserver2-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['hiveserver2-site'],
            owner=params.hive_user,
            group=params.user_group,
            mode=0644)

    File(format("{hive_config_dir}/hive-env.sh"),
         owner=params.hive_user,
         group=params.user_group,
         content=InlineTemplate(params.hive_env_sh_template))

    # On some OSes this folder may not exist, so create it before placing files there
    Directory(params.limits_conf_dir,
              recursive=True,
              owner='root',
              group='root')

    File(os.path.join(params.limits_conf_dir, 'hive.conf'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("hive.conf.j2"))

    if name == 'metastore' or name == 'hiveserver2':
        jdbc_connector()

    File(
        format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
        content=DownloadSource(
            format("{jdk_location}{check_db_connection_jar_name}")),
        mode=0644,
    )

    if name == 'metastore':
        File(params.start_metastore_path,
             mode=0755,
             content=StaticFile('startMetastore.sh'))
        if params.init_metastore_schema:
            create_schema_cmd = format(
                "export HIVE_CONF_DIR={hive_server_conf_dir} ; "
                "{hive_bin}/schematool -initSchema "
                "-dbType {hive_metastore_db_type} "
                "-userName {hive_metastore_user_name} "
                "-passWord {hive_metastore_user_passwd!p}")

            check_schema_created_cmd = as_user(
                format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
                       "{hive_bin}/schematool -info "
                       "-dbType {hive_metastore_db_type} "
                       "-userName {hive_metastore_user_name} "
                       "-passWord {hive_metastore_user_passwd!p}"),
                params.hive_user)

            Execute(create_schema_cmd,
                    not_if=check_schema_created_cmd,
                    user=params.hive_user)
    elif name == 'hiveserver2':
        File(params.start_hiveserver2_path,
             mode=0755,
             content=Template(format('{start_hiveserver2_script}')))

    if name != "client":
        crt_directory(params.hive_pid_dir)
        crt_directory(params.hive_log_dir)
        crt_directory(params.hive_var_lib)
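
The schema-initialization Execute is idempotent: schematool -info exits non-zero only when the schema is missing, and not_if skips -initSchema otherwise. The same guard pattern expressed with the standard library (a sketch, not Ambari's Execute semantics):

import subprocess

def run_unless(check_cmd, cmd):
    # Run cmd only when check_cmd fails, like Execute(..., not_if=...).
    if subprocess.call(check_cmd, shell=True) != 0:
        subprocess.check_call(cmd, shell=True)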
Code example #10
File: storm.py  Project: zengzhaozheng/ambari
def storm(name=None):
    import params
    import os

    Directory(params.log_dir,
              owner=params.storm_user,
              group=params.user_group,
              mode=0777,
              create_parents=True)

    Directory(
        [params.pid_dir, params.local_dir],
        owner=params.storm_user,
        group=params.user_group,
        create_parents=True,
        cd_access="a",
        mode=0755,
    )

    Directory(
        params.conf_dir,
        group=params.user_group,
        create_parents=True,
        cd_access="a",
    )

    File(format("{conf_dir}/config.yaml"),
         content=Template("config.yaml.j2"),
         owner=params.storm_user,
         group=params.user_group)

    configurations = params.config['configurations']['storm-site']

    File(format("{conf_dir}/storm.yaml"),
         content=yaml_config_template(configurations),
         owner=params.storm_user,
         group=params.user_group)

    File(format("{conf_dir}/storm-env.sh"),
         owner=params.storm_user,
         content=InlineTemplate(params.storm_env_sh_template))

    if params.has_atlas:
        atlas_storm_hook_dir = os.path.join(params.atlas_home_dir, "hook",
                                            "storm")
        if os.path.exists(atlas_storm_hook_dir):
            storm_extlib_dir = os.path.join(params.storm_component_home_dir,
                                            "extlib")
            if os.path.exists(storm_extlib_dir):
                src_files = os.listdir(atlas_storm_hook_dir)
                for file_name in src_files:
                    atlas_storm_hook_file_name = os.path.join(
                        atlas_storm_hook_dir, file_name)
                    storm_lib_file_name = os.path.join(storm_extlib_dir,
                                                       file_name)
                    if (os.path.isfile(atlas_storm_hook_file_name)):
                        Link(storm_lib_file_name,
                             to=atlas_storm_hook_file_name)

    if params.has_metric_collector:
        File(format("{conf_dir}/storm-metrics2.properties"),
             owner=params.storm_user,
             group=params.user_group,
             content=Template("storm-metrics2.properties.j2"))

        # Remove symlinks. They can be there if you are upgrading from HDP < 2.2 to HDP >= 2.2
        Link(format("{storm_lib_dir}/ambari-metrics-storm-sink.jar"),
             action="delete")
        # On old HDP 2.1 versions, this symlink may also exist and break EU to newer versions
        Link("/usr/lib/storm/lib/ambari-metrics-storm-sink.jar",
             action="delete")

        Execute(format(
            "{sudo} ln -s {metric_collector_sink_jar} {storm_lib_dir}/ambari-metrics-storm-sink.jar"
        ),
                not_if=format(
                    "ls {storm_lib_dir}/ambari-metrics-storm-sink.jar"),
                only_if=format("ls {metric_collector_sink_jar}"))

    if params.storm_logs_supported:
        Directory(params.log4j_dir,
                  owner=params.storm_user,
                  group=params.user_group,
                  mode=0755,
                  create_parents=True)

        File(format("{log4j_dir}/cluster.xml"),
             owner=params.storm_user,
             content=InlineTemplate(params.storm_cluster_log4j_content))
        File(format("{log4j_dir}/worker.xml"),
             owner=params.storm_user,
             content=InlineTemplate(params.storm_worker_log4j_content))

    if params.security_enabled:
        TemplateConfig(format("{conf_dir}/storm_jaas.conf"),
                       owner=params.storm_user)
        if params.stack_version_formatted and check_stack_feature(
                StackFeature.ROLLING_UPGRADE, params.stack_version_formatted):
            TemplateConfig(format("{conf_dir}/client_jaas.conf"),
                           owner=params.storm_user)
            minRuid = configurations[
                '_storm.min.ruid'] if configurations.has_key(
                    '_storm.min.ruid') else ''

            min_user_ruid = int(
                minRuid) if minRuid.isdigit() else _find_real_user_min_uid()

            File(format("{conf_dir}/worker-launcher.cfg"),
                 content=Template("worker-launcher.cfg.j2",
                                  min_user_ruid=min_user_ruid),
                 owner='root',
                 group=params.user_group)
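
_find_real_user_min_uid is referenced but not defined in this excerpt; by its name it derives the minimum UID of real (non-system) users. A hedged sketch of one way to do that, assuming it reads UID_MIN from /etc/login.defs:

def find_real_user_min_uid(login_defs="/etc/login.defs", default=500):
    # Fall back to a conventional default if the file is missing or
    # does not define UID_MIN.
    try:
        with open(login_defs) as f:
            for line in f:
                parts = line.split()
                if len(parts) == 2 and parts[0] == "UID_MIN":
                    return int(parts[1])
    except (IOError, ValueError):
        pass
    return default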
Code example #11
File: hive.py  Project: gbasehd/GBase-Ambari
def hive(name=None):
    import params

    if name == 'hiveserver2':
        # BigInsights 4.0.* or lower
        if params.stack_version != "" and compare_versions(
                params.stack_version, "4.1.0.0") < 0:
            params.HdfsResource(params.webhcat_apps_dir,
                                type="directory",
                                action="create_on_execute",
                                owner=params.webhcat_user,
                                mode=0755)

        # Create webhcat dirs.
        if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
            params.HdfsResource(params.hcat_hdfs_user_dir,
                                type="directory",
                                action="create_on_execute",
                                owner=params.hcat_user,
                                mode=params.hcat_hdfs_user_mode)

        params.HdfsResource(params.webhcat_hdfs_user_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.webhcat_user,
                            mode=params.webhcat_hdfs_user_mode)

        # ****** Begin Copy Tarballs ******
        # *********************************
        if params.stack_version != "" and compare_versions(
                params.stack_version, '4.0.0.0') >= 0:
            copy_to_hdfs("mapreduce",
                         params.user_group,
                         params.hdfs_user,
                         skip=params.host_sys_prepped)

        # Always copy pig.tar.gz and hive.tar.gz using the appropriate mode.
        copy_to_hdfs("pig",
                     params.user_group,
                     params.hdfs_user,
                     file_mode=params.tarballs_mode,
                     custom_source_file=params.pig_tar_source,
                     custom_dest_file=params.pig_tar_dest_file,
                     skip=params.host_sys_prepped)
        copy_to_hdfs("hive",
                     params.user_group,
                     params.hdfs_user,
                     file_mode=params.tarballs_mode,
                     custom_source_file=params.hive_tar_source,
                     custom_dest_file=params.hive_tar_dest_file,
                     skip=params.host_sys_prepped)

        wildcard_tarballs = ["sqoop", "hadoop_streaming"]
        for tarball_name in wildcard_tarballs:
            source_file_pattern = eval("params." + tarball_name +
                                       "_tar_source")
            dest_dir = eval("params." + tarball_name + "_tar_dest_dir")

            if source_file_pattern is None or dest_dir is None:
                continue

            source_files = glob.glob(
                source_file_pattern) if "*" in source_file_pattern else [
                    source_file_pattern
                ]
            for source_file in source_files:
                src_filename = os.path.basename(source_file)
                dest_file = os.path.join(dest_dir, src_filename)

                copy_to_hdfs(tarball_name,
                             params.user_group,
                             params.hdfs_user,
                             file_mode=params.tarballs_mode,
                             custom_source_file=source_file,
                             custom_dest_file=dest_file,
                             skip=params.host_sys_prepped)
        # ******* End Copy Tarballs *******
        # *********************************

        params.HdfsResource(params.hive_apps_whs_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.hive_user,
                            group=params.user_group,
                            mode=0770)
        # Create Hive User Dir
        params.HdfsResource(params.hive_hdfs_user_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.hive_user,
                            mode=params.hive_hdfs_user_mode)

        # hive.exec.scratchdir should be created via hive_user;
        # otherwise hive.start.cleanup.scratchdir won't work, because Ambari services are always started as hive_user
        if not is_empty(params.hive_exec_scratchdir):
            params.HdfsResource(
                params.hive_exec_scratchdir,
                type="directory",
                action="create_on_execute",
                owner=params.hive_user,
                group=params.hdfs_user,
                mode=0777
            )  # Hive expects this dir to be writeable by everyone as it is used as a temp dir

        params.HdfsResource(None, action="execute")

    Directory(params.hive_etc_dir_prefix, mode=0755)

    # We should change configurations for the client as well as for the server.
    # The reason is that stale-configs are service-level, not component-level.
    for conf_dir in params.hive_conf_dirs_list:
        fill_conf_dir(conf_dir)

    hive_site_permission = 0644
    if name in ["hiveserver2", "metastore"]:
        hive_site_permission = 0640
    XmlConfig(
        "hive-site.xml",
        conf_dir=params.hive_config_dir,
        configurations=params.hive_site_config,
        configuration_attributes=params.config['configuration_attributes']
        ['hive-site'],
        owner=params.hive_user,
        group=params.user_group,
        mode=hive_site_permission)

    if params.hive_specific_configs_supported and name == 'hiveserver2':
        XmlConfig(
            "hiveserver2-site.xml",
            conf_dir=params.hive_server_conf_dir,
            configurations=params.config['configurations']['hiveserver2-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['hiveserver2-site'],
            owner=params.hive_user,
            group=params.user_group,
            mode=0644)

    File(format("{hive_config_dir}/hive-env.sh"),
         owner=params.hive_user,
         group=params.user_group,
         content=InlineTemplate(params.hive_env_sh_template))

    # On some OSes this folder may not exist, so create it before placing files there
    Directory(params.limits_conf_dir,
              create_parents=True,
              owner='root',
              group='root')

    File(os.path.join(params.limits_conf_dir, 'hive.conf'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("hive.conf.j2"))

    if (name == 'metastore' or name
            == 'hiveserver2') and not os.path.exists(params.hive_jdbc_target):
        jdbc_connector(params.hive_jdbc_target, params.hive_previous_jdbc_jar)

    File(
        format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
        content=DownloadSource(
            format("{jdk_location}{check_db_connection_jar_name}")),
        mode=0644,
    )

    if name == 'metastore':
        File(params.start_metastore_path,
             mode=0755,
             content=StaticFile('startMetastore.sh'))
        if params.init_metastore_schema:
            create_schema_cmd = format(
                "export HIVE_CONF_DIR={hive_server_conf_dir} ; "
                "{hive_bin}/schematool -initSchema "
                "-dbType {hive_metastore_db_type} "
                "-userName {hive_metastore_user_name} "
                "-passWord {hive_metastore_user_passwd!p}")

            check_schema_created_cmd = as_user(
                format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
                       "{hive_bin}/schematool -info "
                       "-dbType {hive_metastore_db_type} "
                       "-userName {hive_metastore_user_name} "
                       "-passWord {hive_metastore_user_passwd!p}"),
                params.hive_user)

            # HACK: with quoted passwords and as_user (which does its own quoting),
            # !p cannot hide the password. Work around it as follows:
            quoted_hive_metastore_user_passwd = quote_bash_args(
                quote_bash_args(params.hive_metastore_user_passwd))
            if quoted_hive_metastore_user_passwd[0] == "'" and quoted_hive_metastore_user_passwd[-1] == "'" \
                or quoted_hive_metastore_user_passwd[0] == '"' and quoted_hive_metastore_user_passwd[-1] == '"':
                quoted_hive_metastore_user_passwd = quoted_hive_metastore_user_passwd[
                    1:-1]
            Logger.sensitive_strings[repr(check_schema_created_cmd)] = repr(
                check_schema_created_cmd.replace(
                    format("-passWord {quoted_hive_metastore_user_passwd}"),
                    "-passWord " + utils.PASSWORDS_HIDE_STRING))

            Execute(create_schema_cmd,
                    not_if=check_schema_created_cmd,
                    user=params.hive_user)
    elif name == 'hiveserver2':
        File(params.start_hiveserver2_path,
             mode=0755,
             content=Template(format('{start_hiveserver2_script}')))

    if name != "client":
        crt_directory(params.hive_pid_dir)
        crt_directory(params.hive_log_dir)
        crt_directory(params.hive_var_lib)
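
The password-hiding hack double-quotes the password, then strips one layer of outer quotes so the string registered in Logger.sensitive_strings matches what as_user actually logs. A sketch of the same transformation with shlex.quote standing in for quote_bash_args (the original is Python 2):

import shlex

def doubly_quoted(password):
    # Quote twice, then drop the outermost quote pair if present,
    # mirroring the quoted_hive_metastore_user_passwd manipulation above.
    quoted = shlex.quote(shlex.quote(password))
    if len(quoted) >= 2 and quoted[0] == quoted[-1] and quoted[0] in ("'", '"'):
        quoted = quoted[1:-1]
    return quoted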
Code example #12
def setup_usersync(upgrade_type=None):
    import params

    usersync_home = params.usersync_home
    ranger_home = params.ranger_home
    ranger_ugsync_conf = params.ranger_ugsync_conf

    if not is_empty(
            params.ranger_usersync_ldap_ldapbindpassword
    ) and params.ug_sync_source == 'org.apache.ranger.ldapusersync.process.LdapUserGroupBuilder':
        password_validation(params.ranger_usersync_ldap_ldapbindpassword)

    Directory(params.ranger_pid_dir,
              mode=0755,
              owner=params.unix_user,
              group=params.user_group,
              cd_access="a",
              create_parents=True)

    Directory(params.usersync_log_dir,
              owner=params.unix_user,
              group=params.unix_group,
              cd_access='a',
              create_parents=True,
              mode=0755,
              recursive_ownership=True)

    Directory(format("{ranger_ugsync_conf}/"), owner=params.unix_user)

    generate_logfeeder_input_config(
        'ranger',
        Template("input.config-ranger.json.j2", extra_imports=[default]))

    if upgrade_type is not None:
        src_file = format(
            '{usersync_home}/conf.dist/ranger-ugsync-default.xml')
        dst_file = format('{usersync_home}/conf/ranger-ugsync-default.xml')
        Execute(('cp', '-f', src_file, dst_file), sudo=True)

    if params.stack_supports_ranger_log4j:
        File(format('{usersync_home}/conf/log4j.properties'),
             owner=params.unix_user,
             group=params.unix_group,
             content=InlineTemplate(params.usersync_log4j),
             mode=0644)
    elif upgrade_type is not None and not params.stack_supports_ranger_log4j:
        src_file = format('{usersync_home}/conf.dist/log4j.xml')
        dst_file = format('{usersync_home}/conf/log4j.xml')
        Execute(('cp', '-f', src_file, dst_file), sudo=True)

    # remove plain-text password from xml configs
    ranger_ugsync_site_copy = {}
    ranger_ugsync_site_copy.update(
        params.config['configurations']['ranger-ugsync-site'])
    for prop in params.ranger_usersync_password_properties:
        if prop in ranger_ugsync_site_copy:
            ranger_ugsync_site_copy[prop] = "_"

    XmlConfig("ranger-ugsync-site.xml",
              conf_dir=ranger_ugsync_conf,
              configurations=ranger_ugsync_site_copy,
              configuration_attributes=params.config['configurationAttributes']
              ['ranger-ugsync-site'],
              owner=params.unix_user,
              group=params.unix_group,
              mode=0644)

    if os.path.isfile(params.ranger_ugsync_default_file):
        File(params.ranger_ugsync_default_file,
             owner=params.unix_user,
             group=params.unix_group)

    if os.path.isfile(params.usgsync_log4j_file):
        File(params.usgsync_log4j_file,
             owner=params.unix_user,
             group=params.unix_group)

    if os.path.isfile(params.cred_validator_file):
        File(params.cred_validator_file, group=params.unix_group, mode=0750)

    if os.path.isfile(params.pam_cred_validator_file):
        File(params.pam_cred_validator_file,
             group=params.unix_group,
             mode=0750)

    ranger_credential_helper(params.ugsync_cred_lib,
                             'usersync.ssl.key.password',
                             params.ranger_usersync_keystore_password,
                             params.ugsync_jceks_path)

    if not is_empty(
            params.ranger_usersync_ldap_ldapbindpassword
    ) and params.ug_sync_source == 'org.apache.ranger.ldapusersync.process.LdapUserGroupBuilder':
        ranger_credential_helper(params.ugsync_cred_lib,
                                 'ranger.usersync.ldap.bindalias',
                                 params.ranger_usersync_ldap_ldapbindpassword,
                                 params.ugsync_jceks_path)

    ranger_credential_helper(params.ugsync_cred_lib,
                             'usersync.ssl.truststore.password',
                             params.ranger_usersync_truststore_password,
                             params.ugsync_jceks_path)

    File(params.ugsync_jceks_path,
         owner=params.unix_user,
         group=params.unix_group,
         only_if=format("test -e {ugsync_jceks_path}"),
         mode=0640)

    update_dot_jceks_crc_ownership(
        credential_provider_path=params.ugsync_jceks_path,
        user=params.unix_user,
        group=params.unix_group)

    File(
        params.usersync_services_file,
        mode=0755,
    )

    if not os.path.isfile(params.ranger_usersync_keystore_file):
        cmd = format(
            "{java_home}/bin/keytool -genkeypair -keyalg RSA -alias selfsigned -keystore '{ranger_usersync_keystore_file}' -keypass {ranger_usersync_keystore_password!p} -storepass {ranger_usersync_keystore_password!p} -validity 3600 -keysize 2048 -dname '{default_dn_name}'"
        )

        Execute(cmd, logoutput=True, user=params.unix_user)

        File(params.ranger_usersync_keystore_file,
             owner=params.unix_user,
             group=params.user_group,
             only_if=format("test -e {ranger_usersync_keystore_file}"),
             mode=0640)

    create_core_site_xml(ranger_ugsync_conf)

    File(format("{ranger_ugsync_conf}/ranger-usersync-env.sh"),
         content=InlineTemplate(params.ranger_env_content),
         owner=params.unix_user,
         group=params.unix_group,
         mode=0755)
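Two patterns in setup_usersync are worth noting: password properties are blanked in a copied dict before XmlConfig writes ranger-ugsync-site.xml to disk, while the real secrets go to the JCEKS store via ranger_credential_helper. A minimal standalone sketch of the masking step (property names and values are illustrative):

def mask_passwords(site_config, password_properties, placeholder="_"):
    # return a copy of the site config with every password property blanked
    masked = dict(site_config)
    for prop in password_properties:
        if prop in masked:
            masked[prop] = placeholder
    return masked

ugsync_site = {'ranger.usersync.ldap.ldapbindpassword': 'secret',   # illustrative
               'ranger.usersync.ldap.url': 'ldap://host:389'}
print(mask_passwords(ugsync_site, ['ranger.usersync.ldap.ldapbindpassword']))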
Code example #13
File: yarn.py Project: gzsombor/ambari
def yarn(name=None, config_dir=None):
    """
  :param name: Component name: one of apptimelineserver, historyserver, nodemanager, resourcemanager, or None (client defaults)
  :param config_dir: Which config directory to write configs to; it may differ during a rolling upgrade.
  """
    import params

    install_lzo_if_needed()

    if config_dir is None:
        config_dir = params.hadoop_conf_dir

    if params.yarn_nodemanager_recovery_dir:
        Directory(
            InlineTemplate(params.yarn_nodemanager_recovery_dir).get_content(),
            owner=params.yarn_user,
            group=params.user_group,
            create_parents=True,
            mode=0755,
            cd_access='a',
        )

    Directory(
        [params.yarn_pid_dir_prefix, params.yarn_pid_dir, params.yarn_log_dir],
        owner=params.yarn_user,
        group=params.user_group,
        create_parents=True,
        cd_access='a',
    )
    Directory(
        [
            params.mapred_pid_dir_prefix, params.mapred_pid_dir,
            params.mapred_log_dir_prefix, params.mapred_log_dir
        ],
        owner=params.mapred_user,
        group=params.user_group,
        create_parents=True,
        cd_access='a',
    )
    Directory(
        [params.yarn_log_dir_prefix],
        owner=params.yarn_user,
        group=params.user_group,
        create_parents=True,
        ignore_failures=True,
        cd_access='a',
    )

    # Some of these function calls depend on the directories above being created first.
    if name == 'resourcemanager':
        setup_resourcemanager()
    elif name == 'nodemanager':
        setup_nodemanager()
    elif name == 'apptimelineserver':
        setup_ats()
    elif name == 'historyserver':
        setup_historyserver()

    XmlConfig("core-site.xml",
              conf_dir=config_dir,
              configurations=params.config['configurations']['core-site'],
              configuration_attributes=params.config['configurationAttributes']
              ['core-site'],
              owner=params.hdfs_user,
              group=params.user_group,
              mode=0644)

    # During RU, Core Masters and Slaves need hdfs-site.xml
    # TODO, instead of specifying individual configs, which is susceptible to breaking when new configs are added,
    # RU should rely on all available in <stack-root>/<version>/hadoop/conf
    XmlConfig("hdfs-site.xml",
              conf_dir=config_dir,
              configurations=params.config['configurations']['hdfs-site'],
              configuration_attributes=params.config['configurationAttributes']
              ['hdfs-site'],
              owner=params.hdfs_user,
              group=params.user_group,
              mode=0644)

    XmlConfig("mapred-site.xml",
              conf_dir=config_dir,
              configurations=params.config['configurations']['mapred-site'],
              configuration_attributes=params.config['configurationAttributes']
              ['mapred-site'],
              owner=params.yarn_user,
              group=params.user_group,
              mode=0644)

    XmlConfig("yarn-site.xml",
              conf_dir=config_dir,
              configurations=params.config['configurations']['yarn-site'],
              configuration_attributes=params.config['configurationAttributes']
              ['yarn-site'],
              owner=params.yarn_user,
              group=params.user_group,
              mode=0644)

    XmlConfig(
        "capacity-scheduler.xml",
        conf_dir=config_dir,
        configurations=params.config['configurations']['capacity-scheduler'],
        configuration_attributes=params.config['configurationAttributes']
        ['capacity-scheduler'],
        owner=params.yarn_user,
        group=params.user_group,
        mode=0644)

    File(format("{limits_conf_dir}/yarn.conf"),
         mode=0644,
         content=Template('yarn.conf.j2'))

    File(format("{limits_conf_dir}/mapreduce.conf"),
         mode=0644,
         content=Template('mapreduce.conf.j2'))

    File(os.path.join(config_dir, "yarn-env.sh"),
         owner=params.yarn_user,
         group=params.user_group,
         mode=0755,
         content=InlineTemplate(params.yarn_env_sh_template))

    File(format("{yarn_container_bin}/container-executor"),
         group=params.yarn_executor_container_group,
         mode=params.container_executor_mode)

    File(os.path.join(config_dir, "container-executor.cfg"),
         group=params.user_group,
         mode=0644,
         content=Template('container-executor.cfg.j2'))

    Directory(params.cgroups_dir,
              group=params.user_group,
              create_parents=True,
              mode=0755,
              cd_access="a")

    File(os.path.join(config_dir, "mapred-env.sh"),
         owner=params.tc_owner,
         mode=0755,
         content=InlineTemplate(params.mapred_env_sh_template))

    if params.security_enabled:
        File(os.path.join(params.hadoop_bin, "task-controller"),
             owner="root",
             group=params.mapred_tt_group,
             mode=06050)
        File(os.path.join(config_dir, 'taskcontroller.cfg'),
             owner=params.tc_owner,
             mode=params.tc_mode,
             group=params.mapred_tt_group,
             content=Template("taskcontroller.cfg.j2"))
        File(os.path.join(config_dir, 'yarn_jaas.conf'),
             owner=params.yarn_user,
             group=params.user_group,
             content=Template("yarn_jaas.conf.j2"))
        if params.has_ats:
            File(os.path.join(config_dir, 'yarn_ats_jaas.conf'),
                 owner=params.yarn_user,
                 group=params.user_group,
                 content=Template("yarn_ats_jaas.conf.j2"))
        File(os.path.join(config_dir, 'yarn_nm_jaas.conf'),
             owner=params.yarn_user,
             group=params.user_group,
             content=Template("yarn_nm_jaas.conf.j2"))
        if params.has_hs:
            File(os.path.join(config_dir, 'mapred_jaas.conf'),
                 owner=params.mapred_user,
                 group=params.user_group,
                 content=Template("mapred_jaas.conf.j2"))
    else:
        File(os.path.join(config_dir, 'taskcontroller.cfg'),
             owner=params.tc_owner,
             content=Template("taskcontroller.cfg.j2"))

    XmlConfig("mapred-site.xml",
              conf_dir=config_dir,
              configurations=params.config['configurations']['mapred-site'],
              configuration_attributes=params.config['configurationAttributes']
              ['mapred-site'],
              owner=params.mapred_user,
              group=params.user_group)

    XmlConfig(
        "capacity-scheduler.xml",
        conf_dir=config_dir,
        configurations=params.config['configurations']['capacity-scheduler'],
        configuration_attributes=params.config['configurationAttributes']
        ['capacity-scheduler'],
        owner=params.hdfs_user,
        group=params.user_group)

    if "ssl-client" in params.config['configurations']:
        XmlConfig(
            "ssl-client.xml",
            conf_dir=config_dir,
            configurations=params.config['configurations']['ssl-client'],
            configuration_attributes=params.config['configurationAttributes']
            ['ssl-client'],
            owner=params.hdfs_user,
            group=params.user_group)

        Directory(
            params.hadoop_conf_secure_dir,
            create_parents=True,
            owner='root',
            group=params.user_group,
            cd_access='a',
        )

        XmlConfig(
            "ssl-client.xml",
            conf_dir=params.hadoop_conf_secure_dir,
            configurations=params.config['configurations']['ssl-client'],
            configuration_attributes=params.config['configurationAttributes']
            ['ssl-client'],
            owner=params.hdfs_user,
            group=params.user_group)

    if "ssl-server" in params.config['configurations']:
        XmlConfig(
            "ssl-server.xml",
            conf_dir=config_dir,
            configurations=params.config['configurations']['ssl-server'],
            configuration_attributes=params.config['configurationAttributes']
            ['ssl-server'],
            owner=params.hdfs_user,
            group=params.user_group)
    if os.path.exists(os.path.join(config_dir, 'fair-scheduler.xml')):
        File(os.path.join(config_dir, 'fair-scheduler.xml'),
             owner=params.mapred_user,
             group=params.user_group)

    if os.path.exists(os.path.join(config_dir, 'ssl-client.xml.example')):
        File(os.path.join(config_dir, 'ssl-client.xml.example'),
             owner=params.mapred_user,
             group=params.user_group)

    if os.path.exists(os.path.join(config_dir, 'ssl-server.xml.example')):
        File(os.path.join(config_dir, 'ssl-server.xml.example'),
             owner=params.mapred_user,
             group=params.user_group)
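The *-site.xml writes above differ only in the config name, owner, and mode; a hedged refactoring sketch showing how the repetition could be collapsed (write_site_xml is a hypothetical helper; XmlConfig, params, and config_dir are assumed to be exactly the ones in the snippet):

def write_site_xml(config_dir, params, site_owner_pairs, mode=0644):
    # one XmlConfig per (site, owner) pair, everything else held constant
    for site, owner in site_owner_pairs:
        XmlConfig(site + ".xml",
                  conf_dir=config_dir,
                  configurations=params.config['configurations'][site],
                  configuration_attributes=params.config['configurationAttributes'][site],
                  owner=owner,
                  group=params.user_group,
                  mode=mode)

write_site_xml(config_dir, params,
               [("core-site", params.hdfs_user),
                ("hdfs-site", params.hdfs_user),
                ("mapred-site", params.yarn_user),
                ("yarn-site", params.yarn_user),
                ("capacity-scheduler", params.yarn_user)])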
Code example #14
def yarn(name=None, config_dir=None):
    """
  :param name: Component name: one of apptimelineserver, historyserver, nodemanager, resourcemanager, or None (client defaults)
  :param config_dir: Which config directory to write configs to; it may differ during a rolling upgrade.
  """
    import params

    if config_dir is None:
        config_dir = params.hadoop_conf_dir

    if name == "historyserver":
        if params.yarn_log_aggregation_enabled:
            params.HdfsResource(params.yarn_nm_app_log_dir,
                                action="create_on_execute",
                                type="directory",
                                owner=params.yarn_user,
                                group=params.user_group,
                                mode=0777,
                                recursive_chmod=True)

        # create the /tmp folder with proper permissions if it doesn't exist yet
        if params.entity_file_history_directory.startswith('/tmp'):
            params.HdfsResource(
                params.hdfs_tmp_dir,
                action="create_on_execute",
                type="directory",
                owner=params.hdfs_user,
                mode=0777,
            )

        params.HdfsResource(params.entity_file_history_directory,
                            action="create_on_execute",
                            type="directory",
                            owner=params.yarn_user,
                            group=params.user_group)
        params.HdfsResource("/mapred",
                            type="directory",
                            action="create_on_execute",
                            owner=params.mapred_user)
        params.HdfsResource("/mapred/system",
                            type="directory",
                            action="create_on_execute",
                            owner=params.hdfs_user)
        params.HdfsResource(params.mapreduce_jobhistory_done_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.mapred_user,
                            group=params.user_group,
                            change_permissions_for_parents=True,
                            mode=0777)
        params.HdfsResource(None, action="execute")
        Directory(
            params.jhs_leveldb_state_store_dir,
            owner=params.mapred_user,
            group=params.user_group,
            create_parents=True,
            cd_access="a",
            recursive_ownership=True,
        )

    #<editor-fold desc="Node Manager Section">
    if name == "nodemanager":

        # First start after enabling/disabling security
        if params.toggle_nm_security:
            Directory(params.nm_local_dirs_list + params.nm_log_dirs_list,
                      action='delete')

            # If yarn.nodemanager.recovery.dir exists, remove this dir
            if params.yarn_nodemanager_recovery_dir:
                Directory(InlineTemplate(
                    params.yarn_nodemanager_recovery_dir).get_content(),
                          action='delete')

            # Setting NM marker file
            if params.security_enabled:
                Directory(params.nm_security_marker_dir)
                File(
                    params.nm_security_marker,
                    content=
                    "Marker file to track the first start after enabling/disabling security. "
                    "During the first start, the YARN local and log dirs are removed and recreated."
                )
            elif not params.security_enabled:
                File(params.nm_security_marker, action="delete")

        if not params.security_enabled or params.toggle_nm_security:
            # handle_mounted_dirs ensures that we don't create dirs which are temporarily unavailable (unmounted) and intended to reside on a different mount.
            nm_log_dir_to_mount_file_content = handle_mounted_dirs(
                create_log_dir, params.nm_log_dirs,
                params.nm_log_dir_to_mount_file, params)
            # create a history file used by handle_mounted_dirs
            File(params.nm_log_dir_to_mount_file,
                 owner=params.hdfs_user,
                 group=params.user_group,
                 mode=0644,
                 content=nm_log_dir_to_mount_file_content)
            nm_local_dir_to_mount_file_content = handle_mounted_dirs(
                create_local_dir, params.nm_local_dirs,
                params.nm_local_dir_to_mount_file, params)
            File(params.nm_local_dir_to_mount_file,
                 owner=params.hdfs_user,
                 group=params.user_group,
                 mode=0644,
                 content=nm_local_dir_to_mount_file_content)
    #</editor-fold>

    if params.yarn_nodemanager_recovery_dir:
        Directory(
            InlineTemplate(params.yarn_nodemanager_recovery_dir).get_content(),
            owner=params.yarn_user,
            group=params.user_group,
            create_parents=True,
            mode=0755,
            cd_access='a',
        )

    Directory(
        [params.yarn_pid_dir_prefix, params.yarn_pid_dir, params.yarn_log_dir],
        owner=params.yarn_user,
        group=params.user_group,
        create_parents=True,
        cd_access='a',
    )

    Directory(
        [
            params.mapred_pid_dir_prefix, params.mapred_pid_dir,
            params.mapred_log_dir_prefix, params.mapred_log_dir
        ],
        owner=params.mapred_user,
        group=params.user_group,
        create_parents=True,
        cd_access='a',
    )
    Directory(
        [params.yarn_log_dir_prefix],
        owner=params.yarn_user,
        group=params.user_group,
        create_parents=True,
        ignore_failures=True,
        cd_access='a',
    )

    XmlConfig(
        "core-site.xml",
        conf_dir=config_dir,
        configurations=params.config['configurations']['core-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['core-site'],
        owner=params.hdfs_user,
        group=params.user_group,
        mode=0644)

    # During RU, Core Masters and Slaves need hdfs-site.xml
    # TODO, instead of specifying individual configs, which is susceptible to breaking when new configs are added,
    # RU should rely on all available in <stack-root>/<version>/hadoop/conf
    if 'hdfs-site' in params.config['configurations']:
        XmlConfig(
            "hdfs-site.xml",
            conf_dir=config_dir,
            configurations=params.config['configurations']['hdfs-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['hdfs-site'],
            owner=params.hdfs_user,
            group=params.user_group,
            mode=0644)

    XmlConfig(
        "mapred-site.xml",
        conf_dir=config_dir,
        configurations=params.config['configurations']['mapred-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['mapred-site'],
        owner=params.yarn_user,
        group=params.user_group,
        mode=0644)

    XmlConfig(
        "yarn-site.xml",
        conf_dir=config_dir,
        configurations=params.config['configurations']['yarn-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['yarn-site'],
        owner=params.yarn_user,
        group=params.user_group,
        mode=0644)

    XmlConfig(
        "capacity-scheduler.xml",
        conf_dir=config_dir,
        configurations=params.config['configurations']['capacity-scheduler'],
        configuration_attributes=params.config['configuration_attributes']
        ['capacity-scheduler'],
        owner=params.yarn_user,
        group=params.user_group,
        mode=0644)

    if name == 'resourcemanager':
        Directory(
            params.rm_nodes_exclude_dir,
            mode=0755,
            create_parents=True,
            cd_access='a',
        )
        File(params.exclude_file_path,
             content=Template("exclude_hosts_list.j2"),
             owner=params.yarn_user,
             group=params.user_group)
        if params.include_hosts:
            Directory(
                params.rm_nodes_include_dir,
                mode=0755,
                create_parents=True,
                cd_access='a',
            )
            File(params.include_file_path,
                 content=Template("include_hosts_list.j2"),
                 owner=params.yarn_user,
                 group=params.user_group)
        File(params.yarn_job_summary_log,
             owner=params.yarn_user,
             group=params.user_group)
        if (not is_empty(params.node_label_enable) and params.node_label_enable) \
            or (is_empty(params.node_label_enable) and params.node_labels_dir):
            params.HdfsResource(params.node_labels_dir,
                                type="directory",
                                action="create_on_execute",
                                change_permissions_for_parents=True,
                                owner=params.yarn_user,
                                group=params.user_group,
                                mode=0700)
            params.HdfsResource(None, action="execute")

    elif name == 'apptimelineserver':
        Directory(
            params.ats_leveldb_dir,
            owner=params.yarn_user,
            group=params.user_group,
            create_parents=True,
            cd_access="a",
        )

        # if the stack supports the application timeline-service state store property (timeline_state_store stack feature)
        if params.stack_supports_timeline_state_store:
            Directory(
                params.ats_leveldb_state_store_dir,
                owner=params.yarn_user,
                group=params.user_group,
                create_parents=True,
                cd_access="a",
            )
        # app timeline server 1.5 directories
        if not is_empty(params.entity_groupfs_store_dir):
            parent_path = os.path.dirname(
                os.path.abspath(params.entity_groupfs_store_dir))
            params.HdfsResource(parent_path,
                                type="directory",
                                action="create_on_execute",
                                change_permissions_for_parents=True,
                                owner=params.yarn_user,
                                group=params.user_group,
                                mode=0755)
            params.HdfsResource(params.entity_groupfs_store_dir,
                                type="directory",
                                action="create_on_execute",
                                owner=params.yarn_user,
                                group=params.user_group,
                                mode=params.entity_groupfs_store_dir_mode)
        if not is_empty(params.entity_groupfs_active_dir):
            parent_path = os.path.dirname(
                os.path.abspath(params.entity_groupfs_active_dir))
            params.HdfsResource(parent_path,
                                type="directory",
                                action="create_on_execute",
                                change_permissions_for_parents=True,
                                owner=params.yarn_user,
                                group=params.user_group,
                                mode=0755)
            params.HdfsResource(params.entity_groupfs_active_dir,
                                type="directory",
                                action="create_on_execute",
                                owner=params.yarn_user,
                                group=params.user_group,
                                mode=params.entity_groupfs_active_dir_mode)
        params.HdfsResource(None, action="execute")

    File(format("{limits_conf_dir}/yarn.conf"),
         mode=0644,
         content=Template('yarn.conf.j2'))

    File(format("{limits_conf_dir}/mapreduce.conf"),
         mode=0644,
         content=Template('mapreduce.conf.j2'))

    File(os.path.join(config_dir, "yarn-env.sh"),
         owner=params.yarn_user,
         group=params.user_group,
         mode=0755,
         content=InlineTemplate(params.yarn_env_sh_template))

    container_executor = format("{yarn_container_bin}/container-executor")
    File(container_executor,
         group=params.yarn_executor_container_group,
         mode=params.container_executor_mode)

    File(os.path.join(config_dir, "container-executor.cfg"),
         group=params.user_group,
         mode=0644,
         content=Template('container-executor.cfg.j2'))

    Directory(params.cgroups_dir,
              group=params.user_group,
              create_parents=True,
              mode=0755,
              cd_access="a")

    if params.security_enabled:
        tc_mode = 0644
        tc_owner = "root"
    else:
        tc_mode = None
        tc_owner = params.hdfs_user

    File(os.path.join(config_dir, "mapred-env.sh"),
         owner=tc_owner,
         mode=0755,
         content=InlineTemplate(params.mapred_env_sh_template))

    if params.security_enabled:
        File(os.path.join(params.hadoop_bin, "task-controller"),
             owner="root",
             group=params.mapred_tt_group,
             mode=06050)
        File(os.path.join(config_dir, 'taskcontroller.cfg'),
             owner=tc_owner,
             mode=tc_mode,
             group=params.mapred_tt_group,
             content=Template("taskcontroller.cfg.j2"))
    else:
        File(os.path.join(config_dir, 'taskcontroller.cfg'),
             owner=tc_owner,
             content=Template("taskcontroller.cfg.j2"))

    if "mapred-site" in params.config['configurations']:
        XmlConfig(
            "mapred-site.xml",
            conf_dir=config_dir,
            configurations=params.config['configurations']['mapred-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['mapred-site'],
            owner=params.mapred_user,
            group=params.user_group)

    if "capacity-scheduler" in params.config['configurations']:
        XmlConfig(
            "capacity-scheduler.xml",
            conf_dir=config_dir,
            configurations=params.config['configurations']
            ['capacity-scheduler'],
            configuration_attributes=params.config['configuration_attributes']
            ['capacity-scheduler'],
            owner=params.hdfs_user,
            group=params.user_group)
    if "ssl-client" in params.config['configurations']:
        XmlConfig(
            "ssl-client.xml",
            conf_dir=config_dir,
            configurations=params.config['configurations']['ssl-client'],
            configuration_attributes=params.config['configuration_attributes']
            ['ssl-client'],
            owner=params.hdfs_user,
            group=params.user_group)

        Directory(
            params.hadoop_conf_secure_dir,
            create_parents=True,
            owner='root',
            group=params.user_group,
            cd_access='a',
        )

        XmlConfig(
            "ssl-client.xml",
            conf_dir=params.hadoop_conf_secure_dir,
            configurations=params.config['configurations']['ssl-client'],
            configuration_attributes=params.config['configuration_attributes']
            ['ssl-client'],
            owner=params.hdfs_user,
            group=params.user_group)

    if "ssl-server" in params.config['configurations']:
        XmlConfig(
            "ssl-server.xml",
            conf_dir=config_dir,
            configurations=params.config['configurations']['ssl-server'],
            configuration_attributes=params.config['configuration_attributes']
            ['ssl-server'],
            owner=params.hdfs_user,
            group=params.user_group)
    if os.path.exists(os.path.join(config_dir, 'fair-scheduler.xml')):
        File(os.path.join(config_dir, 'fair-scheduler.xml'),
             owner=params.mapred_user,
             group=params.user_group)

    if os.path.exists(os.path.join(config_dir, 'ssl-client.xml.example')):
        File(os.path.join(config_dir, 'ssl-client.xml.example'),
             owner=params.mapred_user,
             group=params.user_group)

    if os.path.exists(os.path.join(config_dir, 'ssl-server.xml.example')):
        File(os.path.join(config_dir, 'ssl-server.xml.example'),
             owner=params.mapred_user,
             group=params.user_group)
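handle_mounted_dirs, used in the NodeManager section above, creates each dir only when its backing mount is actually present and keeps a dir-to-mount history file so a later run can notice a disappeared drive. A simplified, hypothetical sketch of that contract (the real helper lives in resource_management and handles more edge cases):

import os

def handle_mounted_dirs_sketch(create_fn, dirs_csv, history):
    # history: dict of dir -> mount point recorded by a previous run
    lines = []
    for d in [p.strip() for p in dirs_csv.split(",") if p.strip()]:
        mount = d
        while not os.path.ismount(mount):   # walk up to the owning mount point
            mount = os.path.dirname(mount)
        previous = history.get(d)
        if previous is not None and previous != mount:
            continue    # the dir's former mount is unavailable; don't recreate it on the wrong disk
        create_fn(d)
        lines.append("%s,%s" % (d, mount))
    # returned text becomes the content of the *_dir_to_mount history file,
    # e.g. handle_mounted_dirs_sketch(os.makedirs, "/grid/0/yarn/local,/grid/1/yarn/local", {})
    return "\n".join(lines)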
Code example #15
def hive(name=None):
    import params

    install_lzo_if_needed()

    hive_client_conf_path = format(
        "{stack_root}/current/{component_directory}/conf")
    # Permissions 644 for conf dir (client) files, and 600 for conf.server
    mode_identified = 0644 if params.hive_config_dir == hive_client_conf_path else 0600

    Directory(params.hive_etc_dir_prefix, mode=0755)

    # We should change configurations for client as well as for server.
    # The reason is that stale-configs are service-level, not component.
    Logger.info("Directories to fill with configs: %s" %
                str(params.hive_conf_dirs_list))
    for conf_dir in params.hive_conf_dirs_list:
        fill_conf_dir(conf_dir)

    params.hive_site_config = update_credential_provider_path(
        params.hive_site_config, 'hive-site',
        os.path.join(params.hive_conf_dir, 'hive-site.jceks'),
        params.hive_user, params.user_group)
    XmlConfig(
        "hive-site.xml",
        conf_dir=params.hive_config_dir,
        configurations=params.hive_site_config,
        configuration_attributes=params.config['configuration_attributes']
        ['hive-site'],
        owner=params.hive_user,
        group=params.user_group,
        mode=mode_identified)

    # Generate atlas-application.properties.xml file
    if params.enable_atlas_hook:
        atlas_hook_filepath = os.path.join(params.hive_config_dir,
                                           params.atlas_hook_filename)
        setup_atlas_hook(SERVICE.HIVE,
                         params.hive_atlas_application_properties,
                         atlas_hook_filepath, params.hive_user,
                         params.user_group)

    File(format("{hive_config_dir}/hive-env.sh"),
         owner=params.hive_user,
         group=params.user_group,
         content=InlineTemplate(params.hive_env_sh_template),
         mode=mode_identified)

    # On some OSes this folder may not exist, so create it before pushing files there
    Directory(params.limits_conf_dir,
              create_parents=True,
              owner='root',
              group='root')

    File(os.path.join(params.limits_conf_dir, 'hive.conf'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("hive.conf.j2"))
    if params.security_enabled:
        File(os.path.join(params.hive_config_dir, 'zkmigrator_jaas.conf'),
             owner=params.hive_user,
             group=params.user_group,
             content=Template("zkmigrator_jaas.conf.j2"))

    File(
        format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
        content=DownloadSource(
            format("{jdk_location}{check_db_connection_jar_name}")),
        mode=0644,
    )

    if name != "client":
        setup_non_client()
    if name == 'hiveserver2':
        setup_hiveserver2()
    if name == 'metastore':
        setup_metastore()
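The mode_identified logic above keeps client conf files world-readable while conf.server, whose hive-site.xml may carry credentials, stays readable only by the hive user. A tiny illustration of the split (paths are illustrative):

hive_client_conf_path = "/usr/hdp/current/hive-client/conf"       # illustrative
for conf_dir in (hive_client_conf_path, "/etc/hive/conf.server"):
    mode = 0644 if conf_dir == hive_client_conf_path else 0600
    print("%s -> %o" % (conf_dir, mode))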
Code example #16
def setup_hiveserver2():
    import params

    File(params.start_hiveserver2_path,
         mode=0755,
         content=Template(format('{start_hiveserver2_script}')))

    File(os.path.join(params.hive_server_conf_dir,
                      "hadoop-metrics2-hiveserver2.properties"),
         owner=params.hive_user,
         group=params.user_group,
         content=Template("hadoop-metrics2-hiveserver2.properties.j2"),
         mode=0600)
    XmlConfig(
        "hiveserver2-site.xml",
        conf_dir=params.hive_server_conf_dir,
        configurations=params.config['configurations']['hiveserver2-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['hiveserver2-site'],
        owner=params.hive_user,
        group=params.user_group,
        mode=0600)

    # If the copy-tarball-to-HDFS feature is not supported, create the WebHCat apps dir in HDFS directly
    if not (params.stack_version_formatted_major
            and check_stack_feature(StackFeature.COPY_TARBALL_TO_HDFS,
                                    params.stack_version_formatted_major)):
        params.HdfsResource(params.webhcat_apps_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.webhcat_user,
                            mode=0755)

    # Create webhcat dirs.
    if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
        params.HdfsResource(params.hcat_hdfs_user_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.webhcat_user,
                            mode=params.hcat_hdfs_user_mode)

    params.HdfsResource(params.webhcat_hdfs_user_dir,
                        type="directory",
                        action="create_on_execute",
                        owner=params.webhcat_user,
                        mode=params.webhcat_hdfs_user_mode)

    # ****** Begin Copy Tarballs ******
    # *********************************
    #  If the copy-tarball-to-HDFS feature is supported, copy mapreduce.tar.gz and tez.tar.gz to HDFS
    if params.stack_version_formatted_major and check_stack_feature(
            StackFeature.COPY_TARBALL_TO_HDFS,
            params.stack_version_formatted_major):
        copy_tarball.copy_to_hdfs("mapreduce",
                                  params.user_group,
                                  params.hdfs_user,
                                  skip=params.sysprep_skip_copy_tarballs_hdfs)
        copy_tarball.copy_to_hdfs("tez",
                                  params.user_group,
                                  params.hdfs_user,
                                  skip=params.sysprep_skip_copy_tarballs_hdfs)

    # Always copy pig.tar.gz and hive.tar.gz using the appropriate mode.
    # This can use a different source and dest location to account for custom source/destination paths.
    copy_tarball.copy_to_hdfs("pig",
                              params.user_group,
                              params.hdfs_user,
                              file_mode=params.tarballs_mode,
                              custom_source_file=params.pig_tar_source,
                              custom_dest_file=params.pig_tar_dest_file,
                              skip=params.sysprep_skip_copy_tarballs_hdfs)
    copy_tarball.copy_to_hdfs("hive",
                              params.user_group,
                              params.hdfs_user,
                              file_mode=params.tarballs_mode,
                              custom_source_file=params.hive_tar_source,
                              custom_dest_file=params.hive_tar_dest_file,
                              skip=params.sysprep_skip_copy_tarballs_hdfs)

    wildcard_tarballs = ["sqoop", "hadoop_streaming"]
    for tarball_name in wildcard_tarballs:
        source_file_pattern = getattr(params, tarball_name + "_tar_source")
        dest_dir = getattr(params, tarball_name + "_tar_dest_dir")

        if source_file_pattern is None or dest_dir is None:
            continue

        source_files = glob.glob(
            source_file_pattern) if "*" in source_file_pattern else [
                source_file_pattern
            ]
        for source_file in source_files:
            src_filename = os.path.basename(source_file)
            dest_file = os.path.join(dest_dir, src_filename)

            copy_tarball.copy_to_hdfs(
                tarball_name,
                params.user_group,
                params.hdfs_user,
                file_mode=params.tarballs_mode,
                custom_source_file=source_file,
                custom_dest_file=dest_file,
                skip=params.sysprep_skip_copy_tarballs_hdfs)
    # ******* End Copy Tarballs *******
    # *********************************

    # if warehouse directory is in DFS
    if not params.whs_dir_protocol or params.whs_dir_protocol == urlparse(
            params.default_fs).scheme:
        # Create Hive Metastore Warehouse Dir
        params.HdfsResource(params.hive_apps_whs_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.hive_user,
                            group=params.user_group,
                            mode=params.hive_apps_whs_mode)
    else:
        Logger.info(
            format(
                "Not creating warehouse directory '{hive_apps_whs_dir}', as the location is not in DFS."
            ))

    # Create Hive User Dir
    params.HdfsResource(params.hive_hdfs_user_dir,
                        type="directory",
                        action="create_on_execute",
                        owner=params.hive_user,
                        mode=params.hive_hdfs_user_mode)

    if not is_empty(params.hive_exec_scratchdir) and not urlparse(
            params.hive_exec_scratchdir).path.startswith("/tmp"):
        params.HdfsResource(
            params.hive_exec_scratchdir,
            type="directory",
            action="create_on_execute",
            owner=params.hive_user,
            group=params.hdfs_user,
            mode=0777
        )  # Hive expects this dir to be writeable by everyone as it is used as a temp dir

    if params.hive_repl_cmrootdir:
        params.HdfsResource(params.hive_repl_cmrootdir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.hive_user,
                            group=params.user_group,
                            mode=01777)
    if params.hive_repl_rootdir is not None:
        params.HdfsResource(params.hive_repl_rootdir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.hive_user,
                            group=params.user_group,
                            mode=0700)

    params.HdfsResource(None, action="execute")
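The warehouse-dir guard in setup_hiveserver2 only creates the directory when its URL scheme matches the cluster's default filesystem; anything else (for example an s3a warehouse) is left to the external store. A standalone sketch of that check (Python 2 urlparse, matching the snippet; URLs are illustrative):

from urlparse import urlparse

default_fs = "hdfs://nn1:8020"               # illustrative
whs_dir = "s3a://bucket/warehouse"           # illustrative
whs_dir_protocol = urlparse(whs_dir).scheme
if not whs_dir_protocol or whs_dir_protocol == urlparse(default_fs).scheme:
    print("create warehouse directory in DFS")
else:
    print("skip: warehouse location is not in DFS")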
Code example #17
    def configure(self, env):
        import params
        import status_params
        env.set_params(params)
        env.set_params(status_params)
        self.create_zeppelin_log_dir(env)

        # create the pid and zeppelin dirs
        Directory([params.zeppelin_pid_dir, params.zeppelin_dir],
                  owner=params.zeppelin_user,
                  group=params.zeppelin_group,
                  cd_access="a",
                  create_parents=True,
                  mode=0755)
        self.chown_zeppelin_pid_dir(env)

        XmlConfig(
            "zeppelin-site.xml",
            conf_dir=params.conf_dir,
            configurations=params.config['configurations']['zeppelin-site'],
            owner=params.zeppelin_user,
            group=params.zeppelin_group)
        # write out zeppelin-env.sh
        env_content = InlineTemplate(params.zeppelin_env_content)
        File(format("{params.conf_dir}/zeppelin-env.sh"),
             content=env_content,
             owner=params.zeppelin_user,
             group=params.zeppelin_group)

        # write out shiro.ini
        shiro_ini_content = InlineTemplate(params.shiro_ini_content)
        File(format("{params.conf_dir}/shiro.ini"),
             content=shiro_ini_content,
             owner=params.zeppelin_user,
             group=params.zeppelin_group)

        # write out log4j.properties
        File(format("{params.conf_dir}/log4j.properties"),
             content=params.log4j_properties_content,
             owner=params.zeppelin_user,
             group=params.zeppelin_group)

        self.create_zeppelin_hdfs_conf_dir(env)

        generate_logfeeder_input_config(
            'zeppelin',
            Template("input.config-zeppelin.json.j2", extra_imports=[default]))

        if len(params.hbase_master_hosts) > 0 and params.is_hbase_installed:
            # copy hbase-site.xml
            XmlConfig(
                "hbase-site.xml",
                conf_dir=params.external_dependency_conf,
                configurations=params.config['configurations']['hbase-site'],
                configuration_attributes=params.
                config['configurationAttributes']['hbase-site'],
                owner=params.zeppelin_user,
                group=params.zeppelin_group,
                mode=0644)

            XmlConfig(
                "hdfs-site.xml",
                conf_dir=params.external_dependency_conf,
                configurations=params.config['configurations']['hdfs-site'],
                configuration_attributes=params.
                config['configurationAttributes']['hdfs-site'],
                owner=params.zeppelin_user,
                group=params.zeppelin_group,
                mode=0644)

            XmlConfig(
                "core-site.xml",
                conf_dir=params.external_dependency_conf,
                configurations=params.config['configurations']['core-site'],
                configuration_attributes=params.
                config['configurationAttributes']['core-site'],
                owner=params.zeppelin_user,
                group=params.zeppelin_group,
                mode=0644,
                xml_include_file=params.
                mount_table_xml_inclusion_file_full_path)

            if params.mount_table_content:
                File(params.mount_table_xml_inclusion_file_full_path,
                     owner=params.zeppelin_user,
                     group=params.zeppelin_group,
                     content=params.mount_table_content,
                     mode=0644)
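The Zeppelin configure step mixes two template flavors: Template renders a .j2 file from the service's templates directory, while InlineTemplate renders a string pulled straight out of the configuration (zeppelin_env_content, shiro_ini_content). A minimal hedged illustration of the inline case using plain jinja2 (the resource_management wrappers add params lookup on top of this):

from jinja2 import Template

zeppelin_env_content = "export ZEPPELIN_LOG_DIR={{ log_dir }}"   # would come from the stack config
print(Template(zeppelin_env_content).render(log_dir="/var/log/zeppelin"))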
Code example #18
File: hive_interactive.py Project: csivaguru/ambari
def hive_interactive(name=None):
    import params

    # list of properties that should be excluded from the config
    # this approach is a compromise against adding a dedicated config
    # type for hive_server_interactive or needed config groups on a
    # per component basis
    exclude_list = ['hive.enforce.bucketing', 'hive.enforce.sorting']

    # Copy Tarballs in HDFS.
    if params.stack_version_formatted_major and check_stack_feature(
            StackFeature.ROLLING_UPGRADE,
            params.stack_version_formatted_major):
        resource_created = copy_to_hdfs(
            "tez_hive2",
            params.user_group,
            params.hdfs_user,
            file_mode=params.tarballs_mode,
            host_sys_prepped=params.host_sys_prepped)

        if resource_created:
            params.HdfsResource(None, action="execute")

    Directory(params.hive_interactive_etc_dir_prefix, mode=0755)

    Logger.info("Directories to fill with configs: %s" %
                str(params.hive_conf_dirs_list))
    for conf_dir in params.hive_conf_dirs_list:
        fill_conf_dir(conf_dir)
    '''
  As hive2/hive-site.xml only contains the new + the changed props compared to hive/hive-site.xml,
  we need to merge hive/hive-site.xml and hive2/hive-site.xml and store it in hive2/hive-site.xml.
  '''
    merged_hive_interactive_site = {}
    merged_hive_interactive_site.update(
        params.config['configurations']['hive-site'])
    merged_hive_interactive_site.update(
        params.config['configurations']['hive-interactive-site'])
    for item in exclude_list:
        if item in merged_hive_interactive_site.keys():
            del merged_hive_interactive_site[item]
    '''
  Hive2 doesn't support Atlas, so we need to remove the hook 'org.apache.atlas.hive.hook.HiveHook',
  which would have come in via 'hive.exec.post.hooks' during the site merge above if Atlas is installed.
  '''
    remove_atlas_hook_if_exists(merged_hive_interactive_site)

    # Anything TODO for attributes

    # Merge tez-interactive with tez-site
    XmlConfig(
        "tez-site.xml",
        conf_dir=params.tez_interactive_config_dir,
        configurations=params.config['configurations']['tez-interactive-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['tez-interactive-site'],
        owner=params.tez_interactive_user,
        group=params.user_group,
        mode=0664)

    # Create config files under /etc/hive2/conf and /etc/hive2/conf/conf.server:
    #   hive-site.xml
    #   hive-env.sh
    #   llap-daemon-log4j2.properties
    #   llap-cli-log4j2.properties
    #   hive-log4j2.properties
    #   hive-exec-log4j2.properties
    #   beeline-log4j2.properties

    for conf_dir in params.hive_conf_dirs_list:
        XmlConfig(
            "hive-site.xml",
            conf_dir=conf_dir,
            configurations=merged_hive_interactive_site,
            configuration_attributes=params.config['configuration_attributes']
            ['hive-interactive-site'],
            owner=params.hive_user,
            group=params.user_group,
            mode=0644)

        hive_server_interactive_conf_dir = conf_dir

        File(format("{hive_server_interactive_conf_dir}/hive-env.sh"),
             owner=params.hive_user,
             group=params.user_group,
             content=InlineTemplate(params.hive_interactive_env_sh_template))

        llap_daemon_log4j_filename = 'llap-daemon-log4j2.properties'
        File(format(
            "{hive_server_interactive_conf_dir}/{llap_daemon_log4j_filename}"),
             mode=0644,
             group=params.user_group,
             owner=params.hive_user,
             content=params.llap_daemon_log4j)

        llap_cli_log4j2_filename = 'llap-cli-log4j2.properties'
        File(format(
            "{hive_server_interactive_conf_dir}/{llap_cli_log4j2_filename}"),
             mode=0644,
             group=params.user_group,
             owner=params.hive_user,
             content=params.llap_cli_log4j2)

        hive_log4j2_filename = 'hive-log4j2.properties'
        File(format(
            "{hive_server_interactive_conf_dir}/{hive_log4j2_filename}"),
             mode=0644,
             group=params.user_group,
             owner=params.hive_user,
             content=params.hive_log4j2)

        hive_exec_log4j2_filename = 'hive-exec-log4j2.properties'
        File(format(
            "{hive_server_interactive_conf_dir}/{hive_exec_log4j2_filename}"),
             mode=0644,
             group=params.user_group,
             owner=params.hive_user,
             content=params.hive_exec_log4j2)

        beeline_log4j2_filename = 'beeline-log4j2.properties'
        File(format(
            "{hive_server_interactive_conf_dir}/{beeline_log4j2_filename}"),
             mode=0644,
             group=params.user_group,
             owner=params.hive_user,
             content=params.beeline_log4j2)

        File(format(
            "{hive_server_interactive_conf_dir}/hadoop-metrics2-llapdaemon.properties"
        ),
             owner=params.hive_user,
             group=params.user_group,
             content=Template("hadoop-metrics2-llapdaemon.j2"))

        File(format(
            "{hive_server_interactive_conf_dir}/hadoop-metrics2-llaptaskscheduler.properties"
        ),
             owner=params.hive_user,
             group=params.user_group,
             content=Template("hadoop-metrics2-llaptaskscheduler.j2"))

    # On some OSes this folder may not exist, so create it before pushing files there
    Directory(params.limits_conf_dir,
              create_parents=True,
              owner='root',
              group='root')

    File(os.path.join(params.limits_conf_dir, 'hive.conf'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("hive.conf.j2"))

    if not os.path.exists(params.target_hive_interactive):
        jdbc_connector(params.target_hive_interactive)

    File(format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
         content=DownloadSource(
             format("{jdk_location}{check_db_connection_jar_name}")),
         mode=0644)
    File(params.start_hiveserver2_interactive_path,
         mode=0755,
         content=Template(format('{start_hiveserver2_interactive_script}')))

    Directory(params.hive_pid_dir,
              create_parents=True,
              cd_access='a',
              owner=params.hive_user,
              group=params.user_group,
              mode=0755)
    Directory(params.hive_log_dir,
              create_parents=True,
              cd_access='a',
              owner=params.hive_user,
              group=params.user_group,
              mode=0755)
    Directory(params.hive_interactive_var_lib,
              create_parents=True,
              cd_access='a',
              owner=params.hive_user,
              group=params.user_group,
              mode=0755)
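The merge at the top of hive_interactive can be exercised standalone: interactive properties override base hive-site, deprecated keys are dropped, and the Atlas hook is stripped from hive.exec.post.hooks. A sketch with illustrative values:

hive_site = {'hive.enforce.bucketing': 'true',                                        # illustrative
             'hive.exec.post.hooks': 'org.apache.atlas.hive.hook.HiveHook,my.Hook'}
hive_interactive_site = {'hive.execution.mode': 'llap'}                               # illustrative
exclude_list = ['hive.enforce.bucketing', 'hive.enforce.sorting']

merged = {}
merged.update(hive_site)
merged.update(hive_interactive_site)
for item in exclude_list:
    merged.pop(item, None)

# strip the Atlas hook, mirroring remove_atlas_hook_if_exists
hooks = [h.strip() for h in merged.get('hive.exec.post.hooks', '').split(',')
         if h.strip() and h.strip() != 'org.apache.atlas.hive.hook.HiveHook']
merged['hive.exec.post.hooks'] = ','.join(hooks)
print(merged)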
Code example #19
def setup_ranger_audit_solr():
    import params

    if params.security_enabled and params.stack_supports_ranger_kerberos:

        if params.solr_jaas_file is not None:
            File(format("{solr_jaas_file}"),
                 content=Template("ranger_solr_jaas_conf.j2"),
                 owner=params.unix_user)
    try:
        check_znode()

        if params.stack_supports_ranger_solr_configs:
            Logger.info(
                'Solr configurations supported, creating solr-configurations.')
            File(format("{ranger_solr_conf}/solrconfig.xml"),
                 content=InlineTemplate(params.ranger_solr_config_content),
                 owner=params.unix_user,
                 group=params.unix_group,
                 mode=0644)

            solr_cloud_util.upload_configuration_to_zk(
                zookeeper_quorum=params.zookeeper_quorum,
                solr_znode=params.solr_znode,
                config_set=params.ranger_solr_config_set,
                config_set_dir=params.ranger_solr_conf,
                tmp_dir=params.tmp_dir,
                java64_home=params.ambari_java_home,
                solrconfig_content=InlineTemplate(
                    params.ranger_solr_config_content),
                jaas_file=params.solr_jaas_file,
                retry=30,
                interval=5)

        else:
            Logger.info(
                'Solr configurations not supported, skipping solr-configurations.'
            )
            solr_cloud_util.upload_configuration_to_zk(
                zookeeper_quorum=params.zookeeper_quorum,
                solr_znode=params.solr_znode,
                config_set=params.ranger_solr_config_set,
                config_set_dir=params.ranger_solr_conf,
                tmp_dir=params.tmp_dir,
                java64_home=params.ambari_java_home,
                jaas_file=params.solr_jaas_file,
                retry=30,
                interval=5)

        if params.security_enabled and params.has_infra_solr \
          and not params.is_external_solrCloud_enabled and params.stack_supports_ranger_kerberos:

            solr_cloud_util.add_solr_roles(
                params.config,
                roles=[
                    params.infra_solr_role_ranger_admin,
                    params.infra_solr_role_ranger_audit,
                    params.infra_solr_role_dev
                ],
                new_service_principals=[params.ranger_admin_jaas_principal])
            service_default_principals_map = [('hdfs', 'nn'),
                                              ('hbase', 'hbase'),
                                              ('hive', 'hive'),
                                              ('kafka', 'kafka'),
                                              ('kms', 'rangerkms'),
                                              ('knox', 'knox'),
                                              ('nifi', 'nifi'),
                                              ('storm', 'storm'),
                                              ('yarn', 'yarn')]
            service_principals = get_ranger_plugin_principals(
                service_default_principals_map)
            solr_cloud_util.add_solr_roles(
                params.config,
                roles=[
                    params.infra_solr_role_ranger_audit,
                    params.infra_solr_role_dev
                ],
                new_service_principals=service_principals)
        solr_cloud_util.create_collection(
            zookeeper_quorum=params.zookeeper_quorum,
            solr_znode=params.solr_znode,
            collection=params.ranger_solr_collection_name,
            config_set=params.ranger_solr_config_set,
            java64_home=params.ambari_java_home,
            shards=params.ranger_solr_shards,
            replication_factor=int(params.replication_factor),
            jaas_file=params.solr_jaas_file,
            trust_store_password=default(
                'configurations/ranger-admin-site/ranger.truststore.password',
                None),
            trust_store_type="JKS" if default(
                'configurations/ranger-admin-site/ranger.truststore.file',
                None) else None,
            trust_store_location=default(
                'configurations/ranger-admin-site/ranger.truststore.file',
                None))

        if params.security_enabled and params.has_infra_solr \
          and not params.is_external_solrCloud_enabled and params.stack_supports_ranger_kerberos:
            secure_znode(
                format('{solr_znode}/configs/{ranger_solr_config_set}'),
                params.solr_jaas_file)
            secure_znode(
                format(
                    '{solr_znode}/collections/{ranger_solr_collection_name}'),
                params.solr_jaas_file)
    except ExecutionFailed as execution_exception:
        Logger.error(
            'Error when configuring Solr for Ranger; please check that the Solr/ZooKeeper services are up and running:\n {0}'
            .format(execution_exception))
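The default() calls above read optional keys out of the nested command JSON and fall back to None when the path is missing, which is what makes the truststore arguments optional. A hedged stand-in showing the lookup shape (the real helper lives in resource_management and may differ in detail):

def default_sketch(config, path, fallback=None):
    # walk a '/'-separated path through nested dicts, returning fallback when absent
    node = config
    for part in path.strip('/').split('/'):
        if not isinstance(node, dict) or part not in node:
            return fallback
        node = node[part]
    return node

config = {'configurations': {'ranger-admin-site': {}}}   # illustrative
print(default_sketch(config, 'configurations/ranger-admin-site/ranger.truststore.file'))  # -> None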
Code example #20
File: setup_logsearch_solr.py Project: yunlik/ambari
def setup_logsearch_solr(name=None):
    import params

    if name == 'server':
        Directory([
            params.solr_dir, params.logsearch_solr_log_dir,
            params.logsearch_solr_piddir, params.logsearch_solr_conf,
            params.logsearch_solr_datadir,
            params.logsearch_solr_data_resources_dir
        ],
                  mode=0755,
                  cd_access='a',
                  owner=params.logsearch_solr_user,
                  group=params.logsearch_solr_group,
                  create_parents=True)

        File(params.logsearch_solr_log,
             mode=0644,
             owner=params.logsearch_solr_user,
             group=params.logsearch_solr_group,
             content='')

        File(format("{logsearch_solr_conf}/logsearch-solr-env.sh"),
             content=InlineTemplate(params.solr_env_content),
             mode=0755,
             owner=params.logsearch_solr_user)

        File(format("{logsearch_solr_datadir}/solr.xml"),
             content=InlineTemplate(params.solr_xml_content),
             owner=params.logsearch_solr_user)

        File(format("{logsearch_solr_conf}/log4j.properties"),
             content=InlineTemplate(params.solr_log4j_content),
             owner=params.logsearch_solr_user)

        File(format("{logsearch_solr_datadir}/zoo.cfg"),
             content=Template("zoo.cfg.j2"),
             owner=params.logsearch_solr_user)

        zk_cli_prefix = format(
            'export JAVA_HOME={java64_home}; {cloud_scripts}/zkcli.sh -zkhost {zookeeper_hosts}'
        )
        Execute(
            format('{zk_cli_prefix} -cmd makepath {logsearch_solr_znode}'),
            not_if=format("{zk_cli_prefix} -cmd get {logsearch_solr_znode}"),
            ignore_failures=True,
            user=params.logsearch_solr_user)
    elif name == 'client':
        Directory(
            [params.solr_client_dir, params.logsearch_solr_client_log_dir],
            mode=0755,
            cd_access='a',
            owner=params.logsearch_solr_user,
            group=params.logsearch_solr_group,
            create_parents=True)
        solrCliFilename = format("{solr_client_dir}/solrCloudCli.sh")
        File(solrCliFilename,
             mode=0755,
             group=params.logsearch_solr_group,
             owner=params.logsearch_solr_user,
             content=StaticFile(solrCliFilename))

        File(format("{solr_client_dir}/log4j.properties"),
             content=Template("solr-client-log4j.properties.j2"),
             owner=params.logsearch_solr_user)

        File(params.logsearch_solr_client_log,
             mode=0644,
             owner=params.logsearch_solr_user,
             group=params.logsearch_solr_group,
             content='')

    else:
        raise Fail('Neither client nor server was selected to install.')
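The Execute/not_if pair above is an idempotency guard: makepath only runs when a get on the znode fails. A plain-subprocess sketch of the same pattern (the zkcli path and ZooKeeper host below are illustrative assumptions):

import subprocess

def ensure_znode(zkcli, zkhost, znode):
    probe = [zkcli, '-zkhost', zkhost, '-cmd', 'get', znode]
    if subprocess.call(probe) != 0:  # non-zero exit: znode missing
        subprocess.check_call([zkcli, '-zkhost', zkhost, '-cmd', 'makepath', znode])

# ensure_znode('/opt/logsearch_solr/server/scripts/cloud-scripts/zkcli.sh', 'zk1:2181', '/logsearch')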
Code example #21
File: storm.py Project: mbigelow/ambari
def storm(name=None):
  import params

  Directory(params.log_dir,
            owner=params.storm_user,
            group=params.user_group,
            mode=0777,
            recursive=True
  )

  Directory([params.pid_dir, params.local_dir],
            owner=params.storm_user,
            group=params.user_group,
            recursive=True,
            cd_access="a",
  )

  Directory(params.conf_dir,
            group=params.user_group,
            recursive=True,
            cd_access="a",
  )

  File(format("{conf_dir}/config.yaml"),
       content=Template("config.yaml.j2"),
       owner=params.storm_user,
       group=params.user_group
  )

  configurations = params.config['configurations']['storm-site']

  File(format("{conf_dir}/storm.yaml"),
       content=yaml_config_template(configurations),
       owner=params.storm_user,
       group=params.user_group
  )

  if params.has_metric_collector:
    File(format("{conf_dir}/storm-metrics2.properties"),
        owner=params.storm_user,
        group=params.user_group,
        content=Template("storm-metrics2.properties.j2")
    )

    Execute(format("{sudo} ln -s {metric_collector_sink_jar} {storm_lib_dir}/ambari-metrics-storm-sink.jar"),
            not_if=format("ls {storm_lib_dir}/ambari-metrics-storm-sink.jar"),
            only_if=format("ls {metric_collector_sink_jar}")
    )

  File(format("{conf_dir}/storm-env.sh"),
    owner=params.storm_user,
    content=InlineTemplate(params.storm_env_sh_template)
  )

  if params.security_enabled:
    TemplateConfig(format("{conf_dir}/storm_jaas.conf"),
                   owner=params.storm_user
    )
    if params.hdp_stack_version != "" and compare_versions(params.hdp_stack_version, '2.2') >= 0:
      TemplateConfig(format("{conf_dir}/client_jaas.conf"),
                     owner=params.storm_user
      )
      min_ruid = configurations['_storm.min.ruid'] if '_storm.min.ruid' in configurations else ''

      min_user_ruid = int(min_ruid) if min_ruid.isdigit() else _find_real_user_min_uid()

      File(format("{conf_dir}/worker-launcher.cfg"),
           content=Template("worker-launcher.cfg.j2", min_user_ruid = min_user_ruid),
           owner='root',
           group=params.user_group
      )
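_find_real_user_min_uid() is defined elsewhere in storm.py; a minimal sketch of its assumed behavior, reading UID_MIN from /etc/login.defs so worker-launcher refuses to run workers as system users:

def find_real_user_min_uid(login_defs='/etc/login.defs'):
    with open(login_defs) as f:
        for line in f:
            parts = line.split()
            if len(parts) == 2 and parts[0] == 'UID_MIN' and parts[1].isdigit():
                return int(parts[1])
    raise RuntimeError('UID_MIN not found in %s' % login_defs)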
Code example #22
File: kafka.py Project: Flipkart/ambari
def kafka(upgrade_type=None):
    import params

    ensure_base_directories()
    kafka_server_config = mutable_config_dict(
        params.config['configurations']['kafka-broker'])
    # This still has an issue of hostnames being alphabetically out-of-order for broker.id in IOP-4.1.
    # Starting in IOP 4.2, Kafka handles the generation of broker.id so Ambari doesn't have to.

    effective_version = params.iop_stack_version if upgrade_type is None else format_stack_version(
        params.version)
    Logger.info(format("Effective stack version: {effective_version}"))

    if effective_version is not None and effective_version != "" and compare_versions(
            effective_version, '4.2.0.0') < 0:
        brokerid = str(sorted(params.kafka_hosts).index(params.hostname))
        kafka_server_config['broker.id'] = brokerid
        Logger.info(format("Calculating broker.id as {brokerid}"))

    # listeners and advertised.listeners are only added in 4.2.0.0 onwards.
    if effective_version is not None and effective_version != "" and compare_versions(
            effective_version, '4.2.0.0') >= 0:
        listeners = kafka_server_config['listeners'].replace(
            "localhost", params.hostname)
        Logger.info(format("Kafka listeners: {listeners}"))

        if params.security_enabled and params.kafka_kerberos_enabled:
            Logger.info("Kafka kerberos security is enabled.")
            if "SASL" not in listeners:
                listeners = listeners.replace("PLAINTEXT", "SASL_PLAINTEXT")

            kafka_server_config['listeners'] = listeners
            kafka_server_config['advertised.listeners'] = listeners
            Logger.info(format("Kafka advertised listeners: {listeners}"))
        else:
            kafka_server_config['listeners'] = listeners

            if 'advertised.listeners' in kafka_server_config:
                advertised_listeners = kafka_server_config[
                    'advertised.listeners'].replace("localhost",
                                                    params.hostname)
                kafka_server_config[
                    'advertised.listeners'] = advertised_listeners
                Logger.info(
                    format(
                        "Kafka advertised listeners: {advertised_listeners}"))
    else:
        kafka_server_config['host.name'] = params.hostname

    if params.has_metric_collector:
        kafka_server_config[
            'kafka.timeline.metrics.host'] = params.metric_collector_host
        kafka_server_config[
            'kafka.timeline.metrics.port'] = params.metric_collector_port

    kafka_data_dir = kafka_server_config['log.dirs']
    kafka_data_dirs = filter(None, kafka_data_dir.split(","))
    Directory(
        kafka_data_dirs[:],  # Todo: remove list copy when AMBARI-14373 is fixed
        mode=0755,
        cd_access='a',
        owner=params.kafka_user,
        group=params.user_group,
        create_parents=True)
    set_dir_ownership(kafka_data_dirs)

    PropertiesFile(
        "server.properties",
        dir=params.conf_dir,
        properties=kafka_server_config,
        owner=params.kafka_user,
        group=params.user_group,
    )

    File(format("{conf_dir}/kafka-env.sh"),
         owner=params.kafka_user,
         content=InlineTemplate(params.kafka_env_sh_template))

    if params.log4j_props is not None:
        File(format("{conf_dir}/log4j.properties"),
             mode=0644,
             group=params.user_group,
             owner=params.kafka_user,
             content=params.log4j_props)

    if params.security_enabled and params.kafka_kerberos_enabled:
        TemplateConfig(format("{conf_dir}/kafka_jaas.conf"),
                       owner=params.kafka_user)

        TemplateConfig(format("{conf_dir}/kafka_client_jaas.conf"),
                       owner=params.kafka_user)

    # On some OSes this folder may not exist, so create it before pushing files there
    Directory(params.limits_conf_dir,
              create_parents=True,
              owner='root',
              group='root')

    File(os.path.join(params.limits_conf_dir, 'kafka.conf'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("kafka.conf.j2"))
    File(os.path.join(params.conf_dir, 'tools-log4j.properties'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("tools-log4j.properties.j2"))

    setup_symlink(params.kafka_managed_pid_dir, params.kafka_pid_dir)
    setup_symlink(params.kafka_managed_log_dir, params.kafka_log_dir)
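The pre-4.2 broker.id assignment above reduces to one pure function: each broker derives a stable id from its position in the sorted host list (which is also why alphabetically out-of-order hostnames were a problem, as the comment notes):

def broker_id(kafka_hosts, hostname):
    return str(sorted(kafka_hosts).index(hostname))

assert broker_id(['c.example', 'a.example', 'b.example'], 'b.example') == '1'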
Code example #23
def oozie(is_server=False):
    import params

    if is_server:
        params.HdfsResource(params.oozie_hdfs_user_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.oozie_user,
                            mode=params.oozie_hdfs_user_mode)
        params.HdfsResource(None, action="execute")
    Directory(params.conf_dir,
              create_parents=True,
              owner=params.oozie_user,
              group=params.user_group)
    XmlConfig(
        "oozie-site.xml",
        conf_dir=params.conf_dir,
        configurations=params.oozie_site,
        configuration_attributes=params.config['configuration_attributes']
        ['oozie-site'],
        owner=params.oozie_user,
        group=params.user_group,
        mode=0664)
    File(
        format("{conf_dir}/oozie-env.sh"),
        owner=params.oozie_user,
        content=InlineTemplate(params.oozie_env_sh_template),
        group=params.user_group,
    )

    # On some OSes this folder may not exist, so create it before pushing files there
    Directory(params.limits_conf_dir,
              create_parents=True,
              owner='root',
              group='root')

    File(os.path.join(params.limits_conf_dir, 'oozie.conf'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("oozie.conf.j2"))

    if params.log4j_props is not None:
        File(format("{params.conf_dir}/oozie-log4j.properties"),
             mode=0644,
             group=params.user_group,
             owner=params.oozie_user,
             content=params.log4j_props)
    elif os.path.exists(format("{params.conf_dir}/oozie-log4j.properties")):
        File(format("{params.conf_dir}/oozie-log4j.properties"),
             mode=0644,
             group=params.user_group,
             owner=params.oozie_user)

    if params.stack_version_formatted and check_stack_feature(
            StackFeature.OOZIE_ADMIN_USER, params.stack_version_formatted):
        File(format("{params.conf_dir}/adminusers.txt"),
             mode=0644,
             group=params.user_group,
             owner=params.oozie_user,
             content=Template('adminusers.txt.j2',
                              oozie_admin_users=params.oozie_admin_users))
    else:
        File(format("{params.conf_dir}/adminusers.txt"),
             owner=params.oozie_user,
             group=params.user_group)

    if params.jdbc_driver_name in ("com.mysql.jdbc.Driver",
                                   "com.microsoft.sqlserver.jdbc.SQLServerDriver",
                                   "org.postgresql.Driver",
                                   "oracle.jdbc.driver.OracleDriver"):
        File(
            format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
            content=DownloadSource(
                format("{jdk_location}{check_db_connection_jar_name}")),
        )

    oozie_ownership()

    if is_server:
        oozie_server_specific()
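A rough sketch of the format() helper these examples rely on (an assumption about its behavior, not Ambari's actual implementation): it resolves placeholder names from the caller's scope, and the standard Formatter provides attribute access like {params.conf_dir} for free:

import inspect
import string

def format(fmt, **extra):  # shadows the builtin, as the library helper does
    caller = inspect.currentframe().f_back
    variables = dict(caller.f_globals)
    variables.update(caller.f_locals)
    variables.update(extra)
    return string.Formatter().vformat(fmt, (), variables)

conf_dir = '/etc/oozie/conf'
print(format('{conf_dir}/oozie-env.sh'))  # /etc/oozie/conf/oozie-env.sh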
Code example #24
def setup_infra_solr(name=None):
    import params

    if name == 'server':
        Directory([
            params.infra_solr_log_dir, params.infra_solr_piddir,
            params.infra_solr_datadir, params.infra_solr_data_resources_dir
        ],
                  mode=0755,
                  cd_access='a',
                  create_parents=True,
                  owner=params.infra_solr_user,
                  group=params.user_group)

        Directory([params.solr_dir, params.infra_solr_conf],
                  mode=0755,
                  cd_access='a',
                  owner=params.infra_solr_user,
                  group=params.user_group,
                  create_parents=True,
                  recursive_ownership=True)

        File(params.infra_solr_log,
             mode=0644,
             owner=params.infra_solr_user,
             group=params.user_group,
             content='')

        File(format("{infra_solr_conf}/infra-solr-env.sh"),
             content=InlineTemplate(params.solr_env_content),
             mode=0755,
             owner=params.infra_solr_user,
             group=params.user_group)

        File(format("{infra_solr_datadir}/solr.xml"),
             content=InlineTemplate(params.solr_xml_content),
             owner=params.infra_solr_user,
             group=params.user_group)

        File(format("{infra_solr_conf}/log4j.properties"),
             content=InlineTemplate(params.solr_log4j_content),
             owner=params.infra_solr_user,
             group=params.user_group)

        custom_security_json_location = format(
            "{infra_solr_conf}/custom-security.json")
        File(custom_security_json_location,
             content=InlineTemplate(params.infra_solr_security_json_content),
             owner=params.infra_solr_user,
             group=params.user_group,
             mode=0640)

        jaas_file = params.infra_solr_jaas_file if params.security_enabled else None
        url_scheme = 'https' if params.infra_solr_ssl_enabled == 'Yes' else 'http'

        create_ambari_solr_znode()

        security_json_file_location = custom_security_json_location \
          if params.infra_solr_security_json_content and str(params.infra_solr_security_json_content).strip() \
          else format("{infra_solr_conf}/security.json") # security.json file to upload

        if params.security_enabled:
            File(format("{infra_solr_jaas_file}"),
                 content=Template("infra_solr_jaas.conf.j2"),
                 owner=params.infra_solr_user)

            File(format("{infra_solr_conf}/security.json"),
                 content=Template("infra-solr-security.json.j2"),
                 owner=params.infra_solr_user,
                 group=params.user_group,
                 mode=0640)

        solr_cloud_util.set_cluster_prop(
            zookeeper_quorum=params.zookeeper_quorum,
            solr_znode=params.infra_solr_znode,
            java64_home=params.java64_home,
            prop_name="urlScheme",
            prop_value=url_scheme,
            jaas_file=jaas_file)

        solr_cloud_util.setup_kerberos_plugin(
            zookeeper_quorum=params.zookeeper_quorum,
            solr_znode=params.infra_solr_znode,
            jaas_file=jaas_file,
            java64_home=params.java64_home,
            secure=params.security_enabled,
            security_json_location=security_json_file_location)

        if params.security_enabled:
            solr_cloud_util.secure_solr_znode(
                zookeeper_quorum=params.zookeeper_quorum,
                solr_znode=params.infra_solr_znode,
                jaas_file=jaas_file,
                java64_home=params.java64_home,
                sasl_users_str=params.infra_solr_sasl_user)

    elif name == 'client':
        solr_cloud_util.setup_solr_client(params.config)

    else:
        raise Fail('Neither client nor server was selected to install.')
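The security.json source selection above, factored into a small pure helper: prefer the custom content only when it is non-empty after stripping, otherwise fall back to the generated security.json (paths illustrative):

def security_json_location(custom_content, conf_dir):
    if custom_content and str(custom_content).strip():
        return '%s/custom-security.json' % conf_dir
    return '%s/security.json' % conf_dir

assert security_json_location('   ', '/etc/ambari-infra-solr/conf') == '/etc/ambari-infra-solr/conf/security.json'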
Code example #25
def metadata(type='server'):
    import params

    # Needed by both Server and Client
    Directory(params.conf_dir,
              mode=0755,
              cd_access='a',
              owner=params.metadata_user,
              group=params.user_group,
              create_parents=True)

    if type == "server":
        Directory([params.pid_dir],
                  mode=0755,
                  cd_access='a',
                  owner=params.metadata_user,
                  group=params.user_group,
                  create_parents=True)
        Directory(format('{conf_dir}/solr'),
                  mode=0755,
                  cd_access='a',
                  owner=params.metadata_user,
                  group=params.user_group,
                  create_parents=True,
                  recursive_ownership=True)
        Directory(params.log_dir,
                  mode=0755,
                  cd_access='a',
                  owner=params.metadata_user,
                  group=params.user_group,
                  create_parents=True)
        Directory(params.data_dir,
                  mode=0644,
                  cd_access='a',
                  owner=params.metadata_user,
                  group=params.user_group,
                  create_parents=True)
        Directory(params.expanded_war_dir,
                  mode=0644,
                  cd_access='a',
                  owner=params.metadata_user,
                  group=params.user_group,
                  create_parents=True)
        File(format("{expanded_war_dir}/atlas.war"),
             content=StaticFile(
                 format('{metadata_home}/server/webapp/atlas.war')))
        File(format("{conf_dir}/atlas-log4j.xml"),
             mode=0644,
             owner=params.metadata_user,
             group=params.user_group,
             content=InlineTemplate(params.metadata_log4j_content))
        File(format("{conf_dir}/atlas-env.sh"),
             owner=params.metadata_user,
             group=params.user_group,
             mode=0755,
             content=InlineTemplate(params.metadata_env_content))

        if not is_empty(params.atlas_admin_username) and not is_empty(
                params.atlas_admin_password):
            psswd_output = hashlib.sha256(
                params.atlas_admin_password).hexdigest()
            ModifyPropertiesFile(
                format("{conf_dir}/users-credentials.properties"),
                properties={
                    format('{atlas_admin_username}'):
                    format('ROLE_ADMIN::{psswd_output}')
                },
                owner=params.metadata_user)

        files_to_chown = [
            format("{conf_dir}/policy-store.txt"),
            format("{conf_dir}/users-credentials.properties")
        ]
        for file_path in files_to_chown:
            if os.path.exists(file_path):
                Execute(
                    ('chown', format('{metadata_user}:{user_group}'), file_path),
                    sudo=True)
                Execute(('chmod', '644', file_path), sudo=True)

        if params.metadata_solrconfig_content:
            File(format("{conf_dir}/solr/solrconfig.xml"),
                 mode=0644,
                 owner=params.metadata_user,
                 group=params.user_group,
                 content=InlineTemplate(params.metadata_solrconfig_content))

    # Needed by both Server and Client
    PropertiesFile(format('{conf_dir}/{conf_file}'),
                   properties=params.application_properties,
                   mode=0644,
                   owner=params.metadata_user,
                   group=params.user_group)

    if params.security_enabled:
        TemplateConfig(format(params.atlas_jaas_file),
                       owner=params.metadata_user)

    if type == 'server' and params.search_backend_solr and params.has_infra_solr:
        solr_cloud_util.setup_solr_client(params.config)
        check_znode()
        jaasFile = params.atlas_jaas_file if params.security_enabled else None
        upload_conf_set('atlas_configs', jaasFile)

        if params.security_enabled:  # update permissions before creating the collections
            solr_cloud_util.add_solr_roles(
                params.config,
                roles=[
                    params.infra_solr_role_atlas,
                    params.infra_solr_role_ranger_audit,
                    params.infra_solr_role_dev
                ],
                new_service_principals=[params.atlas_jaas_principal])

        create_collection('vertex_index', 'atlas_configs', jaasFile)
        create_collection('edge_index', 'atlas_configs', jaasFile)
        create_collection('fulltext_index', 'atlas_configs', jaasFile)

        if params.security_enabled:
            secure_znode(format('{infra_solr_znode}/configs/atlas_configs'),
                         jaasFile)
            secure_znode(format('{infra_solr_znode}/collections/vertex_index'),
                         jaasFile)
            secure_znode(format('{infra_solr_znode}/collections/edge_index'),
                         jaasFile)
            secure_znode(
                format('{infra_solr_znode}/collections/fulltext_index'),
                jaasFile)

    File(params.atlas_hbase_setup,
         group=params.user_group,
         owner=params.hbase_user,
         content=Template("atlas_hbase_setup.rb.j2"))

    is_atlas_upgrade_support = check_stack_feature(
        StackFeature.ATLAS_UPGRADE_SUPPORT,
        get_stack_feature_version(params.config))

    if is_atlas_upgrade_support and params.security_enabled:

        File(params.atlas_kafka_setup,
             group=params.user_group,
             owner=params.kafka_user,
             content=Template("atlas_kafka_acl.sh.j2"))

        # these files are required only when a Kafka broker is not configured on this host
        if not params.host_with_kafka:
            File(format("{kafka_conf_dir}/kafka-env.sh"),
                 owner=params.kafka_user,
                 content=InlineTemplate(params.kafka_env_sh_template))

            File(format("{kafka_conf_dir}/kafka_jaas.conf"),
                 group=params.user_group,
                 owner=params.kafka_user,
                 content=Template("kafka_jaas.conf.j2"))

    if params.stack_supports_atlas_hdfs_site_on_namenode_ha and len(
            params.namenode_host) > 1:
        XmlConfig(
            "hdfs-site.xml",
            conf_dir=params.conf_dir,
            configurations=params.config['configurations']['hdfs-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['hdfs-site'],
            owner=params.metadata_user,
            group=params.user_group,
            mode=0644)
    else:
        File(format('{conf_dir}/hdfs-site.xml'), action="delete")
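The users-credentials.properties update above stores a SHA-256 hex digest per admin user; a minimal sketch of the property line it produces (the .encode() is for Python 3, while the Python 2 code above hashes the str directly):

import hashlib

def credential_line(username, password):
    digest = hashlib.sha256(password.encode('utf-8')).hexdigest()
    return '%s=ROLE_ADMIN::%s' % (username, digest)

print(credential_line('admin', 'secret'))  # admin=ROLE_ADMIN::2bb80d5...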
Code example #26
File: setup_logsearch.py Project: stevens515/ambari
def setup_logsearch():
    import params

    Directory([params.logsearch_log_dir, params.logsearch_pid_dir],
              mode=0755,
              cd_access='a',
              owner=params.logsearch_user,
              group=params.user_group,
              create_parents=True)

    Directory([
        params.logsearch_dir, params.logsearch_server_conf,
        params.logsearch_config_set_dir
    ],
              mode=0755,
              cd_access='a',
              owner=params.logsearch_user,
              group=params.user_group,
              create_parents=True,
              recursive_ownership=True)

    Directory(params.logsearch_server_keys_folder,
              cd_access='a',
              mode=0755,
              owner=params.logsearch_user,
              group=params.user_group)

    File(format("{logsearch_log_dir}/{logsearch_log}"),
         mode=0644,
         owner=params.logsearch_user,
         group=params.user_group,
         content='')

    if params.credential_store_enabled:
        params.logsearch_env_config = update_credential_provider_path(
            params.logsearch_env_config, 'logsearch-env',
            params.logsearch_env_jceks_file, params.logsearch_user,
            params.user_group)
        params.logsearch_properties[
            HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME] = 'jceks://file' + params.logsearch_env_jceks_file
        File(format("{logsearch_server_keys_folder}/ks_pass.txt"),
             action="delete")
        File(format("{logsearch_server_keys_folder}/ts_pass.txt"),
             action="delete")
    else:
        File(format("{logsearch_server_keys_folder}/ks_pass.txt"),
             content=params.logsearch_keystore_password,
             mode=0600,
             owner=params.logsearch_user,
             group=params.user_group)
        File(format("{logsearch_server_keys_folder}/ts_pass.txt"),
             content=params.logsearch_truststore_password,
             mode=0600,
             owner=params.logsearch_user,
             group=params.user_group)

    PropertiesFile(format("{logsearch_server_conf}/logsearch.properties"),
                   properties=params.logsearch_properties)

    File(format("{logsearch_server_conf}/HadoopServiceConfig.json"),
         content=Template("HadoopServiceConfig.json.j2"),
         owner=params.logsearch_user,
         group=params.user_group)

    File(format("{logsearch_server_conf}/log4j.xml"),
         content=InlineTemplate(params.logsearch_app_log4j_content),
         owner=params.logsearch_user,
         group=params.user_group)

    File(format("{logsearch_server_conf}/logsearch-env.sh"),
         content=InlineTemplate(params.logsearch_env_content),
         mode=0755,
         owner=params.logsearch_user,
         group=params.user_group)

    File(format("{logsearch_server_conf}/logsearch-admin.json"),
         content=InlineTemplate(params.logsearch_admin_content),
         owner=params.logsearch_user,
         group=params.user_group)

    File(format("{logsearch_config_set_dir}/hadoop_logs/conf/solrconfig.xml"),
         content=InlineTemplate(
             params.logsearch_service_logs_solrconfig_content),
         owner=params.logsearch_user,
         group=params.user_group)

    File(format("{logsearch_config_set_dir}/audit_logs/conf/solrconfig.xml"),
         content=InlineTemplate(
             params.logsearch_audit_logs_solrconfig_content),
         owner=params.logsearch_user,
         group=params.user_group)

    if params.security_enabled:
        File(format("{logsearch_jaas_file}"),
             content=Template("logsearch_jaas.conf.j2"),
             owner=params.logsearch_user)
    Execute(("chmod", "-R", "ugo+r",
             format("{logsearch_server_conf}/solr_configsets")),
            sudo=True)
    check_znode()

    if params.security_enabled and not params.logsearch_use_external_solr:
        solr_cloud_util.add_solr_roles(
            params.config,
            roles=[
                params.infra_solr_role_logsearch,
                params.infra_solr_role_ranger_admin, params.infra_solr_role_dev
            ],
            new_service_principals=[params.logsearch_kerberos_principal])
        solr_cloud_util.add_solr_roles(
            params.config,
            roles=[
                params.infra_solr_role_logfeeder, params.infra_solr_role_dev
            ],
            new_service_principals=[params.logfeeder_kerberos_principal])
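PropertiesFile above renders a dict into Java-style key=value lines; a minimal sketch of that rendering (sorted for stable output; the real resource also handles escaping and file ownership):

def render_properties(props):
    return '\n'.join('%s=%s' % (k, v) for k, v in sorted(props.items())) + '\n'

print(render_properties({'logsearch.solr.zk_connect_string': 'zk1:2181/infra-solr'}))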
Code example #27
def kafka(upgrade_type=None):
    import params
    ensure_base_directories()

    kafka_server_config = mutable_config_dict(
        params.config['configurations']['kafka-broker'])
    # This still has an issue of hostnames being alphabetically out-of-order for broker.id in HDP-2.2.
    # Starting in HDP 2.3, Kafka handles the generation of broker.id so Ambari doesn't have to.

    effective_version = params.stack_version_formatted if upgrade_type is None else format_stack_version(
        params.version)
    Logger.info(format("Effective stack version: {effective_version}"))

    # listeners and advertised.listeners are only added in 2.3.0.0 onwards.
    if effective_version is not None and effective_version != "" and \
       check_stack_feature(StackFeature.KAFKA_LISTENERS, effective_version):

        listeners = kafka_server_config['listeners'].replace(
            "localhost", params.hostname)
        Logger.info(format("Kafka listeners: {listeners}"))
        kafka_server_config['listeners'] = listeners

        if params.kerberos_security_enabled and params.kafka_kerberos_enabled:
            Logger.info("Kafka kerberos security is enabled.")

            if "SASL" not in listeners:
                listeners = kafka_server_config['listeners'].replace(
                    "PLAINTEXT", "PLAINTEXTSASL")
                kafka_server_config['listeners'] = listeners

            kafka_server_config['advertised.listeners'] = listeners
            Logger.info(format("Kafka advertised listeners: {listeners}"))
        elif 'advertised.listeners' in kafka_server_config:
            advertised_listeners = kafka_server_config[
                'advertised.listeners'].replace("localhost", params.hostname)
            kafka_server_config['advertised.listeners'] = advertised_listeners
            Logger.info(
                format("Kafka advertised listeners: {advertised_listeners}"))
    else:
        kafka_server_config['host.name'] = params.hostname

    if params.has_metric_collector:
        kafka_server_config[
            'kafka.timeline.metrics.hosts'] = params.ams_collector_hosts
        kafka_server_config[
            'kafka.timeline.metrics.port'] = params.metric_collector_port
        kafka_server_config[
            'kafka.timeline.metrics.protocol'] = params.metric_collector_protocol
        kafka_server_config[
            'kafka.timeline.metrics.truststore.path'] = params.metric_truststore_path
        kafka_server_config[
            'kafka.timeline.metrics.truststore.type'] = params.metric_truststore_type
        kafka_server_config[
            'kafka.timeline.metrics.truststore.password'] = params.metric_truststore_password

    kafka_data_dir = kafka_server_config['log.dirs']
    kafka_data_dirs = filter(None, kafka_data_dir.split(","))

    # resolve this host's rack (falls back to /default-rack)
    rack = "/default-rack"
    if params.all_racks and params.hostname in params.all_hosts:
        rack = params.all_racks[params.all_hosts.index(params.hostname)]

    Directory(
        kafka_data_dirs,
        mode=0755,
        cd_access='a',
        owner=params.kafka_user,
        group=params.user_group,
        create_parents=True,
        recursive_ownership=True,
    )

    PropertiesFile(
        "server.properties",
        mode=0640,
        dir=params.conf_dir,
        properties=kafka_server_config,
        owner=params.kafka_user,
        group=params.user_group,
    )

    File(format("{conf_dir}/kafka-env.sh"),
         owner=params.kafka_user,
         content=InlineTemplate(params.kafka_env_sh_template))

    if params.log4j_props is not None:
        File(format("{conf_dir}/log4j.properties"),
             mode=0644,
             group=params.user_group,
             owner=params.kafka_user,
             content=InlineTemplate(params.log4j_props))

    if (params.kerberos_security_enabled and
            params.kafka_kerberos_enabled) or params.kafka_other_sasl_enabled:
        if params.kafka_jaas_conf_template:
            File(format("{conf_dir}/kafka_jaas.conf"),
                 owner=params.kafka_user,
                 content=InlineTemplate(params.kafka_jaas_conf_template))
        else:
            TemplateConfig(format("{conf_dir}/kafka_jaas.conf"),
                           owner=params.kafka_user)

        if params.kafka_client_jaas_conf_template:
            File(format("{conf_dir}/kafka_client_jaas.conf"),
                 owner=params.kafka_user,
                 content=InlineTemplate(
                     params.kafka_client_jaas_conf_template))
        else:
            TemplateConfig(format("{conf_dir}/kafka_client_jaas.conf"),
                           owner=params.kafka_user)

    # On some OSes this folder may not exist, so create it before pushing files there
    Directory(params.limits_conf_dir,
              create_parents=True,
              owner='root',
              group='root')

    File(os.path.join(params.limits_conf_dir, 'kafka.conf'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("kafka.conf.j2"))

    File(os.path.join(params.conf_dir, 'tools-log4j.properties'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("tools-log4j.properties.j2"))

    generate_logfeeder_input_config(
        'kafka', Template("input.config-kafka.json.j2",
                          extra_imports=[default]))

    setup_symlink(params.kafka_managed_pid_dir, params.kafka_pid_dir)
    setup_symlink(params.kafka_managed_log_dir, params.kafka_log_dir)
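The listener rewriting above, isolated into a pure function: substitute the real hostname for localhost and, under Kerberos, upgrade PLAINTEXT to the SASL protocol name this stack expects:

def rewrite_listeners(listeners, hostname, kerberos=False):
    listeners = listeners.replace('localhost', hostname)
    if kerberos and 'SASL' not in listeners:
        listeners = listeners.replace('PLAINTEXT', 'PLAINTEXTSASL')
    return listeners

assert rewrite_listeners('PLAINTEXT://localhost:6667', 'kb1.example.com', kerberos=True) \
    == 'PLAINTEXTSASL://kb1.example.com:6667'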
Code example #28
def hbase(name=None):
    import params

    # ensure that matching LZO libraries are installed for HBase
    lzo_utils.install_lzo_if_needed()

    Directory(params.etc_prefix_dir, mode=0755)

    Directory(params.hbase_conf_dir,
              owner=params.hbase_user,
              group=params.user_group,
              create_parents=True)

    Directory(params.java_io_tmpdir, create_parents=True, mode=0777)

    # If a file location is specified in ioengine parameter,
    # ensure that directory exists. Otherwise create the
    # directory with permissions assigned to hbase:hadoop.
    ioengine_input = params.ioengine_param
    if ioengine_input is not None:
        if ioengine_input.startswith("file:/"):
            ioengine_fullpath = ioengine_input[5:]
            ioengine_dir = os.path.dirname(ioengine_fullpath)
            Directory(ioengine_dir,
                      owner=params.hbase_user,
                      group=params.user_group,
                      create_parents=True,
                      mode=0755)

    parent_dir = os.path.dirname(params.tmp_dir)
    # In case there are several placeholders in the path
    while ("${" in parent_dir):
        parent_dir = os.path.dirname(parent_dir)
    if parent_dir != os.path.abspath(os.sep):
        Directory(
            parent_dir,
            create_parents=True,
            cd_access="a",
        )
        Execute(("chmod", "1777", parent_dir), sudo=True)

    XmlConfig("hbase-site.xml",
              conf_dir=params.hbase_conf_dir,
              configurations=params.config['configurations']['hbase-site'],
              configuration_attributes=params.config['configurationAttributes']
              ['hbase-site'],
              owner=params.hbase_user,
              group=params.user_group)

    if check_stack_feature(StackFeature.PHOENIX_CORE_HDFS_SITE_REQUIRED,
                           params.version_for_stack_feature_checks):
        XmlConfig(
            "core-site.xml",
            conf_dir=params.hbase_conf_dir,
            configurations=params.config['configurations']['core-site'],
            configuration_attributes=params.config['configurationAttributes']
            ['core-site'],
            owner=params.hbase_user,
            group=params.user_group,
            xml_include_file=params.mount_table_xml_inclusion_file_full_path)

        if params.mount_table_content:
            File(params.mount_table_xml_inclusion_file_full_path,
                 owner=params.hbase_user,
                 group=params.user_group,
                 content=params.mount_table_content,
                 mode=0644)

        if 'hdfs-site' in params.config['configurations']:
            XmlConfig(
                "hdfs-site.xml",
                conf_dir=params.hbase_conf_dir,
                configurations=params.config['configurations']['hdfs-site'],
                configuration_attributes=params.
                config['configurationAttributes']['hdfs-site'],
                owner=params.hbase_user,
                group=params.user_group)
    else:
        File(format("{params.hbase_conf_dir}/hdfs-site.xml"), action="delete")
        File(format("{params.hbase_conf_dir}/core-site.xml"), action="delete")

    if 'hbase-policy' in params.config['configurations']:
        XmlConfig(
            "hbase-policy.xml",
            conf_dir=params.hbase_conf_dir,
            configurations=params.config['configurations']['hbase-policy'],
            configuration_attributes=params.config['configurationAttributes']
            ['hbase-policy'],
            owner=params.hbase_user,
            group=params.user_group)
    # Manually overriding ownership of file installed by hadoop package
    else:
        File(format("{params.hbase_conf_dir}/hbase-policy.xml"),
             owner=params.hbase_user,
             group=params.user_group)

    File(
        format("{hbase_conf_dir}/hbase-env.sh"),
        owner=params.hbase_user,
        content=InlineTemplate(params.hbase_env_sh_template),
        group=params.user_group,
    )

    # On some OSes this folder may not exist, so create it before pushing files there
    Directory(params.limits_conf_dir,
              create_parents=True,
              owner='root',
              group='root')

    File(os.path.join(params.limits_conf_dir, 'hbase.conf'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("hbase.conf.j2"))

    hbase_TemplateConfig(
        params.metric_prop_file_name,
        tag='GANGLIA-MASTER' if name == 'master' else 'GANGLIA-RS')

    hbase_TemplateConfig('regionservers')

    if params.security_enabled:
        hbase_TemplateConfig(format("hbase_{name}_jaas.conf"))

    if name != "client":
        Directory(
            params.pid_dir,
            owner=params.hbase_user,
            create_parents=True,
            cd_access="a",
            mode=0755,
        )

        Directory(
            params.log_dir,
            owner=params.hbase_user,
            create_parents=True,
            cd_access="a",
            mode=0755,
        )

        generate_logfeeder_input_config(
            'hbase',
            Template("input.config-hbase.json.j2", extra_imports=[default]))

    if params.log4j_props is not None:
        File(format("{params.hbase_conf_dir}/log4j.properties"),
             mode=0644,
             group=params.user_group,
             owner=params.hbase_user,
             content=InlineTemplate(params.log4j_props))
    elif os.path.exists(format("{params.hbase_conf_dir}/log4j.properties")):
        File(format("{params.hbase_conf_dir}/log4j.properties"),
             mode=0644,
             group=params.user_group,
             owner=params.hbase_user)
    if name == "master":
        params.HdfsResource(params.hbase_hdfs_root_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.hbase_user)
        params.HdfsResource(params.hbase_staging_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.hbase_user,
                            mode=0711)
        if params.create_hbase_home_directory:
            params.HdfsResource(params.hbase_home_directory,
                                type="directory",
                                action="create_on_execute",
                                owner=params.hbase_user,
                                mode=0755)
        params.HdfsResource(None, action="execute")

    if params.phoenix_enabled:
        Package(params.phoenix_package,
                retry_on_repo_unavailability=params.
                agent_stack_retry_on_unavailability,
                retry_count=params.agent_stack_retry_count)
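The ioengine handling above strips the 'file:' prefix before taking the parent directory; as a pure function with an illustrative path:

import os

def ioengine_dir(ioengine_param):
    if ioengine_param and ioengine_param.startswith('file:/'):
        return os.path.dirname(ioengine_param[len('file:'):])
    return None

assert ioengine_dir('file:/data/hbase/bucketcache.data') == '/data/hbase'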
Code example #29
File: setup_logsearch.py Project: jtstorck/ambari
def setup_logsearch():
  import params

  Directory([params.logsearch_log_dir, params.logsearch_pid_dir],
            mode=0755,
            cd_access='a',
            owner=params.logsearch_user,
            group=params.user_group,
            create_parents=True
            )

  Directory([params.logsearch_dir, params.logsearch_server_conf, params.logsearch_config_set_dir],
            mode=0755,
            cd_access='a',
            owner=params.logsearch_user,
            group=params.user_group,
            create_parents=True,
            recursive_ownership=True
            )

  Directory(params.logsearch_server_keys_folder,
            cd_access='a',
            mode=0755,
            owner=params.logsearch_user,
            group=params.user_group)

  File(format("{logsearch_server_keys_folder}/ks_pass.txt"),
       content=params.logsearch_keystore_password,
       mode=0600,
       owner=params.logsearch_user,
       group=params.user_group
       )

  File(format("{logsearch_server_keys_folder}/ts_pass.txt"),
       content=params.logsearch_truststore_password,
       mode=0600,
       owner=params.logsearch_user,
       group=params.user_group
       )

  File(params.logsearch_log,
       mode=0644,
       owner=params.logsearch_user,
       group=params.user_group,
       content=''
       )

  PropertiesFile(format("{logsearch_server_conf}/logsearch.properties"),
                 properties=params.logsearch_properties
                 )

  File(format("{logsearch_server_conf}/HadoopServiceConfig.json"),
       content=Template("HadoopServiceConfig.json.j2"),
       owner=params.logsearch_user,
       group=params.user_group
       )

  File(format("{logsearch_server_conf}/log4j.xml"),
       content=InlineTemplate(params.logsearch_app_log4j_content),
       owner=params.logsearch_user,
       group=params.user_group
       )

  File(format("{logsearch_server_conf}/logsearch-env.sh"),
       content=InlineTemplate(params.logsearch_env_content),
       mode=0755,
       owner=params.logsearch_user,
       group=params.user_group
       )

  File(format("{logsearch_server_conf}/logsearch-admin.json"),
       content=InlineTemplate(params.logsearch_admin_content),
       owner=params.logsearch_user,
       group=params.user_group
       )

  File(format("{logsearch_config_set_dir}/hadoop_logs/conf/solrconfig.xml"),
       content=InlineTemplate(params.logsearch_service_logs_solrconfig_content),
       owner=params.logsearch_user,
       group=params.user_group
       )

  File(format("{logsearch_config_set_dir}/audit_logs/conf/solrconfig.xml"),
       content=InlineTemplate(params.logsearch_audit_logs_solrconfig_content),
       owner=params.logsearch_user,
       group=params.user_group
       )

  if params.security_enabled:
    File(format("{logsearch_jaas_file}"),
         content=Template("logsearch_jaas.conf.j2"),
         owner=params.logsearch_user
         )

  Execute(("chmod", "-R", "ugo+r", format("{logsearch_server_conf}/solr_configsets")),
          sudo=True
          )
Code example #30
def namenode(action=None,
             hdfs_binary=None,
             do_format=True,
             upgrade_type=None,
             env=None):
    if action is None:
        raise Fail('"action" parameter is required for function namenode().')

    if action in ["start", "stop"] and hdfs_binary is None:
        raise Fail(
            '"hdfs_binary" parameter is required for function namenode().')

    if action == "configure":
        import params
        # we need this directory to be present before any action (HA manual
        # steps for an additional NameNode)
        create_name_dirs(params.dfs_name_dir)
    elif action == "start":
        Logger.info("Called service {0} with upgrade_type: {1}".format(
            action, str(upgrade_type)))
        setup_ranger_hdfs(upgrade_type=upgrade_type)
        import params
        if do_format and not params.hdfs_namenode_format_disabled:
            format_namenode()
            pass

        File(params.exclude_file_path,
             content=Template("exclude_hosts_list.j2"),
             owner=params.hdfs_user,
             group=params.user_group)

        if params.dfs_ha_enabled and \
          params.dfs_ha_namenode_standby is not None and \
          params.hostname == params.dfs_ha_namenode_standby:
            # if the current host is the standby NameNode in an HA deployment
            # run the bootstrap command, to start the NameNode in standby mode
            # this requires that the active NameNode is already up and running,
            # so this execute should be re-tried upon failure, up to a timeout
            success = bootstrap_standby_namenode(params)
            if not success:
                raise Fail("Could not bootstrap standby namenode")

        if upgrade_type == "rolling" and params.dfs_ha_enabled:
            # Most likely, ZKFC is up since RU will initiate the failover command. However, if that failed, it would have tried
            # to kill ZKFC manually, so we need to start it if not already running.
            safe_zkfc_op(action, env)

        options = ""
        if upgrade_type == "rolling":
            if params.upgrade_direction == Direction.UPGRADE:
                options = "-rollingUpgrade started"
            elif params.upgrade_direction == Direction.DOWNGRADE:
                options = "-rollingUpgrade downgrade"

        elif upgrade_type == "nonrolling":
            is_previous_image_dir = is_previous_fs_image()
            Logger.info(
                format(
                    "Previous file system image dir present is {is_previous_image_dir}"
                ))

            if params.upgrade_direction == Direction.UPGRADE:
                options = "-rollingUpgrade started"
            elif params.upgrade_direction == Direction.DOWNGRADE:
                options = "-rollingUpgrade downgrade"

        Logger.info(format("Option for start command: {options}"))

        service(action="start",
                name="namenode",
                user=params.hdfs_user,
                options=options,
                create_pid_dir=True,
                create_log_dir=True)

        if params.security_enabled:
            Execute(format(
                "{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"
            ),
                    user=params.hdfs_user)

        if params.dfs_ha_enabled:
            is_active_namenode_cmd = as_user(format(
                "{hdfs_binary} --config {hadoop_conf_dir} haadmin -getServiceState {namenode_id} | grep active"
            ),
                                             params.hdfs_user,
                                             env={
                                                 'PATH': params.hadoop_bin_dir
                                             })
        else:
            is_active_namenode_cmd = True

        # During NonRolling Upgrade, both NameNodes are initially down,
        # so no point in checking if this is the active or standby.
        if upgrade_type == "nonrolling":
            is_active_namenode_cmd = False

        # ___Scenario___________|_Expected safemode state__|_Wait for safemode OFF____|
        # no-HA                 | ON -> OFF                | Yes                      |
        # HA and active         | ON -> OFF                | Yes                      |
        # HA and standby        | no change                | no check                 |
        # RU with HA on active  | ON -> OFF                | Yes                      |
        # RU with HA on standby | ON -> OFF                | Yes                      |
        # EU with HA on active  | no change                | no check                 |
        # EU with HA on standby | no change                | no check                 |
        # EU non-HA             | no change                | no check                 |

        check_for_safemode_off = False
        msg = ""
        if params.dfs_ha_enabled:
            if upgrade_type is not None:
                check_for_safemode_off = True
                msg = "Must wait to leave safemode since High Availability is enabled during a Stack Upgrade"
            else:
                Logger.info("Wait for NameNode to become active.")
                if is_active_namenode(hdfs_binary):  # active
                    check_for_safemode_off = True
                    msg = "Must wait to leave safemode since High Availability is enabled and this is the Active NameNode."
                else:
                    msg = "Will remain in the current safemode state."
        else:
            msg = "Must wait to leave safemode since High Availability is not enabled."
            check_for_safemode_off = True

        Logger.info(msg)

        # During a NonRolling (aka Express Upgrade), stay in safemode since the DataNodes are down.
        stay_in_safe_mode = False
        if upgrade_type == "nonrolling":
            stay_in_safe_mode = True

        if check_for_safemode_off:
            Logger.info("Stay in safe mode: {0}".format(stay_in_safe_mode))
            if not stay_in_safe_mode:
                wait_for_safemode_off(hdfs_binary)

        # Always run this on non-HA, or active NameNode during HA.
        create_hdfs_directories(is_active_namenode_cmd)
        create_ranger_audit_hdfs_directories(is_active_namenode_cmd)

    elif action == "stop":
        import params
        service(action="stop", name="namenode", user=params.hdfs_user)
    elif action == "status":
        import status_params
        check_process_status(status_params.namenode_pid_file)
    elif action == "decommission":
        decommission()
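The safemode decision table in the comments above can be encoded directly; this sketch returns (check_for_safemode_off, stay_in_safe_mode) and mirrors the branches in the start path:

def safemode_policy(dfs_ha_enabled, is_active, upgrade_type):
    stay_in_safe_mode = (upgrade_type == 'nonrolling')
    if dfs_ha_enabled:
        check_for_safemode_off = upgrade_type is not None or is_active
    else:
        check_for_safemode_off = True
    return check_for_safemode_off, stay_in_safe_mode

assert safemode_policy(False, False, None) == (True, False)        # no-HA: wait for safemode OFF
assert safemode_policy(True, False, 'nonrolling') == (True, True)  # EU on standby: stay in safemode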
Code example #31
def streamline(env, upgrade_type=None):
    import params
    ensure_base_directories()

    File(format("{conf_dir}/streamline-env.sh"),
         owner=params.streamline_user,
         content=InlineTemplate(params.streamline_env_sh_template))

    # On some OSes this folder may not exist, so create it before pushing files there
    Directory(params.limits_conf_dir,
              create_parents=True,
              owner='root',
              group='root')

    Directory(
        [params.jar_storage],
        owner=params.streamline_user,
        group=params.user_group,
        create_parents=True,
        cd_access="a",
        mode=0755,
    )

    # this is hard-coded because we do not accept the registry local-jars
    # setting; it should be removed in a future release
    Directory("/tmp/schema-registry/local-jars",
              owner=params.streamline_user,
              group=params.user_group,
              create_parents=True,
              cd_access="a",
              mode=0755)

    Directory(
        [params.topology_test_results],
        owner=params.streamline_user,
        group=params.user_group,
        create_parents=True,
        cd_access="a",
        mode=0755,
    )

    File(os.path.join(params.limits_conf_dir, 'streamline.conf'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("streamline.conf.j2"))

    File(format("{conf_dir}/streamline.yaml"),
         content=Template("streamline.yaml.j2"),
         owner=params.streamline_user,
         group=params.user_group,
         mode=0600)

    generate_logfeeder_input_config(
        'streamline',
        Template("input.config-streamline.json.j2", extra_imports=[default]))

    if params.security_enabled:
        if params.streamline_jaas_conf_template:
            File(format("{conf_dir}/streamline_jaas.conf"),
                 owner=params.streamline_user,
                 content=InlineTemplate(params.streamline_jaas_conf_template))
        else:
            TemplateConfig(format("{conf_dir}/streamline_jaas.conf"),
                           owner=params.streamline_user)

    if not os.path.islink(params.streamline_managed_log_dir):
        Link(params.streamline_managed_log_dir, to=params.streamline_log_dir)

    download_database_connector_if_needed()
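The managed-log-dir handling above only creates the symlink when one is not already present; the same guard in plain Python (the Link resource additionally handles ownership and replacement):

import os

def ensure_link(link_path, target):
    if not os.path.islink(link_path):
        os.symlink(target, link_path)

# ensure_link('/var/log/streamline', '/grid/0/log/streamline')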
Code example #32
def hdfs(name=None):
    import params

    if params.create_lib_snappy_symlinks:
        install_snappy()

    # On some OSes this folder may not exist, so create it before pushing files there
    Directory(params.limits_conf_dir,
              create_parents=True,
              owner='root',
              group='root')

    File(os.path.join(params.limits_conf_dir, 'hdfs.conf'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("hdfs.conf.j2"))

    if params.security_enabled:
        File(os.path.join(params.hadoop_conf_dir, 'hdfs_dn_jaas.conf'),
             owner=params.hdfs_user,
             group=params.user_group,
             content=Template("hdfs_dn_jaas.conf.j2"))
        File(os.path.join(params.hadoop_conf_dir, 'hdfs_nn_jaas.conf'),
             owner=params.hdfs_user,
             group=params.user_group,
             content=Template("hdfs_nn_jaas.conf.j2"))
        if params.dfs_ha_enabled:
            File(os.path.join(params.hadoop_conf_dir, 'hdfs_jn_jaas.conf'),
                 owner=params.hdfs_user,
                 group=params.user_group,
                 content=Template("hdfs_jn_jaas.conf.j2"))

        tc_mode = 0644
        tc_owner = "root"
    else:
        tc_mode = None
        tc_owner = params.hdfs_user

    if "hadoop-policy" in params.config['configurations']:
        XmlConfig(
            "hadoop-policy.xml",
            conf_dir=params.hadoop_conf_dir,
            configurations=params.config['configurations']['hadoop-policy'],
            configuration_attributes=params.config['configuration_attributes']
            ['hadoop-policy'],
            owner=params.hdfs_user,
            group=params.user_group)

    if "ssl-client" in params.config['configurations']:
        XmlConfig(
            "ssl-client.xml",
            conf_dir=params.hadoop_conf_dir,
            configurations=params.config['configurations']['ssl-client'],
            configuration_attributes=params.config['configuration_attributes']
            ['ssl-client'],
            owner=params.hdfs_user,
            group=params.user_group)

        Directory(
            params.hadoop_conf_secure_dir,
            create_parents=True,
            owner='root',
            group=params.user_group,
            cd_access='a',
        )

        XmlConfig(
            "ssl-client.xml",
            conf_dir=params.hadoop_conf_secure_dir,
            configurations=params.config['configurations']['ssl-client'],
            configuration_attributes=params.config['configuration_attributes']
            ['ssl-client'],
            owner=params.hdfs_user,
            group=params.user_group)

    if "ssl-server" in params.config['configurations']:
        XmlConfig(
            "ssl-server.xml",
            conf_dir=params.hadoop_conf_dir,
            configurations=params.config['configurations']['ssl-server'],
            configuration_attributes=params.config['configuration_attributes']
            ['ssl-server'],
            owner=params.hdfs_user,
            group=params.user_group)

    XmlConfig(
        "hdfs-site.xml",
        conf_dir=params.hadoop_conf_dir,
        configurations=params.config['configurations']['hdfs-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['hdfs-site'],
        owner=params.hdfs_user,
        group=params.user_group)

    XmlConfig(
        "core-site.xml",
        conf_dir=params.hadoop_conf_dir,
        configurations=params.config['configurations']['core-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['core-site'],
        owner=params.hdfs_user,
        group=params.user_group,
        mode=0644)

    File(os.path.join(params.hadoop_conf_dir, 'slaves'),
         owner=tc_owner,
         content=Template("slaves.j2"))

    install_lzo_if_needed()
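A minimal sketch of what XmlConfig("core-site.xml", configurations=...) ultimately writes: Hadoop's <configuration><property> XML. The handling of configuration_attributes (e.g. final=true) is omitted here as a simplification:

from xml.sax.saxutils import escape

def render_hadoop_xml(configurations):
    lines = ['<configuration>']
    for name, value in sorted(configurations.items()):
        lines.append('  <property>')
        lines.append('    <name>%s</name>' % escape(name))
        lines.append('    <value>%s</value>' % escape(str(value)))
        lines.append('  </property>')
    lines.append('</configuration>')
    return '\n'.join(lines)

print(render_hadoop_xml({'fs.defaultFS': 'hdfs://nn1.example.com:8020'}))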