コード例 #1
0
def storm(name=None):
    import params
    import os

    Directory(
        params.log_dir,
        owner=params.storm_user,
        group=params.user_group,
        mode=0777,
        create_parents=True,
        cd_access="a",
    )

    Directory(
        [params.pid_dir, params.local_dir],
        owner=params.storm_user,
        group=params.user_group,
        create_parents=True,
        cd_access="a",
        mode=0755,
    )

    Directory(
        params.conf_dir,
        group=params.user_group,
        create_parents=True,
        cd_access="a",
    )

    File(
        format("{limits_conf_dir}/storm.conf"),
        owner='root',
        group='root',
        mode=0644,
        content=Template("storm.conf.j2"))

    File(
        format("{conf_dir}/config.yaml"),
        content=Template("config.yaml.j2"),
        owner=params.storm_user,
        group=params.user_group)

    configurations = params.config['configurations']['storm-site']

    File(
        format("{conf_dir}/storm.yaml"),
        content=yaml_config_template(configurations),
        owner=params.storm_user,
        group=params.user_group)

    File(
        format("{conf_dir}/storm-env.sh"),
        owner=params.storm_user,
        content=InlineTemplate(params.storm_env_sh_template))

    generate_logfeeder_input_config(
        'storm', Template(
            "input.config-storm.json.j2", extra_imports=[default]))

    # Generate atlas-application.properties.xml file and symlink the hook jars
    if params.enable_atlas_hook:
        atlas_hook_filepath = os.path.join(params.conf_dir,
                                           params.atlas_hook_filename)
        setup_atlas_hook(
            SERVICE.STORM, params.storm_atlas_application_properties,
            atlas_hook_filepath, params.storm_user, params.user_group)

    if params.storm_logs_supported:
        Directory(
            params.log4j_dir,
            owner=params.storm_user,
            group=params.user_group,
            mode=0755,
            create_parents=True)

        File(
            format("{log4j_dir}/cluster.xml"),
            owner=params.storm_user,
            content=InlineTemplate(params.storm_cluster_log4j_content))
        File(
            format("{log4j_dir}/worker.xml"),
            owner=params.storm_user,
            content=InlineTemplate(params.storm_worker_log4j_content))

    if params.security_enabled:
        TemplateConfig(
            format("{conf_dir}/storm_jaas.conf"),
            owner=params.storm_user,
            mode=0644)
        TemplateConfig(
            format("{conf_dir}/client_jaas.conf"),
            owner=params.storm_user,
            mode=0644)
        minRuid = configurations['_storm.min.ruid'] if configurations.has_key(
            '_storm.min.ruid') else ''

        min_user_ruid = int(
            minRuid) if minRuid.isdigit() else _find_real_user_min_uid()

        File(
            format("{conf_dir}/worker-launcher.cfg"),
            content=Template(
                "worker-launcher.cfg.j2", min_user_ruid=min_user_ruid),
            owner='root',
            group=params.user_group)
    else:
        File(format("{conf_dir}/storm_jaas.conf"), action="delete")
        File(format("{conf_dir}/client_jaas.conf"), action="delete")
コード例 #2
0
def storm(name=None):
    import params
    import os

    Directory(
        params.log_dir,
        owner=params.storm_user,
        group=params.user_group,
        mode=0777,
        create_parents=True,
        cd_access="a",
    )

    Directory(
        [params.pid_dir, params.local_dir],
        owner=params.storm_user,
        group=params.user_group,
        create_parents=True,
        cd_access="a",
        mode=0755,
    )

    Directory(
        params.conf_dir,
        group=params.user_group,
        create_parents=True,
        cd_access="a",
    )

    File(format("{conf_dir}/config.yaml"),
         content=Template("config.yaml.j2"),
         owner=params.storm_user,
         group=params.user_group)

    File(params.conf_dir + "/jmxetric-conf.xml",
         content=StaticFile("jmxetric-conf.xml"),
         owner=params.storm_user)
    File(params.storm_lib_dir + "/gmetric4j-1.0.3.jar",
         content=StaticFile("gmetric4j-1.0.3.jar"),
         owner=params.storm_user)
    File(params.storm_lib_dir + "/jmxetric-1.0.4.jar",
         content=StaticFile("jmxetric-1.0.4.jar"),
         owner=params.storm_user)
    File(params.storm_lib_dir + "/oncrpc-1.0.7.jar",
         content=StaticFile("oncrpc-1.0.7.jar"),
         owner=params.storm_user)

    configurations = params.config['configurations']['storm-site']

    File(format("{conf_dir}/storm.yaml"),
         content=yaml_config_template(configurations),
         owner=params.storm_user,
         group=params.user_group)

    File(format("{conf_dir}/storm-env.sh"),
         owner=params.storm_user,
         content=InlineTemplate(params.storm_env_sh_template))

    # Generate atlas-application.properties.xml file and symlink the hook jars
    if params.enable_atlas_hook:
        script_path = os.path.realpath(__file__).split(
            '/services')[0] + '/hooks/before-INSTALL/scripts/atlas'
        sys.path.append(script_path)
        from setup_atlas_hook import has_atlas_in_cluster, setup_atlas_hook, setup_atlas_jar_symlinks
        atlas_hook_filepath = os.path.join(params.conf_dir,
                                           params.atlas_hook_filename)
        setup_atlas_hook(SERVICE.STORM,
                         params.storm_atlas_application_properties,
                         atlas_hook_filepath, params.storm_user,
                         params.user_group)
        storm_extlib_dir = os.path.join(params.storm_component_home_dir,
                                        "extlib")
        setup_atlas_jar_symlinks("storm", storm_extlib_dir)

    if params.has_metric_collector:
        File(format("{conf_dir}/storm-metrics2.properties"),
             owner=params.storm_user,
             group=params.user_group,
             content=Template("storm-metrics2.properties.j2"))

        # Remove symlinks. They can be there, if you doing upgrade from HDP < 2.2 to HDP >= 2.2
        Link(format("{storm_lib_dir}/ambari-metrics-storm-sink.jar"),
             action="delete")
        # On old HDP 2.1 versions, this symlink may also exist and break EU to newer versions
        Link("/usr/lib/storm/lib/ambari-metrics-storm-sink.jar",
             action="delete")

        sink_jar = params.sink_jar

        Execute(format(
            "{sudo} ln -s {sink_jar} {storm_lib_dir}/ambari-metrics-storm-sink.jar"
        ),
                not_if=format(
                    "ls {storm_lib_dir}/ambari-metrics-storm-sink.jar"),
                only_if=format("ls {sink_jar}"))

    if params.storm_logs_supported:
        Directory(params.log4j_dir,
                  owner=params.storm_user,
                  group=params.user_group,
                  mode=0755,
                  create_parents=True)

        File(format("{log4j_dir}/cluster.xml"),
             owner=params.storm_user,
             content=InlineTemplate(params.storm_cluster_log4j_content))
        File(format("{log4j_dir}/worker.xml"),
             owner=params.storm_user,
             content=InlineTemplate(params.storm_worker_log4j_content))

    if params.security_enabled:
        TemplateConfig(format("{conf_dir}/storm_jaas.conf"),
                       owner=params.storm_user)

        TemplateConfig(format("{conf_dir}/client_jaas.conf"),
                       owner=params.storm_user)
        minRuid = configurations['_storm.min.ruid'] if configurations.has_key(
            '_storm.min.ruid') else ''

        min_user_ruid = int(
            minRuid) if minRuid.isdigit() else _find_real_user_min_uid()

        File(format("{conf_dir}/worker-launcher.cfg"),
             content=Template("worker-launcher.cfg.j2",
                              min_user_ruid=min_user_ruid),
             owner='root',
             group=params.user_group)
コード例 #3
0
ファイル: kafka.py プロジェクト: shwhite/ambari
def kafka(upgrade_type=None):
    import params
    ensure_base_directories()

    kafka_server_config = mutable_config_dict(
        params.config['configurations']['kafka-broker'])
    # This still has an issue of hostnames being alphabetically out-of-order for broker.id in HDP-2.2.
    # Starting in HDP 2.3, Kafka handles the generation of broker.id so Ambari doesn't have to.

    effective_version = params.hdp_stack_version if upgrade_type is None else format_hdp_stack_version(
        params.version)
    Logger.info(format("Effective stack version: {effective_version}"))

    if effective_version is not None and effective_version != "" and compare_versions(
            effective_version, '2.2.0.0') >= 0 and compare_versions(
                effective_version, '2.3.0.0') < 0:
        if len(params.kafka_hosts
               ) > 0 and params.hostname in params.kafka_hosts:
            brokerid = str(sorted(params.kafka_hosts).index(params.hostname))
            kafka_server_config['broker.id'] = brokerid
            Logger.info(format("Calculating broker.id as {brokerid}"))

    # listeners and advertised.listeners are only added in 2.3.0.0 onwards.
    if effective_version is not None and effective_version != "" and compare_versions(
            effective_version, '2.3.0.0') >= 0:
        listeners = kafka_server_config['listeners'].replace(
            "localhost", params.hostname)
        Logger.info(format("Kafka listeners: {listeners}"))

        if params.security_enabled and params.kafka_kerberos_enabled:
            Logger.info("Kafka kerberos security is enabled.")
            if "SASL" not in listeners:
                listeners = listeners.replace("PLAINTEXT", "PLAINTEXTSASL")

            kafka_server_config['listeners'] = listeners
            kafka_server_config['advertised.listeners'] = listeners
            Logger.info(format("Kafka advertised listeners: {listeners}"))
        else:
            kafka_server_config['listeners'] = listeners

            if 'advertised.listeners' in kafka_server_config:
                advertised_listeners = kafka_server_config[
                    'advertised.listeners'].replace("localhost",
                                                    params.hostname)
                kafka_server_config[
                    'advertised.listeners'] = advertised_listeners
                Logger.info(
                    format(
                        "Kafka advertised listeners: {advertised_listeners}"))
    else:
        kafka_server_config['host.name'] = params.hostname

    if params.has_metric_collector:
        kafka_server_config[
            'kafka.timeline.metrics.host'] = params.metric_collector_host
        kafka_server_config[
            'kafka.timeline.metrics.port'] = params.metric_collector_port
        kafka_server_config[
            'kafka.timeline.metrics.protocol'] = params.metric_collector_protocol
        kafka_server_config[
            'kafka.timeline.metrics.truststore.path'] = params.metric_truststore_path
        kafka_server_config[
            'kafka.timeline.metrics.truststore.type'] = params.metric_truststore_type
        kafka_server_config[
            'kafka.timeline.metrics.truststore.password'] = params.metric_truststore_password

    kafka_data_dir = kafka_server_config['log.dirs']
    kafka_data_dirs = filter(None, kafka_data_dir.split(","))
    Directory(
        kafka_data_dirs,
        mode=0755,
        cd_access='a',
        owner=params.kafka_user,
        group=params.user_group,
        create_parents=True,
        recursive_ownership=True,
    )

    PropertiesFile(
        "server.properties",
        dir=params.conf_dir,
        properties=kafka_server_config,
        owner=params.kafka_user,
        group=params.user_group,
    )

    File(format("{conf_dir}/kafka-env.sh"),
         owner=params.kafka_user,
         content=InlineTemplate(params.kafka_env_sh_template))

    if (params.log4j_props != None):
        File(format("{conf_dir}/log4j.properties"),
             mode=0644,
             group=params.user_group,
             owner=params.kafka_user,
             content=params.log4j_props)

    if params.security_enabled and params.kafka_kerberos_enabled:
        TemplateConfig(format("{conf_dir}/kafka_jaas.conf"),
                       owner=params.kafka_user)

        TemplateConfig(format("{conf_dir}/kafka_client_jaas.conf"),
                       owner=params.kafka_user)

    # On some OS this folder could be not exists, so we will create it before pushing there files
    Directory(params.limits_conf_dir,
              create_parents=True,
              owner='root',
              group='root')

    File(os.path.join(params.limits_conf_dir, 'kafka.conf'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("kafka.conf.j2"))

    setup_symlink(params.kafka_managed_pid_dir, params.kafka_pid_dir)
    setup_symlink(params.kafka_managed_log_dir, params.kafka_log_dir)
コード例 #4
0
def storm(name=None):
    import params
    import os

    Directory(
        params.log_dir,
        owner=params.storm_user,
        group=params.user_group,
        mode=0777,
        create_parents=True,
        cd_access="a",
    )

    Directory(
        [params.pid_dir, params.local_dir],
        owner=params.storm_user,
        group=params.user_group,
        create_parents=True,
        cd_access="a",
        mode=0755,
    )

    Directory(
        params.conf_dir,
        group=params.user_group,
        create_parents=True,
        cd_access="a",
    )

    File(format("{conf_dir}/config.yaml"),
         content=Template("config.yaml.j2"),
         owner=params.storm_user,
         group=params.user_group)

    configurations = params.config['configurations']['storm-site']

    File(format("{conf_dir}/storm.yaml"),
         content=yaml_config_template(configurations),
         owner=params.storm_user,
         group=params.user_group)

    File(format("{conf_dir}/storm-env.sh"),
         owner=params.storm_user,
         content=InlineTemplate(params.storm_env_sh_template))

    # Generate atlas-application.properties.xml file and symlink the hook jars
    if params.enable_atlas_hook:
        atlas_hook_filepath = os.path.join(params.conf_dir,
                                           params.atlas_hook_filename)
        setup_atlas_hook(SERVICE.STORM,
                         params.storm_atlas_application_properties,
                         atlas_hook_filepath, params.storm_user,
                         params.user_group)
        storm_extlib_dir = os.path.join(params.storm_component_home_dir,
                                        "extlib")
        setup_atlas_jar_symlinks("storm", storm_extlib_dir)

    if params.has_metric_collector:
        File(format("{conf_dir}/storm-metrics2.properties"),
             owner=params.storm_user,
             group=params.user_group,
             content=Template("storm-metrics2.properties.j2"))

        # Remove symlinks. They can be there, if you doing upgrade from HDP < 2.2 to HDP >= 2.2
        Link(format("{storm_lib_dir}/ambari-metrics-storm-sink.jar"),
             action="delete")
        # On old HDP 2.1 versions, this symlink may also exist and break EU to newer versions
        Link("/usr/lib/storm/lib/ambari-metrics-storm-sink.jar",
             action="delete")

        if check_stack_feature(StackFeature.STORM_METRICS_APACHE_CLASSES,
                               params.version_for_stack_feature_checks):
            sink_jar = params.metric_collector_sink_jar
        else:
            sink_jar = params.metric_collector_legacy_sink_jar

        Execute(format(
            "{sudo} ln -s {sink_jar} {storm_lib_dir}/ambari-metrics-storm-sink.jar"
        ),
                not_if=format(
                    "ls {storm_lib_dir}/ambari-metrics-storm-sink.jar"),
                only_if=format("ls {sink_jar}"))

    if params.storm_logs_supported:
        Directory(params.log4j_dir,
                  owner=params.storm_user,
                  group=params.user_group,
                  mode=0755,
                  create_parents=True)

        File(format("{log4j_dir}/cluster.xml"),
             owner=params.storm_user,
             content=InlineTemplate(params.storm_cluster_log4j_content))
        File(format("{log4j_dir}/worker.xml"),
             owner=params.storm_user,
             content=InlineTemplate(params.storm_worker_log4j_content))

    if params.security_enabled:
        TemplateConfig(format("{conf_dir}/storm_jaas.conf"),
                       owner=params.storm_user)
        if params.stack_version_formatted and check_stack_feature(
                StackFeature.ROLLING_UPGRADE, params.stack_version_formatted):
            TemplateConfig(format("{conf_dir}/client_jaas.conf"),
                           owner=params.storm_user)
            minRuid = configurations[
                '_storm.min.ruid'] if configurations.has_key(
                    '_storm.min.ruid') else ''

            min_user_ruid = int(
                minRuid) if minRuid.isdigit() else _find_real_user_min_uid()

            File(format("{conf_dir}/worker-launcher.cfg"),
                 content=Template("worker-launcher.cfg.j2",
                                  min_user_ruid=min_user_ruid),
                 owner='root',
                 group=params.user_group)
コード例 #5
0
def storm():
    import params

    Directory(params.log_dir,
              owner=params.storm_user,
              group=params.user_group,
              mode=0775,
              recursive=True)

    Directory(
        [params.pid_dir, params.local_dir, params.conf_dir],
        owner=params.storm_user,
        group=params.user_group,
        recursive=True,
        cd_access="a",
    )

    File(format("{conf_dir}/config.yaml"),
         content=Template("config.yaml.j2"),
         owner=params.storm_user,
         group=params.user_group)

    configurations = params.config['configurations']['storm-site']

    File(format("{conf_dir}/storm.yaml"),
         content=Template("storm.yaml.j2",
                          extra_imports=[escape_yaml_propetry],
                          configurations=configurations),
         owner=params.storm_user,
         group=params.user_group)

    if params.has_metric_collector:
        File(format("{conf_dir}/storm-metrics2.properties"),
             owner=params.storm_user,
             group=params.user_group,
             content=Template("storm-metrics2.properties.j2"))

        Execute(format(
            "sudo ln -s {metric_collector_sink_jar} {storm_lib_dir}/ambari-metrics-storm-sink.jar"
        ),
                not_if=format(
                    "ls {storm_lib_dir}/ambari-metrics-storm-sink.jar"),
                only_if=format("ls {metric_collector_sink_jar}"))

    File(format("{conf_dir}/storm-env.sh"),
         owner=params.storm_user,
         content=InlineTemplate(params.storm_env_sh_template))

    if params.security_enabled:
        TemplateConfig(format("{conf_dir}/storm_jaas.conf"),
                       owner=params.storm_user)
        if params.hdp_stack_version != "" and compare_versions(
                params.hdp_stack_version, '2.2') >= 0:
            TemplateConfig(format("{conf_dir}/client_jaas.conf"),
                           owner=params.storm_user)
            minRuid = configurations[
                '_storm.min.ruid'] if configurations.has_key(
                    '_storm.min.ruid') else ''

            min_user_ruid = int(
                minRuid) if minRuid.isdigit() else _find_real_user_min_uid()

            File(format("{conf_dir}/worker-launcher.cfg"),
                 content=Template("worker-launcher.cfg.j2",
                                  min_user_ruid=min_user_ruid),
                 owner='root',
                 group=params.user_group)
コード例 #6
0
def ams(name=None, action=None):
    import params

    if name == 'collector':
        Directory(
            params.ams_collector_conf_dir,
            owner=params.ams_user,
            group=params.user_group,
            create_parents=True,
            recursive_ownership=True,
        )

        Directory(params.ams_checkpoint_dir,
                  owner=params.ams_user,
                  group=params.user_group,
                  cd_access="a",
                  create_parents=True,
                  recursive_ownership=True)

        new_ams_site = {}
        new_ams_site.update(params.config['configurations']['ams-site'])
        if params.clusterHostInfoDict:
            master_components = []
            slave_components = []
            components = dict(params.clusterHostInfoDict).keys()
            known_slave_components = [
                "nodemanager", "metrics_monitor", "datanode",
                "hbase_regionserver"
            ]
            for component in components:
                if component and component.endswith("_hosts"):
                    component_name = component[:-6]
                elif component and component.endswith("_host"):
                    component_name = component[:-5]
                else:
                    continue
                if component_name in known_slave_components:
                    slave_components.append(component_name)
                else:
                    master_components.append(component_name)

            if slave_components:
                new_ams_site[
                    'timeline.metrics.initial.configured.slave.components'] = ",".join(
                        slave_components)
            if master_components:
                if 'ambari_server' not in master_components:
                    master_components.append('ambari_server')
                new_ams_site[
                    'timeline.metrics.initial.configured.master.components'] = ",".join(
                        master_components)

        hbase_total_heapsize_with_trailing_m = params.hbase_heapsize
        hbase_total_heapsize = int(
            hbase_total_heapsize_with_trailing_m[:-1]) * 1024 * 1024
        new_ams_site['hbase_total_heapsize'] = hbase_total_heapsize

        XmlConfig(
            "ams-site.xml",
            conf_dir=params.ams_collector_conf_dir,
            configurations=new_ams_site,
            configuration_attributes=params.config['configurationAttributes']
            ['ams-site'],
            owner=params.ams_user,
            group=params.user_group)

        XmlConfig(
            "ssl-server.xml",
            conf_dir=params.ams_collector_conf_dir,
            configurations=params.config['configurations']['ams-ssl-server'],
            configuration_attributes=params.config['configurationAttributes']
            ['ams-ssl-server'],
            owner=params.ams_user,
            group=params.user_group)

        merged_ams_hbase_site = {}
        merged_ams_hbase_site.update(
            params.config['configurations']['ams-hbase-site'])
        if params.security_enabled:
            merged_ams_hbase_site.update(
                params.config['configurations']['ams-hbase-security-site'])

        # Add phoenix client side overrides
        merged_ams_hbase_site['phoenix.query.maxGlobalMemoryPercentage'] = str(
            params.phoenix_max_global_mem_percent)
        merged_ams_hbase_site[
            'phoenix.spool.directory'] = params.phoenix_client_spool_dir

        XmlConfig(
            "hbase-site.xml",
            conf_dir=params.ams_collector_conf_dir,
            configurations=merged_ams_hbase_site,
            configuration_attributes=params.config['configurationAttributes']
            ['ams-hbase-site'],
            owner=params.ams_user,
            group=params.user_group)

        if params.security_enabled:
            TemplateConfig(os.path.join(params.hbase_conf_dir,
                                        "ams_collector_jaas.conf"),
                           owner=params.ams_user,
                           template_tag=None)

        if (params.log4j_props != None):
            File(format("{params.ams_collector_conf_dir}/log4j.properties"),
                 mode=0644,
                 group=params.user_group,
                 owner=params.ams_user,
                 content=InlineTemplate(params.log4j_props))

        File(format("{ams_collector_conf_dir}/ams-env.sh"),
             owner=params.ams_user,
             content=InlineTemplate(params.ams_env_sh_template))

        Directory(
            params.ams_collector_log_dir,
            owner=params.ams_user,
            group=params.user_group,
            cd_access="a",
            create_parents=True,
            mode=0755,
        )

        Directory(
            params.ams_collector_pid_dir,
            owner=params.ams_user,
            group=params.user_group,
            cd_access="a",
            create_parents=True,
            mode=0755,
        )

        # Hack to allow native HBase libs to be included for embedded hbase
        File(os.path.join(params.ams_hbase_home_dir, "bin", "hadoop"),
             owner=params.ams_user,
             mode=0755)

        # On some OS this folder could be not exists, so we will create it before pushing there files
        Directory(params.limits_conf_dir,
                  create_parents=True,
                  owner='root',
                  group='root')

        # Setting up security limits
        File(os.path.join(params.limits_conf_dir, 'ams.conf'),
             owner='root',
             group='root',
             mode=0644,
             content=Template("ams.conf.j2"))

        # Phoenix spool file dir if not /tmp
        if not os.path.exists(params.phoenix_client_spool_dir):
            Directory(params.phoenix_client_spool_dir,
                      owner=params.ams_user,
                      mode=0755,
                      group=params.user_group,
                      cd_access="a",
                      create_parents=True)
        pass

        if not params.is_local_fs_rootdir and params.is_ams_distributed:
            # Configuration needed to support NN HA
            XmlConfig(
                "hdfs-site.xml",
                conf_dir=params.ams_collector_conf_dir,
                configurations=params.config['configurations']['hdfs-site'],
                configuration_attributes=params.
                config['configurationAttributes']['hdfs-site'],
                owner=params.ams_user,
                group=params.user_group,
                mode=0644)

            XmlConfig(
                "hdfs-site.xml",
                conf_dir=params.hbase_conf_dir,
                configurations=params.config['configurations']['hdfs-site'],
                configuration_attributes=params.
                config['configurationAttributes']['hdfs-site'],
                owner=params.ams_user,
                group=params.user_group,
                mode=0644)

            # Remove spnego configs from core-site if platform does not have python-kerberos library
            truncated_core_site = {}
            truncated_core_site.update(
                params.config['configurations']['core-site'])
            if is_spnego_enabled(
                    params) and is_redhat_centos_6_plus() == False:
                truncated_core_site.pop('hadoop.http.authentication.type')
                truncated_core_site.pop('hadoop.http.filter.initializers')

            XmlConfig("core-site.xml",
                      conf_dir=params.ams_collector_conf_dir,
                      configurations=truncated_core_site,
                      configuration_attributes=params.
                      config['configurationAttributes']['core-site'],
                      owner=params.ams_user,
                      group=params.user_group,
                      mode=0644)

            XmlConfig("core-site.xml",
                      conf_dir=params.hbase_conf_dir,
                      configurations=truncated_core_site,
                      configuration_attributes=params.
                      config['configurationAttributes']['core-site'],
                      owner=params.ams_user,
                      group=params.user_group,
                      mode=0644)

        if params.metric_collector_https_enabled:
            export_ca_certs(params.ams_collector_conf_dir)

        pass

    elif name == 'monitor':

        if is_spnego_enabled(params) and is_redhat_centos_6_plus():
            try:
                import kerberos
            except ImportError:
                raise ImportError(
                    "python-kerberos package need to be installed to run AMS in SPNEGO mode"
                )

        Directory(params.ams_monitor_conf_dir,
                  owner=params.ams_user,
                  group=params.user_group,
                  create_parents=True)

        Directory(params.ams_monitor_log_dir,
                  owner=params.ams_user,
                  group=params.user_group,
                  mode=0755,
                  create_parents=True)

        if params.host_in_memory_aggregation and params.log4j_props is not None:
            File(format("{params.ams_monitor_conf_dir}/log4j.properties"),
                 mode=0644,
                 group=params.user_group,
                 owner=params.ams_user,
                 content=InlineTemplate(params.log4j_props))

            XmlConfig(
                "ams-site.xml",
                conf_dir=params.ams_monitor_conf_dir,
                configurations=params.config['configurations']['ams-site'],
                configuration_attributes=params.
                config['configurationAttributes']['ams-site'],
                owner=params.ams_user,
                group=params.user_group)
            XmlConfig("ssl-server.xml",
                      conf_dir=params.ams_monitor_conf_dir,
                      configurations=params.config['configurations']
                      ['ams-ssl-server'],
                      configuration_attributes=params.
                      config['configurationAttributes']['ams-ssl-server'],
                      owner=params.ams_user,
                      group=params.user_group)
            pass

        Execute(
            format(
                "{sudo} chown -R {ams_user}:{user_group} {ams_monitor_log_dir}"
            ))

        Directory(params.ams_monitor_pid_dir,
                  owner=params.ams_user,
                  group=params.user_group,
                  cd_access="a",
                  mode=0755,
                  create_parents=True)

        Directory(format("{ams_monitor_dir}/psutil/build"),
                  owner=params.ams_user,
                  group=params.user_group,
                  cd_access="a",
                  create_parents=True)

        Execute(
            format(
                "{sudo} chown -R {ams_user}:{user_group} {ams_monitor_dir}"))

        TemplateConfig(format("{ams_monitor_conf_dir}/metric_monitor.ini"),
                       owner=params.ams_user,
                       group=params.user_group,
                       template_tag=None)

        TemplateConfig(format("{ams_monitor_conf_dir}/metric_groups.conf"),
                       owner=params.ams_user,
                       group=params.user_group,
                       template_tag=None)

        File(format("{ams_monitor_conf_dir}/ams-env.sh"),
             owner=params.ams_user,
             content=InlineTemplate(params.ams_env_sh_template))

        if params.metric_collector_https_enabled or params.is_aggregation_https_enabled:
            export_ca_certs(params.ams_monitor_conf_dir)

        pass
    elif name == 'grafana':

        ams_grafana_directories = [
            params.ams_grafana_conf_dir, params.ams_grafana_log_dir,
            params.ams_grafana_data_dir, params.ams_grafana_pid_dir
        ]

        for ams_grafana_directory in ams_grafana_directories:
            Directory(ams_grafana_directory,
                      owner=params.ams_user,
                      group=params.user_group,
                      mode=0755,
                      create_parents=True,
                      recursive_ownership=True)

        File(format("{ams_grafana_conf_dir}/ams-grafana-env.sh"),
             owner=params.ams_user,
             group=params.user_group,
             content=InlineTemplate(params.ams_grafana_env_sh_template))

        File(format("{ams_grafana_conf_dir}/ams-grafana.ini"),
             owner=params.ams_user,
             group=params.user_group,
             content=InlineTemplate(params.ams_grafana_ini_template),
             mode=0600)

        if action != 'stop':
            for dir in ams_grafana_directories:
                Execute(('chown', '-R', params.ams_user, dir), sudo=True)

        if params.metric_collector_https_enabled:
            export_ca_certs(params.ams_grafana_conf_dir)

        pass
コード例 #7
0
ファイル: storm.py プロジェクト: qwurey/ambari
def storm(name=None):
  """Set up Storm on this host.

  Creates the log/pid/local/conf directories, renders config.yaml,
  storm.yaml and storm-env.sh, wires in the AMS metrics sink when a
  metric collector is present, and writes the JAAS / worker-launcher
  files when security is enabled.  Resources are applied in order, so
  directory creation must precede the files written into them.
  """
  import params

  Directory(params.log_dir,
            owner=params.storm_user,
            group=params.user_group,
            # world-writable: worker processes may run as other users
            mode=0o777,
            recursive=True
  )

  Directory([params.pid_dir, params.local_dir],
            owner=params.storm_user,
            group=params.user_group,
            recursive=True,
            cd_access="a",
            mode=0o755,
  )

  Directory(params.conf_dir,
            group=params.user_group,
            recursive=True,
            cd_access="a",
  )

  File(format("{conf_dir}/config.yaml"),
       content=Template("config.yaml.j2"),
       owner=params.storm_user,
       group=params.user_group
  )

  configurations = params.config['configurations']['storm-site']

  File(format("{conf_dir}/storm.yaml"),
       content=yaml_config_template(configurations),
       owner=params.storm_user,
       group=params.user_group
  )

  if params.has_metric_collector:
    File(format("{conf_dir}/storm-metrics2.properties"),
         owner=params.storm_user,
         group=params.user_group,
         content=Template("storm-metrics2.properties.j2")
    )

    # Remove symlink. It can be there, if you doing upgrade from HDP < 2.2 to HDP >= 2.2
    Link("/usr/lib/storm/lib/ambari-metrics-storm-sink.jar",
         action="delete")

    # Link the sink jar into Storm's lib dir; guarded so this is a no-op
    # when the link already exists or the jar is missing.
    Execute(format("{sudo} ln -s {metric_collector_sink_jar} {storm_lib_dir}/ambari-metrics-storm-sink.jar"),
            not_if=format("ls {storm_lib_dir}/ambari-metrics-storm-sink.jar"),
            only_if=format("ls {metric_collector_sink_jar}")
    )

  File(format("{conf_dir}/storm-env.sh"),
       owner=params.storm_user,
       content=InlineTemplate(params.storm_env_sh_template)
  )

  if params.storm_logs_supported:
    Directory(params.log4j_dir,
              owner=params.storm_user,
              group=params.user_group,
              mode=0o755,
              recursive=True
    )

    File(format("{log4j_dir}/cluster.xml"),
         owner=params.storm_user,
         content=InlineTemplate(params.storm_cluster_log4j_content)
    )
    File(format("{log4j_dir}/worker.xml"),
         owner=params.storm_user,
         content=InlineTemplate(params.storm_worker_log4j_content)
    )

  if params.security_enabled:
    TemplateConfig(format("{conf_dir}/storm_jaas.conf"),
                   owner=params.storm_user
    )
    if params.hdp_stack_version != "" and compare_versions(params.hdp_stack_version, '2.2') >= 0:
      TemplateConfig(format("{conf_dir}/client_jaas.conf"),
                     owner=params.storm_user
      )
      # was: configurations.has_key('_storm.min.ruid') -- dict.has_key is
      # Python-2-only and long deprecated; 'in' works everywhere.
      minRuid = configurations['_storm.min.ruid'] if '_storm.min.ruid' in configurations else ''

      # Fall back to probing the system for the real minimum uid when the
      # configured value is not a plain number.
      min_user_ruid = int(minRuid) if minRuid.isdigit() else _find_real_user_min_uid()

      File(format("{conf_dir}/worker-launcher.cfg"),
           content=Template("worker-launcher.cfg.j2", min_user_ruid = min_user_ruid),
           owner='root',
           group=params.user_group
      )
コード例 #8
0
ファイル: streamline.py プロジェクト: tsingfu/bigdata
def streamline(env, upgrade_type=None):
    """Lay down Streamline directories and configuration files.

    Renders streamline-env.sh, limits and YAML configs, creates the jar
    storage / test-results / schema-registry directories, and writes the
    JAAS config when security is enabled.
    """
    import params

    ensure_base_directories()
    # Logger.info(format("Effective stack version: {effective_version}"))

    File(format("{conf_dir}/streamline-env.sh"),
         content=InlineTemplate(params.streamline_env_sh_template),
         owner=params.streamline_user)

    # On some OS this folder could be not exists, so we will create it before pushing there files
    Directory(params.limits_conf_dir,
              owner='root',
              group='root',
              create_parents=True)

    Directory([params.jar_storage],
              owner=params.streamline_user,
              group=params.user_group,
              mode=0o755,
              cd_access="a",
              create_parents=True)

    # this is hard-coded as we are not accepting
    # the registry local-jars
    # should be removed from future releases
    Directory("/tmp/schema-registry/local-jars",
              owner=params.streamline_user,
              group=params.user_group,
              mode=0o755,
              cd_access="a",
              create_parents=True)

    Directory([params.topology_test_results],
              owner=params.streamline_user,
              group=params.user_group,
              mode=0o755,
              cd_access="a",
              create_parents=True)

    File(os.path.join(params.limits_conf_dir, 'streamline.conf'),
         content=Template("streamline.conf.j2"),
         owner='root',
         group='root',
         mode=0o644)

    File(format("{conf_dir}/streamline.yaml"),
         content=Template("streamline.yaml.j2"),
         owner=params.streamline_user,
         group=params.user_group,
         mode=0o644)

    if params.security_enabled:
        # Prefer a template supplied through configuration; otherwise fall
        # back to the stack's bundled template file.
        if params.streamline_jaas_conf_template:
            File(format("{conf_dir}/streamline_jaas.conf"),
                 content=InlineTemplate(params.streamline_jaas_conf_template),
                 owner=params.streamline_user)
        else:
            TemplateConfig(format("{conf_dir}/streamline_jaas.conf"),
                           owner=params.streamline_user)

    if not os.path.islink(params.streamline_managed_log_dir):
        Link(params.streamline_managed_log_dir, to=params.streamline_log_dir)
コード例 #9
0
ファイル: kafka.py プロジェクト: tsingfu/bigdata
def kafka(upgrade_type=None):
    """Configure a Kafka broker.

    Builds server.properties from the kafka-broker config (adding the AMS
    timeline-metrics sink settings when a collector is present), creates
    the data directories, writes env/log4j/JAAS files and links the
    managed pid/log directories.

    upgrade_type: non-None while a stack upgrade is in progress; only used
    to pick the effective stack version that is logged.
    """
    import params
    ensure_base_directories()

    kafka_server_config = mutable_config_dict(
        params.config['configurations']['kafka-broker'])
    # This still has an issue of hostnames being alphabetically out-of-order for broker.id in HDP-2.2.
    # Starting in HDP 2.3, Kafka handles the generation of broker.id so Ambari doesn't have to.

    effective_version = params.stack_version_formatted if upgrade_type is None else format_stack_version(
        params.version)
    Logger.info(format("Effective stack version: {effective_version}"))

    kafka_server_config['host.name'] = params.hostname

    if params.has_metric_collector:
        # Point the AMS timeline-metrics sink at the collector endpoint(s).
        kafka_server_config[
            'kafka.timeline.metrics.hosts'] = params.ams_collector_hosts
        kafka_server_config[
            'kafka.timeline.metrics.port'] = params.metric_collector_port
        kafka_server_config[
            'kafka.timeline.metrics.protocol'] = params.metric_collector_protocol
        kafka_server_config[
            'kafka.timeline.metrics.truststore.path'] = params.metric_truststore_path
        kafka_server_config[
            'kafka.timeline.metrics.truststore.type'] = params.metric_truststore_type
        kafka_server_config[
            'kafka.timeline.metrics.truststore.password'] = params.metric_truststore_password

    # log.dirs is a comma-separated list; drop empty entries.
    # (was: filter(None, ...) -- the comprehension also yields a real list
    # on Python 3, where filter() returns a lazy iterator)
    kafka_data_dir = kafka_server_config['log.dirs']
    kafka_data_dirs = [d for d in kafka_data_dir.split(",") if d]
    Directory(
        kafka_data_dirs,
        mode=0o755,
        cd_access='a',
        owner=params.kafka_user,
        group=params.user_group,
        create_parents=True,
        recursive_ownership=True,
    )

    PropertiesFile(
        "server.properties",
        dir=params.conf_dir,
        properties=kafka_server_config,
        owner=params.kafka_user,
        group=params.user_group,
    )

    File(format("{conf_dir}/kafka-env.sh"),
         mode=0o755,
         owner=params.kafka_user,
         content=InlineTemplate(params.kafka_env_sh_template))

    File(format("{kafka_bin_dir}/kafka-run-class.sh"),
         mode=0o755,
         owner=params.kafka_user,
         content=InlineTemplate(params.kafka_run_class_content_template))

    # was: if (params.log4j_props != None) -- compare to None with 'is not'
    if params.log4j_props is not None:
        File(format("{conf_dir}/log4j.properties"),
             mode=0o644,
             group=params.user_group,
             owner=params.kafka_user,
             content=InlineTemplate(params.log4j_props))

    if params.security_enabled and params.kafka_kerberos_enabled:
        # Prefer JAAS templates supplied via configuration; otherwise fall
        # back to the stack's bundled template files.
        if params.kafka_jaas_conf_template:
            File(format("{conf_dir}/kafka_jaas.conf"),
                 owner=params.kafka_user,
                 content=InlineTemplate(params.kafka_jaas_conf_template))
        else:
            TemplateConfig(format("{conf_dir}/kafka_jaas.conf"),
                           owner=params.kafka_user)

        if params.kafka_client_jaas_conf_template:
            File(format("{conf_dir}/kafka_client_jaas.conf"),
                 owner=params.kafka_user,
                 content=InlineTemplate(
                     params.kafka_client_jaas_conf_template))
        else:
            TemplateConfig(format("{conf_dir}/kafka_client_jaas.conf"),
                           owner=params.kafka_user)

    # On some OS this folder could be not exists, so we will create it before pushing there files
    Directory(params.limits_conf_dir,
              create_parents=True,
              owner='root',
              group='root')

    File(os.path.join(params.limits_conf_dir, 'kafka.conf'),
         owner='root',
         group='root',
         mode=0o644,
         content=Template("kafka.conf.j2"))

    File(os.path.join(params.conf_dir, 'tools-log4j.properties'),
         owner='root',
         group='root',
         mode=0o644,
         content=Template("tools-log4j.properties.j2"))

    setup_symlink(params.kafka_managed_pid_dir, params.kafka_pid_dir)
    setup_symlink(params.kafka_managed_log_dir, params.kafka_log_dir)
コード例 #10
0
ファイル: metadata.py プロジェクト: zengzhaozheng/ambari
def metadata():
    """Create Atlas metadata-server directories and configuration files."""
    import params

    # Runtime directories (pid/conf/log) share ownership and the 0755 mode,
    # so they are created in one pass, in the original order.
    Directory([params.pid_dir, params.conf_dir, params.log_dir],
              mode=0o755,
              cd_access='a',
              owner=params.metadata_user,
              group=params.user_group,
              create_parents=True)

    # Data and expanded-war directories keep their original 0644 mode.
    Directory([params.data_dir, params.expanded_war_dir],
              mode=0o644,
              cd_access='a',
              owner=params.metadata_user,
              group=params.user_group,
              create_parents=True)

    File(format("{expanded_war_dir}/atlas.war"),
         content=StaticFile(format('{metadata_home}/server/webapp/atlas.war')))

    PropertiesFile(format('{conf_dir}/{conf_file}'),
                   properties=params.application_properties,
                   mode=0o644,
                   owner=params.metadata_user,
                   group=params.user_group)

    File(format("{conf_dir}/atlas-env.sh"),
         mode=0o755,
         owner=params.metadata_user,
         group=params.user_group,
         content=InlineTemplate(params.metadata_env_content))

    File(format("{conf_dir}/atlas-log4j.xml"),
         mode=0o644,
         owner=params.metadata_user,
         group=params.user_group,
         content=StaticFile('atlas-log4j.xml'))

    if params.security_enabled:
        # Render the Kerberos JAAS login configuration.
        TemplateConfig(format(params.atlas_jaas_file),
                       owner=params.metadata_user)
コード例 #11
0
ファイル: hbase.py プロジェクト: xiaomatech/dataops
def hbase_TemplateConfig(name, tag=None):
    """Render the HBase template config *name* into hbase_conf_dir,
    owned by the HBase user; *tag* selects an optional template variant."""
    import params

    dest = format("{hbase_conf_dir}/{name}")
    TemplateConfig(dest,
                   owner=params.hbase_user,
                   template_tag=tag)
コード例 #12
0
def metadata(type='server'):
    """Configure Atlas for either the server or a client host.

    Client hosts only get the conf dir and application properties; the
    server additionally gets its runtime directories, atlas.war, env/log4j
    and solrconfig files, and (with a Solr search backend) the uploaded
    config set plus the three Atlas index collections.
    """
    import params

    # Needed by both Server and Client
    Directory(params.conf_dir,
              owner=params.metadata_user,
              group=params.user_group,
              mode=0o755,
              cd_access='a',
              create_parents=True)

    if type == "server":
        # Server runtime directories: pid, solr conf and log share 0755.
        Directory([params.pid_dir, format('{conf_dir}/solr'), params.log_dir],
                  owner=params.metadata_user,
                  group=params.user_group,
                  mode=0o755,
                  cd_access='a',
                  create_parents=True)
        # Data and expanded-war directories keep their original 0644 mode.
        Directory([params.data_dir, params.expanded_war_dir],
                  owner=params.metadata_user,
                  group=params.user_group,
                  mode=0o644,
                  cd_access='a',
                  create_parents=True)
        File(format("{expanded_war_dir}/atlas.war"),
             content=StaticFile(
                 format('{metadata_home}/server/webapp/atlas.war')))
        File(format("{conf_dir}/atlas-log4j.xml"),
             content=InlineTemplate(params.metadata_log4j_content),
             owner=params.metadata_user,
             group=params.user_group,
             mode=0o644)
        File(format("{conf_dir}/atlas-env.sh"),
             content=InlineTemplate(params.metadata_env_content),
             owner=params.metadata_user,
             group=params.user_group,
             mode=0o755)
        File(format("{conf_dir}/solr/solrconfig.xml"),
             content=InlineTemplate(params.metadata_solrconfig_content),
             owner=params.metadata_user,
             group=params.user_group,
             mode=0o644)

    # Needed by both Server and Client
    PropertiesFile(format('{conf_dir}/{conf_file}'),
                   properties=params.application_properties,
                   owner=params.metadata_user,
                   group=params.user_group,
                   mode=0o644)

    if type == 'server' and params.search_backend_solr and params.has_logsearch_solr:
        solr_cloud_util.setup_solr_client(params.config)

        # Upload a config set (random suffix) and create the Atlas
        # search collections on top of it.
        random_num = random.random()
        upload_conf_set('basic_configs', random_num)

        create_collection('vertex_index', 'basic_configs')
        create_collection('edge_index', 'basic_configs')
        create_collection('fulltext_index', 'basic_configs')

    if params.security_enabled:
        TemplateConfig(format(params.atlas_jaas_file),
                       owner=params.metadata_user)
コード例 #13
0
ファイル: knox.py プロジェクト: xiaomatech/dataops
def knox():
    """Set up the Knox gateway.

    Creates all Knox runtime directories, renders gateway-site.xml, the
    log4j and topology files, the Kerberos JAAS config when security is
    enabled, and finally creates the gateway master secret and TLS
    certificate (each only once, guarded by ``not_if``).
    """
    import params
    # All Knox directories in one pass; ownership is applied recursively
    # so pre-existing content gets chowned as well.
    Directory(
        [
            params.knox_data_dir, params.knox_data_backup_dir,
            params.knox_logs_dir, params.knox_pid_dir, params.knox_conf_dir,
            os.path.join(params.knox_conf_dir, "topologies"),
            params.knox_descriptors_dir, params.knox_shared_providers_dir
        ],
        owner=params.knox_user,
        group=params.knox_group,
        create_parents=True,
        cd_access="a",
        mode=0755,
        recursive_ownership=True,
    )

    XmlConfig(
        "gateway-site.xml",
        conf_dir=params.knox_conf_dir,
        configurations=params.config['configurations']['gateway-site'],
        configuration_attributes=params.config['configurationAttributes']
        ['gateway-site'],
        owner=params.knox_user,
        group=params.knox_group,
    )

    File(format("{params.knox_conf_dir}/gateway-log4j.properties"),
         mode=0644,
         group=params.knox_group,
         owner=params.knox_user,
         content=InlineTemplate(params.gateway_log4j))

    # Topology files are written 0600 (owner-only read/write).
    File(format("{params.knox_conf_dir}/topologies/default.xml"),
         mode=0600,
         group=params.knox_group,
         owner=params.knox_user,
         content=InlineTemplate(params.topology_template))

    # The admin topology is optional; rendered only when a template exists.
    if params.admin_topology_template:
        File(format("{params.knox_conf_dir}/topologies/admin.xml"),
             mode=0600,
             group=params.knox_group,
             owner=params.knox_user,
             content=InlineTemplate(params.admin_topology_template))

    knoxsso_topology_template_content = get_config("knoxsso-topology")
    if knoxsso_topology_template_content:
        # NOTE(review): the guard checks get_config("knoxsso-topology") but the
        # rendered content is params.knoxsso_topology_template -- confirm these
        # are meant to be the same value.
        File(os.path.join(params.knox_conf_dir, "topologies", "knoxsso.xml"),
             mode=0600,
             group=params.knox_group,
             owner=params.knox_user,
             content=InlineTemplate(params.knoxsso_topology_template))

    if params.security_enabled:
        TemplateConfig(format("{knox_conf_dir}/krb5JAASLogin.conf"),
                       owner=params.knox_user,
                       template_tag=None)

    generate_logfeeder_input_config(
        'knox', Template("input.config-knox.json.j2", extra_imports=[default]))

    # Create the gateway master secret, but only if the secret file does not
    # already exist ('!p' presumably masks the value in logs -- TODO confirm).
    cmd = format(
        '{knox_client_bin} create-master --master {knox_master_secret!p}')
    master_secret_exist = as_user(format('test -f {knox_master_secret_path}'),
                                  params.knox_user)

    Execute(
        cmd,
        user=params.knox_user,
        environment={'JAVA_HOME': params.java_home},
        not_if=master_secret_exist,
    )

    # Likewise, generate the gateway certificate only if the cert store
    # file is not already present.
    cmd = format(
        '{knox_client_bin} create-cert --hostname {knox_host_name_in_cluster}')
    cert_store_exist = as_user(format('test -f {knox_cert_store_path}'),
                               params.knox_user)

    Execute(
        cmd,
        user=params.knox_user,
        environment={'JAVA_HOME': params.java_home},
        not_if=cert_store_exist,
    )
コード例 #14
0
ファイル: metadata.py プロジェクト: csivaguru/ambari
def metadata(type='server'):
    """Create Atlas directories and write its configuration files.

    Both server and client hosts get the directories, atlas.war, the
    application properties and env/log4j/credentials/policy files; only
    the server additionally uploads the Solr config set and creates the
    three Atlas index collections.
    """
    import params

    # Runtime directories (pid/conf/log) share ownership and 0755 mode,
    # so they are created in one pass, in the original order.
    Directory([params.pid_dir, params.conf_dir, params.log_dir],
              mode=0o755,
              cd_access='a',
              owner=params.metadata_user,
              group=params.user_group,
              create_parents=True)

    # Data and expanded-war directories keep their original 0644 mode.
    Directory([params.data_dir, params.expanded_war_dir],
              mode=0o644,
              cd_access='a',
              owner=params.metadata_user,
              group=params.user_group,
              create_parents=True)

    File(format("{expanded_war_dir}/atlas.war"),
         content=StaticFile(format('{metadata_home}/server/webapp/atlas.war')))

    PropertiesFile(format('{conf_dir}/{conf_file}'),
                   properties=params.application_properties,
                   mode=0o644,
                   owner=params.metadata_user,
                   group=params.user_group)

    File(format("{conf_dir}/atlas-env.sh"),
         mode=0o755,
         owner=params.metadata_user,
         group=params.user_group,
         content=InlineTemplate(params.metadata_env_content))

    File(format("{conf_dir}/atlas-log4j.xml"),
         mode=0o644,
         owner=params.metadata_user,
         group=params.user_group,
         content=InlineTemplate(params.metadata_log4j_content))

    File(format("{conf_dir}/users-credentials.properties"),
         mode=0o644,
         owner=params.metadata_user,
         group=params.user_group,
         content=StaticFile('users-credentials.properties'))

    File(format("{conf_dir}/policy-store.txt"),
         mode=0o644,
         owner=params.metadata_user,
         group=params.user_group,
         content=StaticFile('policy-store.txt'))

    if type == 'server':
        # Upload a Solr config set (random suffix) and create the Atlas
        # search collections on top of it.
        random_num = random.random()
        upload_conf_set('basic_configs', random_num)

        create_collection('vertex_index', 'basic_configs')
        create_collection('edge_index', 'basic_configs')
        create_collection('fulltext_index', 'basic_configs')

    if params.security_enabled:
        TemplateConfig(format(params.atlas_jaas_file),
                       owner=params.metadata_user)
コード例 #15
0
ファイル: kafka.py プロジェクト: mbigelow/ambari
def kafka():
    """Configure a Kafka broker (legacy HDP 2.x resource API).

    Computes broker.id from the host's position in the sorted broker
    list, rewrites listeners for this host (switching to PLAINTEXTSASL
    when kerberized on HDP >= 2.3), writes server.properties, the env,
    log4j and JAAS files, and links the managed pid/log directories.
    """
    import params

    Directory([params.kafka_log_dir, params.kafka_pid_dir, params.conf_dir],
              mode=0o755,
              cd_access='a',
              owner=params.kafka_user,
              group=params.user_group,
              recursive=True)

    # broker.id is this host's index in the alphabetically sorted broker list.
    brokerid = str(sorted(params.kafka_hosts).index(params.hostname))
    kafka_server_config = mutable_config_dict(params.config['configurations']['kafka-broker'])
    kafka_server_config['broker.id'] = brokerid

    #listeners and advertised.listeners are only added in 2.3.0.0 onwards.
    if params.hdp_stack_version != "" and compare_versions(params.hdp_stack_version, '2.3.0.0') >= 0:
        if params.security_enabled and params.kafka_kerberos_enabled:
            listeners = kafka_server_config['listeners'].replace("localhost", params.hostname).replace("PLAINTEXT", "PLAINTEXTSASL")
            kafka_server_config['listeners'] = listeners
            kafka_server_config['advertised.listeners'] = listeners
        else:
            listeners = kafka_server_config['listeners'].replace("localhost", params.hostname)
            kafka_server_config['listeners'] = listeners
            if 'advertised.listeners' in kafka_server_config:
                advertised_listeners = kafka_server_config['advertised.listeners'].replace("localhost", params.hostname)
                kafka_server_config['advertised.listeners'] = advertised_listeners
    else:
        kafka_server_config['host.name'] = params.hostname

    kafka_server_config['kafka.metrics.reporters'] = params.kafka_metrics_reporters
    # was: if(params.has_metric_collector): with a double-indented body
    if params.has_metric_collector:
        kafka_server_config['kafka.timeline.metrics.host'] = params.metric_collector_host
        kafka_server_config['kafka.timeline.metrics.port'] = params.metric_collector_port

    # log.dirs is a comma-separated list; drop empty entries.
    kafka_data_dir = kafka_server_config['log.dirs']
    Directory([d for d in kafka_data_dir.split(",") if d],
              mode=0o755,
              cd_access='a',
              owner=params.kafka_user,
              group=params.user_group,
              recursive=True)

    # NOTE: this local is read by the format() calls below
    # ("{conf_dir}/..."), which resolve placeholders from the caller's
    # scope -- do not remove it as "unused".
    conf_dir = params.conf_dir
    properties_config("server.properties",
                      conf_dir=params.conf_dir,
                      configurations=kafka_server_config,
                      owner=params.kafka_user,
                      group=params.user_group,
                      brokerid=brokerid)

    File(format("{conf_dir}/kafka-env.sh"),
         owner=params.kafka_user,
         content=InlineTemplate(params.kafka_env_sh_template))

    # was: if (params.log4j_props != None) -- compare to None with 'is not'
    if params.log4j_props is not None:
        File(format("{conf_dir}/log4j.properties"),
             mode=0o644,
             group=params.user_group,
             owner=params.kafka_user,
             content=params.log4j_props)

    if params.security_enabled and params.kafka_kerberos_enabled:
        TemplateConfig(format("{conf_dir}/kafka_jaas.conf"),
                       owner=params.kafka_user)

    setup_symlink(params.kafka_managed_pid_dir, params.kafka_pid_dir)
    setup_symlink(params.kafka_managed_log_dir, params.kafka_log_dir)
コード例 #16
0
ファイル: ganglia.py プロジェクト: Liujinan001/ambari-2.7.5

def shell_file(name):
    import params

    File(params.ganglia_shell_cmds_dir + os.sep + name,
         content=StaticFile(name),
         mode=0755)


def ganglia_TemplateConfig(name, mode=0755, tag=None):
    import params

    TemplateConfig(format("{params.ganglia_shell_cmds_dir}/{name}"),
                   owner="root",
                   group="root",
                   template_tag=tag,
                   mode=mode)


def generate_daemon(ganglia_service,
                    name=None,
                    role=None,
                    owner=None,
                    group=None):
    """Build the setupGanglia.sh command line for a Ganglia daemon.

    NOTE(review): this snippet appears truncated -- only the
    gmond/"server" branch is visible and ``cmd`` is built but never
    executed here; confirm against the full original source before
    modifying.
    """
    import params

    cmd = ""
    if ganglia_service == "gmond":
        if role == "server":
            # -c: cluster name, -m: master mode, -o/-g: daemon owner and
            # group (placeholders presumably filled in by format() later
            # -- TODO confirm in the full source)
            cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -c {name} -m -o {owner} -g {group}"
コード例 #17
0
ファイル: metadata.py プロジェクト: lw-lin/ambari
def metadata(type='server'):
    """Configure Atlas for a server or client host.

    Clients get only the conf directory and application properties; the
    server additionally gets its runtime directories, atlas.war, the
    env/log4j/solrconfig files, the Infra-Solr config set plus the three
    Atlas index collections, and the HBase/Kafka setup helper scripts.
    """
    import params

    # Needed by both Server and Client
    Directory(params.conf_dir,
              mode=0755,
              cd_access='a',
              owner=params.metadata_user,
              group=params.user_group,
              create_parents=True)

    if type == "server":
        # Server-only runtime directories.
        Directory([params.pid_dir],
                  mode=0755,
                  cd_access='a',
                  owner=params.metadata_user,
                  group=params.user_group,
                  create_parents=True)
        Directory(format('{conf_dir}/solr'),
                  mode=0755,
                  cd_access='a',
                  owner=params.metadata_user,
                  group=params.user_group,
                  create_parents=True,
                  recursive_ownership=True)
        Directory(params.log_dir,
                  mode=0755,
                  cd_access='a',
                  owner=params.metadata_user,
                  group=params.user_group,
                  create_parents=True)
        Directory(params.data_dir,
                  mode=0644,
                  cd_access='a',
                  owner=params.metadata_user,
                  group=params.user_group,
                  create_parents=True)
        Directory(params.expanded_war_dir,
                  mode=0644,
                  cd_access='a',
                  owner=params.metadata_user,
                  group=params.user_group,
                  create_parents=True)
        File(format("{expanded_war_dir}/atlas.war"),
             content=StaticFile(
                 format('{metadata_home}/server/webapp/atlas.war')))
        File(format("{conf_dir}/atlas-log4j.xml"),
             mode=0644,
             owner=params.metadata_user,
             group=params.user_group,
             content=InlineTemplate(params.metadata_log4j_content))
        File(format("{conf_dir}/atlas-env.sh"),
             owner=params.metadata_user,
             group=params.user_group,
             mode=0755,
             content=InlineTemplate(params.metadata_env_content))
        # solrconfig.xml is optional; written only when a template exists.
        if params.metadata_solrconfig_content:
            File(format("{conf_dir}/solr/solrconfig.xml"),
                 mode=0644,
                 owner=params.metadata_user,
                 group=params.user_group,
                 content=InlineTemplate(params.metadata_solrconfig_content))

    # Needed by both Server and Client
    PropertiesFile(format('{conf_dir}/{conf_file}'),
                   properties=params.application_properties,
                   mode=0644,
                   owner=params.metadata_user,
                   group=params.user_group)

    if params.security_enabled:
        TemplateConfig(format(params.atlas_jaas_file),
                       owner=params.metadata_user)

    if type == 'server' and params.search_backend_solr and params.has_infra_solr:
        # Upload the Atlas config set to Infra Solr (with the JAAS file
        # when kerberized) and create the three Atlas index collections.
        solr_cloud_util.setup_solr_client(params.config)
        check_znode()
        jaasFile = params.atlas_jaas_file if params.security_enabled else None
        upload_conf_set('atlas_configs', jaasFile)

        create_collection('vertex_index', 'atlas_configs', jaasFile)
        create_collection('edge_index', 'atlas_configs', jaasFile)
        create_collection('fulltext_index', 'atlas_configs', jaasFile)

    # Helper script used to initialize Atlas tables/ACLs in HBase.
    File(params.atlas_hbase_setup,
         group=params.user_group,
         owner=params.hbase_user,
         content=Template("atlas_hbase_setup.rb.j2"))

    # On secured clusters without a local Kafka broker, also lay down the
    # Kafka ACL helper script and the Kafka env/JAAS files it needs.
    if check_stack_feature(StackFeature.ATLAS_UPGRADE_SUPPORT, get_stack_feature_version(params.config)) and\
      params.security_enabled and not params.host_with_kafka:

        File(params.atlas_kafka_setup,
             group=params.user_group,
             owner=params.kafka_user,
             content=Template("atlas_kafka_acl.sh.j2"))

        File(format("{kafka_conf_dir}/kafka-env.sh"),
             owner=params.kafka_user,
             content=InlineTemplate(params.kafka_env_sh_template))

        File(format("{kafka_conf_dir}/kafka_jaas.conf"),
             group=params.user_group,
             owner=params.kafka_user,
             content=Template("kafka_jaas.conf.j2"))
コード例 #18
0
ファイル: kafka.py プロジェクト: qwurey/ambari
def kafka():
    """Configure a Kafka broker across HDP stack versions.

    Generates broker.id only for HDP [2.2, 2.3) (from 2.3 on Kafka
    assigns it itself), rewrites listeners for this host (switching to
    PLAINTEXTSASL when kerberized), adds the AMS metrics sink settings,
    and writes server.properties plus the env/log4j/JAAS/limits files.
    """
    import params

    Directory([params.kafka_log_dir, params.kafka_pid_dir, params.conf_dir],
              mode=0755,
              cd_access='a',
              owner=params.kafka_user,
              group=params.user_group,
              recursive=True)

    kafka_server_config = mutable_config_dict(
        params.config['configurations']['kafka-broker'])
    # This still has an issue of out of alphabetical order of hostnames can get out of order broker.id assigned to them for HDP-2.2.
    # Since from HDP-2.3 kafka is handling the generation of broker.id ambari doesn't need to generate one.
    if params.hdp_stack_version != "" and compare_versions(
            params.hdp_stack_version, '2.2.0.0') >= 0 and compare_versions(
                params.hdp_stack_version, '2.3.0.0') < 0:
        # broker.id = this host's index in the sorted broker host list.
        brokerid = str(sorted(params.kafka_hosts).index(params.hostname))
        kafka_server_config['broker.id'] = brokerid

    #listeners and advertised.listeners are only added in 2.3.0.0 onwards.
    if params.hdp_stack_version != "" and compare_versions(
            params.hdp_stack_version, '2.3.0.0') >= 0:
        if params.security_enabled and params.kafka_kerberos_enabled:
            # Substitute the real hostname and force the SASL listener
            # protocol (unless one is already configured).
            listeners = kafka_server_config['listeners'].replace(
                "localhost", params.hostname)
            if "SASL" not in listeners:
                listeners = listeners.replace("PLAINTEXT", "PLAINTEXTSASL")
            kafka_server_config['listeners'] = listeners
            kafka_server_config['advertised.listeners'] = listeners
        else:
            listeners = kafka_server_config['listeners'].replace(
                "localhost", params.hostname)
            kafka_server_config['listeners'] = listeners
            if 'advertised.listeners' in kafka_server_config:
                advertised_listeners = kafka_server_config[
                    'advertised.listeners'].replace("localhost",
                                                    params.hostname)
                kafka_server_config[
                    'advertised.listeners'] = advertised_listeners
    else:
        # Pre-2.3 stacks use host.name instead of listeners.
        kafka_server_config['host.name'] = params.hostname

    if (params.has_metric_collector):
        # Point the AMS timeline-metrics sink at the collector.
        kafka_server_config[
            'kafka.timeline.metrics.host'] = params.metric_collector_host
        kafka_server_config[
            'kafka.timeline.metrics.port'] = params.metric_collector_port

    # log.dirs is a comma-separated list; filter drops empty entries.
    kafka_data_dir = kafka_server_config['log.dirs']
    Directory(filter(None, kafka_data_dir.split(",")),
              mode=0755,
              cd_access='a',
              owner=params.kafka_user,
              group=params.user_group,
              recursive=True)

    PropertiesFile(
        "server.properties",
        dir=params.conf_dir,
        properties=kafka_server_config,
        owner=params.kafka_user,
        group=params.user_group,
    )

    File(format("{conf_dir}/kafka-env.sh"),
         owner=params.kafka_user,
         content=InlineTemplate(params.kafka_env_sh_template))

    # NOTE(review): prefer 'is not None' over '!= None'.
    if (params.log4j_props != None):
        File(format("{conf_dir}/log4j.properties"),
             mode=0644,
             group=params.user_group,
             owner=params.kafka_user,
             content=params.log4j_props)

    if params.security_enabled and params.kafka_kerberos_enabled:
        TemplateConfig(format("{conf_dir}/kafka_jaas.conf"),
                       owner=params.kafka_user)

        TemplateConfig(format("{conf_dir}/kafka_client_jaas.conf"),
                       owner=params.kafka_user)

    # On some OS this folder could be not exists, so we will create it before pushing there files
    Directory(params.limits_conf_dir,
              recursive=True,
              owner='root',
              group='root')

    File(os.path.join(params.limits_conf_dir, 'kafka.conf'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("kafka.conf.j2"))

    setup_symlink(params.kafka_managed_pid_dir, params.kafka_pid_dir)
    setup_symlink(params.kafka_managed_log_dir, params.kafka_log_dir)
コード例 #19
0
def ams(name=None):
    """
    Configure Ambari Metrics Service (AMS) components on Windows.

    name: which component to configure — 'collector' sets up the metrics
    collector service (conf dirs, ams-site/hbase-site/hdfs-site/core-site
    configs, env file, Windows service user); 'monitor' sets up the metrics
    monitor service (sink-jar symlinks, metric_monitor.ini/metric_groups.conf,
    Windows service user). Any other value is a no-op.

    Note: Windows-specific — uses cmd.exe, mklink and ServiceConfig.
    """
    import params
    if name == 'collector':
        # First-time setup: register the collector Windows service if absent.
        if not check_windows_service_exists(
                params.ams_collector_win_service_name):
            Execute(
                format(
                    "cmd /C cd {ams_collector_home_dir} & ambari-metrics-collector.cmd setup"
                ))

        Directory(params.ams_collector_conf_dir,
                  owner=params.ams_user,
                  create_parents=True)

        Directory(params.ams_checkpoint_dir,
                  owner=params.ams_user,
                  create_parents=True)

        XmlConfig(
            "ams-site.xml",
            conf_dir=params.ams_collector_conf_dir,
            configurations=params.config['configurations']['ams-site'],
            configuration_attributes=params.config['configurationAttributes']
            ['ams-site'],
            owner=params.ams_user,
        )

        # The embedded HBase config is ams-hbase-site, optionally overlaid
        # with ams-hbase-security-site when Kerberos is enabled (security
        # keys win because they are applied last).
        merged_ams_hbase_site = {}
        merged_ams_hbase_site.update(
            params.config['configurations']['ams-hbase-site'])
        if params.security_enabled:
            merged_ams_hbase_site.update(
                params.config['configurations']['ams-hbase-security-site'])

        XmlConfig(
            "hbase-site.xml",
            conf_dir=params.ams_collector_conf_dir,
            configurations=merged_ams_hbase_site,
            configuration_attributes=params.config['configurationAttributes']
            ['ams-hbase-site'],
            owner=params.ams_user,
        )

        # log4j.properties is optional; only written when the stack provides it.
        if (params.log4j_props != None):
            File(os.path.join(params.ams_collector_conf_dir,
                              "log4j.properties"),
                 owner=params.ams_user,
                 content=params.log4j_props)

        File(os.path.join(params.ams_collector_conf_dir, "ams-env.cmd"),
             owner=params.ams_user,
             content=InlineTemplate(params.ams_env_sh_template))

        # Run the collector Windows service as the AMS user.
        ServiceConfig(params.ams_collector_win_service_name,
                      action="change_user",
                      username=params.ams_user,
                      password=Script.get_password(params.ams_user))

        # HDFS-backed rootdir: the collector needs hdfs-site/core-site in both
        # its own conf dir and the embedded HBase conf dir (NN HA resolution).
        if not params.is_local_fs_rootdir:
            # Configuration needed to support NN HA
            XmlConfig(
                "hdfs-site.xml",
                conf_dir=params.ams_collector_conf_dir,
                configurations=params.config['configurations']['hdfs-site'],
                configuration_attributes=params.
                config['configurationAttributes']['hdfs-site'],
                owner=params.ams_user,
                group=params.user_group,
                mode=0644)

            XmlConfig(
                "hdfs-site.xml",
                conf_dir=params.hbase_conf_dir,
                configurations=params.config['configurations']['hdfs-site'],
                configuration_attributes=params.
                config['configurationAttributes']['hdfs-site'],
                owner=params.ams_user,
                group=params.user_group,
                mode=0644)

            XmlConfig(
                "core-site.xml",
                conf_dir=params.ams_collector_conf_dir,
                configurations=params.config['configurations']['core-site'],
                configuration_attributes=params.
                config['configurationAttributes']['core-site'],
                owner=params.ams_user,
                group=params.user_group,
                mode=0644)

            XmlConfig(
                "core-site.xml",
                conf_dir=params.hbase_conf_dir,
                configurations=params.config['configurations']['core-site'],
                configuration_attributes=params.
                config['configurationAttributes']['core-site'],
                owner=params.ams_user,
                group=params.user_group,
                mode=0644)

        else:
            # Local-FS rootdir: run the embedded HBase Windows service as the
            # AMS user instead of distributing HDFS client configs.
            ServiceConfig(params.ams_embedded_hbase_win_service_name,
                          action="change_user",
                          username=params.ams_user,
                          password=Script.get_password(params.ams_user))
            # creating symbolic links on ams jars to make them available to services
            links_pairs = [
                ("%COLLECTOR_HOME%\\hbase\\lib\\ambari-metrics-hadoop-sink-with-common.jar",
                 "%SINK_HOME%\\hadoop-sink\\ambari-metrics-hadoop-sink-with-common-*.jar"
                 ),
            ]
            for link_pair in links_pairs:
                link, target = link_pair
                real_link = os.path.expandvars(link)
                # glob resolves the versioned sink jar; [0] assumes exactly
                # one match — raises IndexError if the jar is missing.
                target = compress_backslashes(
                    glob.glob(os.path.expandvars(target))[0])
                if not os.path.exists(real_link):
                    #TODO check the symlink destination too. Broken in Python 2.x on Windows.
                    Execute('cmd /c mklink "{0}" "{1}"'.format(
                        real_link, target))
        pass

    elif name == 'monitor':
        # First-time setup: register the monitor Windows service if absent.
        if not check_windows_service_exists(
                params.ams_monitor_win_service_name):
            Execute(
                format(
                    "cmd /C cd {ams_monitor_home_dir} & ambari-metrics-monitor.cmd setup"
                ))

        # creating symbolic links on ams jars to make them available to services
        links_pairs = [
            ("%HADOOP_HOME%\\share\\hadoop\\common\\lib\\ambari-metrics-hadoop-sink-with-common.jar",
             "%SINK_HOME%\\hadoop-sink\\ambari-metrics-hadoop-sink-with-common-*.jar"
             ),
            ("%HBASE_HOME%\\lib\\ambari-metrics-hadoop-sink-with-common.jar",
             "%SINK_HOME%\\hadoop-sink\\ambari-metrics-hadoop-sink-with-common-*.jar"
             ),
        ]
        for link_pair in links_pairs:
            link, target = link_pair
            real_link = os.path.expandvars(link)
            target = compress_backslashes(
                glob.glob(os.path.expandvars(target))[0])
            if not os.path.exists(real_link):
                #TODO check the symlink destination too. Broken in Python 2.x on Windows.
                Execute('cmd /c mklink "{0}" "{1}"'.format(real_link, target))

        Directory(params.ams_monitor_conf_dir,
                  owner=params.ams_user,
                  create_parents=True)

        # In-memory aggregation needs extra configs (ams-site, ssl-server)
        # local to the monitor.
        if params.host_in_memory_aggregation:
            if params.log4j_props is not None:
                File(os.path.join(params.ams_monitor_conf_dir,
                                  "log4j.properties"),
                     owner=params.ams_user,
                     content=params.log4j_props)
                pass

            XmlConfig(
                "ams-site.xml",
                conf_dir=params.ams_monitor_conf_dir,
                configurations=params.config['configurations']['ams-site'],
                configuration_attributes=params.
                config['configurationAttributes']['ams-site'],
                owner=params.ams_user,
                group=params.user_group)

            XmlConfig("ssl-server.xml",
                      conf_dir=params.ams_monitor_conf_dir,
                      configurations=params.config['configurations']
                      ['ams-ssl-server'],
                      configuration_attributes=params.
                      config['configurationAttributes']['ams-ssl-server'],
                      owner=params.ams_user,
                      group=params.user_group)
            pass

        TemplateConfig(os.path.join(params.ams_monitor_conf_dir,
                                    "metric_monitor.ini"),
                       owner=params.ams_user,
                       template_tag=None)

        TemplateConfig(os.path.join(params.ams_monitor_conf_dir,
                                    "metric_groups.conf"),
                       owner=params.ams_user,
                       template_tag=None)

        # Run the monitor Windows service as the AMS user.
        ServiceConfig(params.ams_monitor_win_service_name,
                      action="change_user",
                      username=params.ams_user,
                      password=Script.get_password(params.ams_user))
コード例 #20
0
def metadata(type='server'):
    """
    Lay down Apache Atlas configuration.

    type: 'server' performs the full server setup (runtime directories,
    atlas.war, log4j/env files, credential-file ownership, Solr collections);
    any other value configures only the pieces shared with clients
    (conf dir, application properties, JAAS config when secured).
    """
    import params

    # Needed by both Server and Client
    Directory(params.conf_dir,
              mode=0755,
              cd_access='a',
              owner=params.metadata_user,
              group=params.user_group,
              create_parents=True)

    if type == "server":
        # Runtime directories owned by the Atlas service user.
        Directory([params.pid_dir],
                  mode=0755,
                  cd_access='a',
                  owner=params.metadata_user,
                  group=params.user_group,
                  create_parents=True)
        Directory(format('{conf_dir}/solr'),
                  mode=0755,
                  cd_access='a',
                  owner=params.metadata_user,
                  group=params.user_group,
                  create_parents=True,
                  recursive_ownership=True)
        Directory(params.log_dir,
                  mode=0755,
                  cd_access='a',
                  owner=params.metadata_user,
                  group=params.user_group,
                  create_parents=True)
        Directory(params.data_dir,
                  mode=0644,
                  cd_access='a',
                  owner=params.metadata_user,
                  group=params.user_group,
                  create_parents=True)
        Directory(params.expanded_war_dir,
                  mode=0644,
                  cd_access='a',
                  owner=params.metadata_user,
                  group=params.user_group,
                  create_parents=True)
        # Copy the stack's atlas.war into the expanded-war location.
        File(format("{expanded_war_dir}/atlas.war"),
             content=StaticFile(
                 format('{metadata_home}/server/webapp/atlas.war')))
        File(format("{conf_dir}/atlas-log4j.xml"),
             mode=0644,
             owner=params.metadata_user,
             group=params.user_group,
             content=InlineTemplate(params.metadata_log4j_content))
        File(format("{conf_dir}/atlas-env.sh"),
             owner=params.metadata_user,
             group=params.user_group,
             mode=0755,
             content=InlineTemplate(params.metadata_env_content))

        # Hand pre-existing credential/policy files back to the Atlas user
        # (they may have been created with different ownership).
        files_to_chown = [
            format("{conf_dir}/policy-store.txt"),
            format("{conf_dir}/users-credentials.properties")
        ]
        for file in files_to_chown:
            if os.path.exists(file):
                Execute(
                    ('chown', format('{metadata_user}:{user_group}'), file),
                    sudo=True)
                Execute(('chmod', '644', file), sudo=True)

        if params.metadata_solrconfig_content:
            File(format("{conf_dir}/solr/solrconfig.xml"),
                 mode=0644,
                 owner=params.metadata_user,
                 group=params.user_group,
                 content=InlineTemplate(params.metadata_solrconfig_content))

    # Needed by both Server and Client
    PropertiesFile(format('{conf_dir}/{conf_file}'),
                   properties=params.application_properties,
                   mode=0644,
                   owner=params.metadata_user,
                   group=params.user_group)

    if params.security_enabled:
        TemplateConfig(format(params.atlas_jaas_file),
                       owner=params.metadata_user)

    # Server + Solr backend: create the Atlas index collections in Infra Solr.
    if type == 'server' and params.search_backend_solr and params.has_infra_solr:
        solr_cloud_util.setup_solr_client(params.config)
        check_znode()
        jaasFile = params.atlas_jaas_file if params.security_enabled else None
        upload_conf_set('atlas_configs', jaasFile)

        create_collection('vertex_index', 'atlas_configs', jaasFile)
        create_collection('edge_index', 'atlas_configs', jaasFile)
        create_collection('fulltext_index', 'atlas_configs', jaasFile)

    # Setup script run later by the HBase user to grant Atlas its HBase tables.
    File(params.atlas_hbase_setup,
         group=params.user_group,
         owner=params.hbase_user,
         content=Template("atlas_hbase_setup.rb.j2"))

    is_atlas_upgrade_support = check_stack_feature(
        StackFeature.ATLAS_UPGRADE_SUPPORT,
        get_stack_feature_version(params.config))

    if is_atlas_upgrade_support and params.security_enabled:

        File(params.atlas_kafka_setup,
             group=params.user_group,
             owner=params.kafka_user,
             content=Template("atlas_kafka_acl.sh.j2"))

        #  files required only in case if kafka broker is not present on the host as configured component
        if not params.host_with_kafka:
            File(format("{kafka_conf_dir}/kafka-env.sh"),
                 owner=params.kafka_user,
                 content=InlineTemplate(params.kafka_env_sh_template))

            File(format("{kafka_conf_dir}/kafka_jaas.conf"),
                 group=params.user_group,
                 owner=params.kafka_user,
                 content=Template("kafka_jaas.conf.j2"))

    # NameNode HA: Atlas needs its own hdfs-site.xml copy; otherwise remove
    # any stale copy.
    if params.stack_supports_atlas_hdfs_site_on_namenode_ha and len(
            params.namenode_host) > 1:
        XmlConfig(
            "hdfs-site.xml",
            conf_dir=params.conf_dir,
            # NOTE(review): this reads 'configuration_attributes' while other
            # examples in this file read 'configurationAttributes' — confirm
            # which key this Ambari version provides.
            configurations=params.config['configurations']['hdfs-site'],
            configuration_attributes=params.config['configuration_attributes']
            ['hdfs-site'],
            owner=params.metadata_user,
            group=params.user_group,
            mode=0644)
    else:
        File(format('{conf_dir}/hdfs-site.xml'), action="delete")
コード例 #21
0
def metadata(type='server'):
    import params

    # Needed by both Server and Client
    Directory(params.conf_dir,
              mode=0755,
              cd_access='a',
              owner=params.metadata_user,
              group=params.user_group,
              create_parents=True)

    if type == "server":
        Directory([params.pid_dir],
                  mode=0755,
                  cd_access='a',
                  owner=params.metadata_user,
                  group=params.user_group,
                  create_parents=True)
        Directory(format('{conf_dir}/solr'),
                  mode=0755,
                  cd_access='a',
                  owner=params.metadata_user,
                  group=params.user_group,
                  create_parents=True,
                  recursive_ownership=True)
        Directory(params.log_dir,
                  mode=0755,
                  cd_access='a',
                  owner=params.metadata_user,
                  group=params.user_group,
                  create_parents=True)
        Directory(params.data_dir,
                  mode=0644,
                  cd_access='a',
                  owner=params.metadata_user,
                  group=params.user_group,
                  create_parents=True)
        Directory(params.expanded_war_dir,
                  mode=0644,
                  cd_access='a',
                  owner=params.metadata_user,
                  group=params.user_group,
                  create_parents=True)
        File(format("{expanded_war_dir}/atlas.war"),
             content=StaticFile(
                 format('{metadata_home}/server/webapp/atlas.war')))
        File(format("{conf_dir}/atlas-log4j.xml"),
             mode=0644,
             owner=params.metadata_user,
             group=params.user_group,
             content=InlineTemplate(params.metadata_log4j_content))
        File(format("{conf_dir}/atlas-env.sh"),
             owner=params.metadata_user,
             group=params.user_group,
             mode=0755,
             content=InlineTemplate(params.metadata_env_content))

        if not is_empty(params.atlas_admin_username) and not is_empty(
                params.atlas_admin_password):
            psswd_output = hashlib.sha256(
                params.atlas_admin_password).hexdigest()
            ModifyPropertiesFile(
                format("{conf_dir}/users-credentials.properties"),
                properties={
                    format('{atlas_admin_username}'):
                    format('ROLE_ADMIN::{psswd_output}')
                },
                owner=params.metadata_user)

        files_to_chown = [
            format("{conf_dir}/policy-store.txt"),
            format("{conf_dir}/users-credentials.properties")
        ]
        for file in files_to_chown:
            if os.path.exists(file):
                Execute(
                    ('chown', format('{metadata_user}:{user_group}'), file),
                    sudo=True)
                Execute(('chmod', '644', file), sudo=True)

        if params.metadata_solrconfig_content:
            File(format("{conf_dir}/solr/solrconfig.xml"),
                 mode=0644,
                 owner=params.metadata_user,
                 group=params.user_group,
                 content=InlineTemplate(params.metadata_solrconfig_content))

    # Needed by both Server and Client
    PropertiesFile(format('{conf_dir}/{conf_file}'),
                   properties=params.application_properties,
                   mode=0600,
                   owner=params.metadata_user,
                   group=params.user_group)

    if params.security_enabled:
        TemplateConfig(format(params.atlas_jaas_file),
                       owner=params.metadata_user)

    if type == 'server' and params.search_backend_solr and params.has_infra_solr:
        solr_cloud_util.setup_solr_client(params.config)
        check_znode()
        jaasFile = params.atlas_jaas_file if params.security_enabled else None
        upload_conf_set('atlas_configs', jaasFile)

        if params.security_enabled:  # update permissions before creating the collections
            solr_cloud_util.add_solr_roles(
                params.config,
                roles=[
                    params.infra_solr_role_atlas,
                    params.infra_solr_role_ranger_audit,
                    params.infra_solr_role_dev
                ],
                new_service_principals=[params.atlas_jaas_principal])

        create_collection('vertex_index', 'atlas_configs', jaasFile)
        create_collection('edge_index', 'atlas_configs', jaasFile)
        create_collection('fulltext_index', 'atlas_configs', jaasFile)

        if params.security_enabled:
            secure_znode(format('{infra_solr_znode}/configs/atlas_configs'),
                         jaasFile)
            secure_znode(format('{infra_solr_znode}/collections/vertex_index'),
                         jaasFile)
            secure_znode(format('{infra_solr_znode}/collections/edge_index'),
                         jaasFile)
            secure_znode(
                format('{infra_solr_znode}/collections/fulltext_index'),
                jaasFile)

    File(params.atlas_hbase_setup,
         group=params.user_group,
         owner=params.hbase_user,
         content=Template("atlas_hbase_setup.rb.j2"))

    is_atlas_upgrade_support = check_stack_feature(
        StackFeature.ATLAS_UPGRADE_SUPPORT,
        get_stack_feature_version(params.config))

    if is_atlas_upgrade_support and params.security_enabled:

        File(params.atlas_kafka_setup,
             group=params.user_group,
             owner=params.kafka_user,
             content=Template("atlas_kafka_acl.sh.j2"))

        #  files required only in case if kafka broker is not present on the host as configured component
        if not params.host_with_kafka:
            File(format("{kafka_conf_dir}/kafka-env.sh"),
                 owner=params.kafka_user,
                 content=InlineTemplate(params.kafka_env_sh_template))

            File(format("{kafka_conf_dir}/kafka_jaas.conf"),
                 group=params.user_group,
                 owner=params.kafka_user,
                 content=Template("kafka_jaas.conf.j2"))

    if params.stack_supports_atlas_hdfs_site_on_namenode_ha and len(
            params.namenode_host) > 1:
        XmlConfig(
            "hdfs-site.xml",
            conf_dir=params.conf_dir,
            configurations=params.config['configurations']['hdfs-site'],
            configuration_attributes=params.config['configurationAttributes']
            ['hdfs-site'],
            owner=params.metadata_user,
            group=params.user_group,
            mode=0644)
    else:
        File(format('{conf_dir}/hdfs-site.xml'), action="delete")
    '''
    Atlas requires hadoop core-site.xml to resolve users/groups synced in HadoopUGI for
    authentication and authorization process. Earlier the core-site.xml was available in
    Hbase conf directory which is a part of Atlas class-path, from stack 2.6 onwards,
    core-site.xml is no more available in Hbase conf directory. Hence need to create
    core-site.xml in Atlas conf directory.
    '''
    if params.stack_supports_atlas_core_site and params.has_namenode:
        XmlConfig(
            "core-site.xml",
            conf_dir=params.conf_dir,
            configurations=params.config['configurations']['core-site'],
            configuration_attributes=params.config['configurationAttributes']
            ['core-site'],
            owner=params.metadata_user,
            group=params.user_group,
            mode=0644)

    Directory(
        format('{metadata_home}/'),
        owner=params.metadata_user,
        group=params.user_group,
        recursive_ownership=True,
    )
コード例 #22
0
def hbase_TemplateConfig(name, tag=None, user=None):
    """
    Render the template-backed config file *name* into the HBase conf dir.

    tag: optional template tag passed through to TemplateConfig.
    user: owner of the generated file (None keeps the resource default).
    """
    import params

    config_path = os.path.join(params.hbase_conf_dir, name)
    TemplateConfig(config_path, owner=user, template_tag=tag)
コード例 #23
0
ファイル: kafka.py プロジェクト: totongn/ambari
def kafka(upgrade_type=None):
    import params
    ensure_base_directories()

    kafka_server_config = mutable_config_dict(
        params.config['configurations']['kafka-broker'])
    # This still has an issue of hostnames being alphabetically out-of-order for broker.id in HDP-2.2.
    # Starting in HDP 2.3, Kafka handles the generation of broker.id so Ambari doesn't have to.

    effective_version = params.stack_version_formatted if upgrade_type is None else format_stack_version(
        params.version)
    Logger.info(format("Effective stack version: {effective_version}"))

    # In HDP-2.2 (Apache Kafka 0.8.1.1) we used to generate broker.ids based on hosts and add them to
    # kafka's server.properties. In future version brokers can generate their own ids based on zookeeper seq
    # We need to preserve the broker.id when user is upgrading from HDP-2.2 to any higher version.
    # Once its preserved it will be written to kafka.log.dirs/meta.properties and it will be used from there on
    # similarly we need preserve port as well during the upgrade

    if upgrade_type is not None and params.upgrade_direction == Direction.UPGRADE and \
      check_stack_feature(StackFeature.CREATE_KAFKA_BROKER_ID, params.current_version) and \
      check_stack_feature(StackFeature.KAFKA_LISTENERS, params.version):
        if len(params.kafka_hosts
               ) > 0 and params.hostname in params.kafka_hosts:
            brokerid = str(sorted(params.kafka_hosts).index(params.hostname))
            kafka_server_config['broker.id'] = brokerid
            Logger.info(format("Calculating broker.id as {brokerid}"))
        if 'port' in kafka_server_config:
            port = kafka_server_config['port']
            Logger.info(format("Port config from previous verson: {port}"))
            listeners = kafka_server_config['listeners']
            kafka_server_config['listeners'] = listeners.replace("6667", port)
            Logger.info(
                format("Kafka listeners after the port update: {listeners}"))
            del kafka_server_config['port']


    if effective_version is not None and effective_version != "" and \
      check_stack_feature(StackFeature.CREATE_KAFKA_BROKER_ID, effective_version):
        if len(params.kafka_hosts
               ) > 0 and params.hostname in params.kafka_hosts:
            brokerid = str(sorted(params.kafka_hosts).index(params.hostname))
            kafka_server_config['broker.id'] = brokerid
            Logger.info(format("Calculating broker.id as {brokerid}"))

    # listeners and advertised.listeners are only added in 2.3.0.0 onwards.
    if effective_version is not None and effective_version != "" and \
       check_stack_feature(StackFeature.KAFKA_LISTENERS, effective_version):

        listeners = kafka_server_config['listeners'].replace(
            "localhost", params.hostname)
        Logger.info(format("Kafka listeners: {listeners}"))
        kafka_server_config['listeners'] = listeners

        if params.security_enabled and params.kafka_kerberos_enabled:
            Logger.info("Kafka kerberos security is enabled.")
            kafka_server_config['advertised.listeners'] = listeners
            Logger.info(format("Kafka advertised listeners: {listeners}"))
        elif 'advertised.listeners' in kafka_server_config:
            advertised_listeners = kafka_server_config[
                'advertised.listeners'].replace("localhost", params.hostname)
            kafka_server_config['advertised.listeners'] = advertised_listeners
            Logger.info(
                format("Kafka advertised listeners: {advertised_listeners}"))
    else:
        kafka_server_config['host.name'] = params.hostname

    if params.has_metric_collector:
        kafka_server_config[
            'kafka.timeline.metrics.hosts'] = params.ams_collector_hosts
        kafka_server_config[
            'kafka.timeline.metrics.port'] = params.metric_collector_port
        kafka_server_config[
            'kafka.timeline.metrics.protocol'] = params.metric_collector_protocol
        kafka_server_config[
            'kafka.timeline.metrics.truststore.path'] = params.metric_truststore_path
        kafka_server_config[
            'kafka.timeline.metrics.truststore.type'] = params.metric_truststore_type
        kafka_server_config[
            'kafka.timeline.metrics.truststore.password'] = params.metric_truststore_password

    kafka_data_dir = kafka_server_config['log.dirs']
    kafka_data_dirs = filter(None, kafka_data_dir.split(","))
    Directory(
        kafka_data_dirs,
        mode=0755,
        cd_access='a',
        owner=params.kafka_user,
        group=params.user_group,
        create_parents=True,
        recursive_ownership=True,
    )

    PropertiesFile(
        "server.properties",
        dir=params.conf_dir,
        properties=kafka_server_config,
        owner=params.kafka_user,
        group=params.user_group,
    )

    File(format("{conf_dir}/kafka-env.sh"),
         owner=params.kafka_user,
         content=InlineTemplate(params.kafka_env_sh_template))

    if (params.log4j_props != None):
        File(format("{conf_dir}/log4j.properties"),
             mode=0644,
             group=params.user_group,
             owner=params.kafka_user,
             content=params.log4j_props)

    if params.security_enabled and params.kafka_kerberos_enabled:
        if params.kafka_jaas_conf_template:
            File(format("{conf_dir}/kafka_jaas.conf"),
                 owner=params.kafka_user,
                 content=InlineTemplate(params.kafka_jaas_conf_template))
        else:
            TemplateConfig(format("{conf_dir}/kafka_jaas.conf"),
                           owner=params.kafka_user)

        if params.kafka_client_jaas_conf_template:
            File(format("{conf_dir}/kafka_client_jaas.conf"),
                 owner=params.kafka_user,
                 content=InlineTemplate(
                     params.kafka_client_jaas_conf_template))
        else:
            TemplateConfig(format("{conf_dir}/kafka_client_jaas.conf"),
                           owner=params.kafka_user)

    # On some OS this folder could be not exists, so we will create it before pushing there files
    Directory(params.limits_conf_dir,
              create_parents=True,
              owner='root',
              group='root')

    File(os.path.join(params.limits_conf_dir, 'kafka.conf'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("kafka.conf.j2"))

    File(os.path.join(params.conf_dir, 'tools-log4j.properties'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("tools-log4j.properties.j2"))

    setup_symlink(params.kafka_managed_pid_dir, params.kafka_pid_dir)
    setup_symlink(params.kafka_managed_log_dir, params.kafka_log_dir)
コード例 #24
0
def kafka(upgrade_type=None):
    import params
    ensure_base_directories()

    kafka_server_config = mutable_config_dict(params.config['configurations']['kafka-broker'])
    # This still has an issue of hostnames being alphabetically out-of-order for broker.id in HDP-2.2.
    # Starting in HDP 2.3, Kafka handles the generation of broker.id so Ambari doesn't have to.

    effective_version = params.stack_version_formatted if upgrade_type is None else format_stack_version(params.version)
    Logger.info(format("Effective stack version: {effective_version}"))

    # listeners and advertised.listeners are only added in 2.3.0.0 onwards.
    if effective_version is not None and effective_version != "" and \
       check_stack_feature(StackFeature.KAFKA_LISTENERS, effective_version):

       listeners = kafka_server_config['listeners'].replace("localhost", params.hostname)
       Logger.info(format("Kafka listeners: {listeners}"))
       kafka_server_config['listeners'] = listeners       

       if params.security_enabled and params.kafka_kerberos_enabled:
         Logger.info("Kafka kerberos security is enabled.")
         kafka_server_config['advertised.listeners'] = listeners
         Logger.info(format("Kafka advertised listeners: {listeners}"))
       elif 'advertised.listeners' in kafka_server_config:
         advertised_listeners = kafka_server_config['advertised.listeners'].replace("localhost", params.hostname)
         kafka_server_config['advertised.listeners'] = advertised_listeners
         Logger.info(format("Kafka advertised listeners: {advertised_listeners}"))
    else:
      kafka_server_config['host.name'] = params.hostname

    if params.has_metric_collector:
      kafka_server_config['kafka.timeline.metrics.hosts'] = params.ams_collector_hosts
      kafka_server_config['kafka.timeline.metrics.port'] = params.metric_collector_port
      kafka_server_config['kafka.timeline.metrics.protocol'] = params.metric_collector_protocol
      kafka_server_config['kafka.timeline.metrics.truststore.path'] = params.metric_truststore_path
      kafka_server_config['kafka.timeline.metrics.truststore.type'] = params.metric_truststore_type
      kafka_server_config['kafka.timeline.metrics.truststore.password'] = params.metric_truststore_password

    kafka_data_dir = kafka_server_config['log.dirs']
    kafka_data_dirs = filter(None, kafka_data_dir.split(","))

    rack="/default-rack"
    i=0
    if len(params.all_racks) > 0:
     for host in params.all_hosts:
      if host == params.hostname:
        rack=params.all_racks[i]
        break
      i=i+1

    kafka_server_config['broker.rack']=rack

    Directory(kafka_data_dirs,
              mode=0755,
              cd_access='a',
              owner=params.kafka_user,
              group=params.user_group,
              create_parents = True,
              recursive_ownership = True,
    )

    PropertiesFile("server.properties",
                      dir=params.conf_dir,
                      properties=kafka_server_config,
                      owner=params.kafka_user,
                      group=params.user_group,
    )

    File(format("{conf_dir}/kafka-env.sh"),
          owner=params.kafka_user,
          content=InlineTemplate(params.kafka_env_sh_template)
     )

    if (params.log4j_props != None):
        File(format("{conf_dir}/log4j.properties"),
             mode=0644,
             group=params.user_group,
             owner=params.kafka_user,
             content=InlineTemplate(params.log4j_props)
         )

    if params.security_enabled and params.kafka_kerberos_enabled:
      if params.kafka_jaas_conf_template:
        File(format("{conf_dir}/kafka_jaas.conf"),
             owner=params.kafka_user,
             content=InlineTemplate(params.kafka_jaas_conf_template)
        )
      else:
        TemplateConfig(format("{conf_dir}/kafka_jaas.conf"),
                         owner=params.kafka_user)

      if params.kafka_client_jaas_conf_template:
        File(format("{conf_dir}/kafka_client_jaas.conf"),
             owner=params.kafka_user,
             content=InlineTemplate(params.kafka_client_jaas_conf_template)
        )
      else:
        TemplateConfig(format("{conf_dir}/kafka_client_jaas.conf"),
                       owner=params.kafka_user)

    # On some OS this folder could be not exists, so we will create it before pushing there files
    Directory(params.limits_conf_dir,
              create_parents = True,
              owner='root',
              group='root'
    )

    File(os.path.join(params.limits_conf_dir, 'kafka.conf'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("kafka.conf.j2")
    )

    File(os.path.join(params.conf_dir, 'tools-log4j.properties'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("tools-log4j.properties.j2")
         )

    setup_symlink(params.kafka_managed_pid_dir, params.kafka_pid_dir)
    setup_symlink(params.kafka_managed_log_dir, params.kafka_log_dir)
コード例 #25
0
def knox():
    import params
    Directory(
        [
            params.knox_data_dir, params.knox_logs_dir, params.knox_pid_dir,
            params.knox_conf_dir,
            os.path.join(params.knox_conf_dir, "topologies")
        ],
        owner=params.knox_user,
        group=params.knox_group,
        create_parents=True,
        cd_access="a",
        mode=0755,
        recursive_ownership=True,
    )

    XmlConfig(
        "gateway-site.xml",
        conf_dir=params.knox_conf_dir,
        configurations=params.config['configurations']['gateway-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['gateway-site'],
        owner=params.knox_user,
        group=params.knox_group,
    )

    File(format("{params.knox_conf_dir}/gateway-log4j.properties"),
         mode=0644,
         group=params.knox_group,
         owner=params.knox_user,
         content=InlineTemplate(params.gateway_log4j))

    File(format("{params.knox_conf_dir}/topologies/default.xml"),
         group=params.knox_group,
         owner=params.knox_user,
         content=InlineTemplate(params.topology_template))
    File(format("{params.knox_conf_dir}/topologies/admin.xml"),
         group=params.knox_group,
         owner=params.knox_user,
         content=InlineTemplate(params.admin_topology_template))

    if params.version_formatted and check_stack_feature(
            StackFeature.KNOX_SSO_TOPOLOGY, params.version_formatted):
        File(os.path.join(params.knox_conf_dir, "topologies", "knoxsso.xml"),
             group=params.knox_group,
             owner=params.knox_user,
             content=InlineTemplate(params.knoxsso_topology_template))

    if params.security_enabled:
        TemplateConfig(format("{knox_conf_dir}/krb5JAASLogin.conf"),
                       owner=params.knox_user,
                       template_tag=None)

    cmd = format(
        '{knox_client_bin} create-master --master {knox_master_secret!p}')
    master_secret_exist = as_user(format('test -f {knox_master_secret_path}'),
                                  params.knox_user)

    Execute(
        cmd,
        user=params.knox_user,
        environment={'JAVA_HOME': params.java_home},
        not_if=master_secret_exist,
    )

    cmd = format(
        '{knox_client_bin} create-cert --hostname {knox_host_name_in_cluster}')
    cert_store_exist = as_user(format('test -f {knox_cert_store_path}'),
                               params.knox_user)

    Execute(
        cmd,
        user=params.knox_user,
        environment={'JAVA_HOME': params.java_home},
        not_if=cert_store_exist,
    )
Code example #26
0
File: druid.py  Project: Liujinan001/ambari-2.7.5
def druid(upgrade_type=None, nodeType=None):
    """Render every Druid configuration artifact on this host.

    Writes druid-env.sh, the shared common.runtime.properties, log4j and
    logrotate configs, the Kerberos JAAS file on secured clusters, then a
    runtime.properties and jvm.config per node type, and finally ensures
    HDFS directories, DB connectors and other dependencies are in place.

    Args:
        upgrade_type: unused in this function; kept for interface
            compatibility with the calling script.
        nodeType: unused in this function; the loop below always renders
            configs for all node types.
    """
    import params
    ensure_base_directories()

    # Environment Variables
    File(format("{params.druid_conf_dir}/druid-env.sh"),
         owner=params.druid_user,
         content=InlineTemplate(params.druid_env_sh_template),
         mode=0700)

    # common config
    druid_common_config = mutable_config_dict(
        params.config['configurations']['druid-common'])
    # User cannot override below configs
    druid_common_config['druid.host'] = params.hostname
    druid_common_config[
        'druid.extensions.directory'] = params.druid_extensions_dir
    druid_common_config[
        'druid.extensions.hadoopDependenciesDir'] = params.druid_hadoop_dependencies_dir
    druid_common_config[
        'druid.selectors.indexing.serviceName'] = params.config[
            'configurations']['druid-overlord']['druid.service']
    druid_common_config['druid.selectors.coordinator.serviceName'] = \
      params.config['configurations']['druid-coordinator']['druid.service']
    # NOTE(review): eval() parses configuration-supplied list strings. The
    # values are admin-controlled cluster config, but ast.literal_eval would
    # be the safer parser here — confirm the inputs are trusted.
    druid_common_config['druid.extensions.loadList'] = json.dumps(
        eval(params.druid_extensions_load_list) +
        eval(params.druid_security_extensions_load_list))

    # Delete the user and password if storage is derby, otherwise derby will fail.
    if 'derby' == druid_common_config['druid.metadata.storage.type']:
        del druid_common_config['druid.metadata.storage.connector.user']
        del druid_common_config['druid.metadata.storage.connector.password']

    druid_env_config = mutable_config_dict(
        params.config['configurations']['druid-env'])

    # Shared runtime properties read by every node type.
    PropertiesFile("common.runtime.properties",
                   dir=params.druid_common_conf_dir,
                   properties=druid_common_config,
                   owner=params.druid_user,
                   group=params.user_group,
                   mode=0600)
    Logger.info("Created common.runtime.properties")

    File(format("{params.druid_common_conf_dir}/druid-log4j.xml"),
         mode=0644,
         owner=params.druid_user,
         group=params.user_group,
         content=InlineTemplate(params.log4j_props))
    Logger.info("Created log4j file")

    File("/etc/logrotate.d/druid",
         mode=0644,
         owner='root',
         group='root',
         content=InlineTemplate(params.logrotate_props))

    Logger.info("Created log rotate file")

    # Kerberos JAAS config, only on secured clusters.
    if params.security_enabled:
        TemplateConfig(params.druid_jaas_file,
                       owner=params.druid_user,
                       group=params.user_group,
                       mode=0644)

    # node specific configs
    for node_type in [
            'coordinator', 'overlord', 'historical', 'broker', 'middleManager',
            'router'
    ]:
        node_config_dir = format('{params.druid_conf_dir}/{node_type}')
        node_type_lowercase = node_type.lower()

        # Write runtime.properties file
        node_config = mutable_config_dict(params.config['configurations'][
            format('druid-{node_type_lowercase}')])
        if (node_type == 'middleManager'):
            # Replace correct values for stack_version and druid_jaas_file
            node_config['druid.indexer.runner.javaOpts'] = format(
                node_config['druid.indexer.runner.javaOpts'])
        PropertiesFile("runtime.properties",
                       dir=node_config_dir,
                       properties=node_config,
                       owner=params.druid_user,
                       group=params.user_group,
                       mode=0600)
        Logger.info(
            format("Created druid-{node_type_lowercase} runtime.properties"))

        # Write jvm configs — heap/direct-memory sizes come from druid-env.
        File(
            format('{node_config_dir}/jvm.config'),
            owner=params.druid_user,
            group=params.user_group,
            content=InlineTemplate(
                "-server \n-Xms{{node_heap_memory}}m \n-Xmx{{node_heap_memory}}m \n-XX:MaxDirectMemorySize={{node_direct_memory}}m \n-Dlog4j.configurationFile={{log4j_config_file}} \n-Dlog4j.debug \n{{node_jvm_opts}}",
                node_heap_memory=druid_env_config[format(
                    'druid.{node_type_lowercase}.jvm.heap.memory')],
                log4j_config_file=format(
                    "{params.druid_common_conf_dir}/druid-log4j.xml"),
                node_direct_memory=druid_env_config[format(
                    'druid.{node_type_lowercase}.jvm.direct.memory')],
                node_jvm_opts=format(druid_env_config[format(
                    'druid.{node_type_lowercase}.jvm.opts')])))
        Logger.info(format("Created druid-{node_type_lowercase} jvm.config"))
        # Handling hadoop Lzo jars if enable and node type is hadoop related eg Overlords and MMs
        if ['middleManager', 'overlord'
            ].__contains__(node_type_lowercase) and params.lzo_enabled:
            try:
                Logger.info(
                    format(
                        "Copying hadoop lzo jars from {hadoop_lib_home} to {druid_hadoop_dependencies_dir}/hadoop-client/*/"
                    ))
                Execute(
                    format(
                        '{sudo} cp {hadoop_lib_home}/hadoop-lzo*.jar {druid_hadoop_dependencies_dir}/hadoop-client/*/'
                    ))
            except Fail as ex:
                # Best-effort: missing LZO jars are logged, not fatal.
                Logger.info(
                    format(
                        "No Hadoop LZO found at {hadoop_lib_home}/hadoop-lzo*.jar"
                    ))

    generate_logfeeder_input_config(
        'druid', Template("input.config-druid.json.j2",
                          extra_imports=[default]))

    # All druid nodes have dependency on hdfs_client
    ensure_hadoop_directories()
    download_database_connector_if_needed()
    # Pull all required dependencies
    pulldeps()
Code example #27
0
File: knox.py  Project: willwill1101/ambari
def knox():
    import params

    directories = [
        params.knox_data_dir, params.knox_logs_dir, params.knox_pid_dir,
        params.knox_conf_dir,
        os.path.join(params.knox_conf_dir, "topologies")
    ]
    for directory in directories:
        Directory(
            directory,
            owner=params.knox_user,
            group=params.knox_group,
            recursive=True,
            cd_access="a",
            mode=0755,
        )

    XmlConfig(
        "gateway-site.xml",
        conf_dir=params.knox_conf_dir,
        configurations=params.config['configurations']['gateway-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['gateway-site'],
        owner=params.knox_user,
        group=params.knox_group,
    )

    File(format("{params.knox_conf_dir}/gateway-log4j.properties"),
         mode=0644,
         group=params.knox_group,
         owner=params.knox_user,
         content=params.gateway_log4j)

    File(format("{params.knox_conf_dir}/topologies/default.xml"),
         group=params.knox_group,
         owner=params.knox_user,
         content=InlineTemplate(params.topology_template))
    if params.security_enabled:
        TemplateConfig(format("{knox_conf_dir}/krb5JAASLogin.conf"),
                       owner=params.knox_user,
                       template_tag=None)

    dirs_to_chown = tuple(directories)
    cmd = ('chown', '-R', format('{knox_user}:{knox_group}')) + dirs_to_chown
    Execute(
        cmd,
        sudo=True,
    )

    cmd = format(
        '{knox_client_bin} create-master --master {knox_master_secret!p}')
    master_secret_exist = as_user(format('test -f {knox_master_secret_path}'),
                                  params.knox_user)

    Execute(
        cmd,
        user=params.knox_user,
        environment={'JAVA_HOME': params.java_home},
        not_if=master_secret_exist,
    )

    cmd = format(
        '{knox_client_bin} create-cert --hostname {knox_host_name_in_cluster}')
    cert_store_exist = as_user(format('test -f {knox_cert_store_path}'),
                               params.knox_user)

    Execute(
        cmd,
        user=params.knox_user,
        environment={'JAVA_HOME': params.java_home},
        not_if=cert_store_exist,
    )
Code example #28
0
def metadata(type='server'):
    import params

    # Needed by both Server and Client
    Directory(
        params.conf_dir,
        mode=0755,
        cd_access='a',
        owner=params.metadata_user,
        group=params.user_group,
    )

    if type == "server":
        Directory([params.pid_dir],
                  mode=0755,
                  cd_access='a',
                  owner=params.metadata_user,
                  group=params.user_group)
        Directory(
            format('{conf_dir}/solr'),
            mode=0755,
            cd_access='a',
            owner=params.metadata_user,
            group=params.user_group,
        )
        Execute(("chown", "-R", params.metadata_user + ":" + params.user_group,
                 format('{conf_dir}/solr')),
                sudo=True)
        Directory(params.log_dir,
                  mode=0755,
                  cd_access='a',
                  owner=params.metadata_user,
                  group=params.user_group)
        Directory(params.data_dir,
                  mode=0644,
                  cd_access='a',
                  owner=params.metadata_user,
                  group=params.user_group)
        Directory(params.expanded_war_dir,
                  mode=0644,
                  cd_access='a',
                  owner=params.metadata_user,
                  group=params.user_group)
        File(format("{expanded_war_dir}/atlas.war"),
             content=StaticFile(
                 format('{metadata_home}/server/webapp/atlas.war')))
        File(format("{conf_dir}/atlas-log4j.xml"),
             mode=0644,
             owner=params.metadata_user,
             group=params.user_group,
             content=InlineTemplate(params.metadata_log4j_content))
        File(format("{conf_dir}/atlas-env.sh"),
             owner=params.metadata_user,
             group=params.user_group,
             mode=0755,
             content=InlineTemplate(params.metadata_env_content))

        files_to_chown = [
            format("{conf_dir}/policy-store.txt"),
            format("{conf_dir}/users-credentials.properties")
        ]
        for file in files_to_chown:
            if os.path.exists(file):
                Execute(
                    ('chown', format('{metadata_user}:{user_group}'), file),
                    sudo=True)
                Execute(('chmod', '644', file), sudo=True)

        if params.metadata_solrconfig_content:
            File(format("{conf_dir}/solr/solrconfig.xml"),
                 mode=0644,
                 owner=params.metadata_user,
                 group=params.user_group,
                 content=InlineTemplate(params.metadata_solrconfig_content))

    # Needed by both Server and Client
    PropertiesFile(format('{conf_dir}/{conf_file}'),
                   properties=params.application_properties,
                   mode=0644,
                   owner=params.metadata_user,
                   group=params.user_group)

    if params.security_enabled:
        TemplateConfig(format(params.atlas_jaas_file),
                       owner=params.metadata_user)

    if type == 'server' and params.search_backend_solr:
        create_collection('vertex_index')
        create_collection('edge_index')
        create_collection('fulltext_index')

    File(params.atlas_hbase_setup,
         group=params.user_group,
         owner=params.hbase_user,
         content=Template("atlas_hbase_setup.rb.j2"))
Code example #29
0
def ams(name=None, action=None):
  """Configure one Ambari Metrics System (AMS) component on this host.

  Args:
      name: which component to configure — 'collector', 'monitor' or
          'grafana'; any other value is a no-op.
      action: only consulted for grafana — any value other than 'stop'
          triggers a recursive chown of the grafana directories.
  """
  import params

  if name == 'collector':
    # Collector conf and checkpoint directories, owned by the AMS user.
    Directory(params.ams_collector_conf_dir,
              owner=params.ams_user,
              group=params.user_group,
              create_parents = True,
              recursive_ownership = True,
    )

    Directory(params.ams_checkpoint_dir,
              owner=params.ams_user,
              group=params.user_group,
              cd_access="a",
              create_parents = True,
              recursive_ownership = True
    )

    XmlConfig("ams-site.xml",
              conf_dir=params.ams_collector_conf_dir,
              configurations=params.config['configurations']['ams-site'],
              configuration_attributes=params.config['configuration_attributes']['ams-site'],
              owner=params.ams_user,
              group=params.user_group
    )

    XmlConfig("ssl-server.xml",
              conf_dir=params.ams_collector_conf_dir,
              configurations=params.config['configurations']['ams-ssl-server'],
              configuration_attributes=params.config['configuration_attributes']['ams-ssl-server'],
              owner=params.ams_user,
              group=params.user_group
    )

    # Merge the embedded-HBase site config, layering security overrides on
    # secured clusters.
    merged_ams_hbase_site = {}
    merged_ams_hbase_site.update(params.config['configurations']['ams-hbase-site'])
    if params.security_enabled:
      merged_ams_hbase_site.update(params.config['configurations']['ams-hbase-security-site'])

    # Add phoenix client side overrides
    merged_ams_hbase_site['phoenix.query.maxGlobalMemoryPercentage'] = str(params.phoenix_max_global_mem_percent)
    merged_ams_hbase_site['phoenix.spool.directory'] = params.phoenix_client_spool_dir

    XmlConfig( "hbase-site.xml",
               conf_dir = params.ams_collector_conf_dir,
               configurations = merged_ams_hbase_site,
               configuration_attributes=params.config['configuration_attributes']['ams-hbase-site'],
               owner = params.ams_user,
               group = params.user_group
    )

    if params.security_enabled:
      TemplateConfig(os.path.join(params.hbase_conf_dir, "ams_collector_jaas.conf"),
                     owner = params.ams_user,
                     template_tag = None)

    if (params.log4j_props != None):
      File(format("{params.ams_collector_conf_dir}/log4j.properties"),
           mode=0644,
           group=params.user_group,
           owner=params.ams_user,
           content=InlineTemplate(params.log4j_props)
      )

    File(format("{ams_collector_conf_dir}/ams-env.sh"),
         owner=params.ams_user,
         content=InlineTemplate(params.ams_env_sh_template)
    )

    Directory(params.ams_collector_log_dir,
              owner=params.ams_user,
              group=params.user_group,
              cd_access="a",
              create_parents = True,
              mode=0755,
    )

    Directory(params.ams_collector_pid_dir,
              owner=params.ams_user,
              group=params.user_group,
              cd_access="a",
              create_parents = True,
              mode=0755,
    )

    # Hack to allow native HBase libs to be included for embedded hbase
    File(os.path.join(params.ams_hbase_home_dir, "bin", "hadoop"),
         owner=params.ams_user,
         mode=0755
    )

    # On some OS this folder could be not exists, so we will create it before pushing there files
    Directory(params.limits_conf_dir,
              create_parents = True,
              owner='root',
              group='root'
    )

    # Setting up security limits
    File(os.path.join(params.limits_conf_dir, 'ams.conf'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("ams.conf.j2")
    )

    # Phoenix spool file dir if not /tmp
    if not os.path.exists(params.phoenix_client_spool_dir):
      Directory(params.phoenix_client_spool_dir,
                owner=params.ams_user,
                mode = 0755,
                group=params.user_group,
                cd_access="a",
                create_parents = True
      )
    pass

    # Distributed mode on a real filesystem needs HDFS client configs so the
    # collector can resolve NameNode HA.
    if not params.is_local_fs_rootdir and params.is_ams_distributed:
      # Configuration needed to support NN HA
      XmlConfig("hdfs-site.xml",
            conf_dir=params.ams_collector_conf_dir,
            configurations=params.config['configurations']['hdfs-site'],
            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
            owner=params.ams_user,
            group=params.user_group,
            mode=0644
      )

      XmlConfig("hdfs-site.xml",
            conf_dir=params.hbase_conf_dir,
            configurations=params.config['configurations']['hdfs-site'],
            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
            owner=params.ams_user,
            group=params.user_group,
            mode=0644
      )

      # Remove spnego configs from core-site, since AMS does not support spnego (AMBARI-14384)
      truncated_core_site = {}
      truncated_core_site.update(params.config['configurations']['core-site'])
      if 'core-site' in params.config['configurations']:
        if 'hadoop.http.authentication.type' in params.config['configurations']['core-site']:
          truncated_core_site.pop('hadoop.http.authentication.type')
        if 'hadoop.http.filter.initializers' in params.config['configurations']['core-site']:
          truncated_core_site.pop('hadoop.http.filter.initializers')

      XmlConfig("core-site.xml",
                conf_dir=params.ams_collector_conf_dir,
                configurations=truncated_core_site,
                configuration_attributes=params.config['configuration_attributes']['core-site'],
                owner=params.ams_user,
                group=params.user_group,
                mode=0644
      )

      XmlConfig("core-site.xml",
                conf_dir=params.hbase_conf_dir,
                configurations=truncated_core_site,
                configuration_attributes=params.config['configuration_attributes']['core-site'],
                owner=params.ams_user,
                group=params.user_group,
                mode=0644
      )

    if params.metric_collector_https_enabled:
      export_ca_certs(params.ams_collector_conf_dir)

    pass

  elif name == 'monitor':
    # Monitor conf, log and pid directories.
    Directory(params.ams_monitor_conf_dir,
              owner=params.ams_user,
              group=params.user_group,
              create_parents = True
    )

    Directory(params.ams_monitor_log_dir,
              owner=params.ams_user,
              group=params.user_group,
              mode=0755,
              create_parents = True
    )

    Execute(format("{sudo} chown -R {ams_user}:{user_group} {ams_monitor_log_dir}")
            )

    Directory(params.ams_monitor_pid_dir,
              owner=params.ams_user,
              group=params.user_group,
              cd_access="a",
              mode=0755,
              create_parents = True
    )

    # Build directory for the bundled psutil native extension.
    Directory(format("{ams_monitor_dir}/psutil/build"),
              owner=params.ams_user,
              group=params.user_group,
              cd_access="a",
              create_parents = True)

    Execute(format("{sudo} chown -R {ams_user}:{user_group} {ams_monitor_dir}")
    )

    TemplateConfig(
      format("{ams_monitor_conf_dir}/metric_monitor.ini"),
      owner=params.ams_user,
      group=params.user_group,
      template_tag=None
    )

    TemplateConfig(
      format("{ams_monitor_conf_dir}/metric_groups.conf"),
      owner=params.ams_user,
      group=params.user_group,
      template_tag=None
    )

    File(format("{ams_monitor_conf_dir}/ams-env.sh"),
         owner=params.ams_user,
         content=InlineTemplate(params.ams_env_sh_template)
    )

    if params.metric_collector_https_enabled:
      export_ca_certs(params.ams_monitor_conf_dir)

    pass
  elif name == 'grafana':

    # Grafana conf, log, data and pid directories.
    ams_grafana_directories = [
                              params.ams_grafana_conf_dir,
                              params.ams_grafana_log_dir,
                              params.ams_grafana_data_dir,
                              params.ams_grafana_pid_dir
                              ]

    for ams_grafana_directory in ams_grafana_directories:
      Directory(ams_grafana_directory,
                owner=params.ams_user,
                group=params.user_group,
                mode=0755,
                create_parents = True,
                recursive_ownership = True
                )

    File(format("{ams_grafana_conf_dir}/ams-grafana-env.sh"),
         owner=params.ams_user,
         group=params.user_group,
         content=InlineTemplate(params.ams_grafana_env_sh_template)
         )

    # 0600: the ini can carry the grafana admin password.
    File(format("{ams_grafana_conf_dir}/ams-grafana.ini"),
         owner=params.ams_user,
         group=params.user_group,
         content=InlineTemplate(params.ams_grafana_ini_template),
         mode=0600
         )

    if action != 'stop':
      for dir in ams_grafana_directories:
        Execute(('chown', '-R', params.ams_user, dir),
                sudo=True
                )

    if params.metric_collector_https_enabled:
      export_ca_certs(params.ams_grafana_conf_dir)

    pass
Code example #30
0
def kafka(upgrade_type=None):
    import params
    ensure_base_directories()

    File(
        format("{kafka_bin}"),
        owner=params.kafka_user,
        mode=0755,
        content=InlineTemplate(params.kafka_init_content))

    kafka_server_config = mutable_config_dict(
        params.config['configurations']['kafka-broker'])

    # listeners and advertised.
    listeners = kafka_server_config['listeners'].replace(
        "localhost", params.hostname)
    Logger.info(format("Kafka listeners: {listeners}"))
    kafka_server_config['listeners'] = listeners

    if params.kerberos_security_enabled and params.kafka_kerberos_enabled:
        Logger.info("Kafka kerberos security is enabled.")

        if "SASL" not in listeners:
            listeners = kafka_server_config['listeners']
            listeners = re.sub(r"(^|\b)PLAINTEXT://", "SASL_PLAINTEXT://",
                               listeners)
            listeners = re.sub(r"(^|\b)PLAINTEXTSASL://", "SASL_PLAINTEXT://",
                               listeners)
            listeners = re.sub(r"(^|\b)SSL://", "SASL_SSL://", listeners)
            kafka_server_config['listeners'] = listeners

        kafka_server_config['advertised.listeners'] = listeners
        Logger.info(format("Kafka advertised listeners: {listeners}"))
    elif 'advertised.listeners' in kafka_server_config:
        advertised_listeners = kafka_server_config[
            'advertised.listeners'].replace("localhost", params.hostname)
        kafka_server_config['advertised.listeners'] = advertised_listeners
        Logger.info(
            format("Kafka advertised listeners: {advertised_listeners}"))

    #kafka_server_config['host.name'] = params.hostname

    if params.has_metric_collector:
        kafka_server_config[
            'kafka.timeline.metrics.hosts'] = params.ams_collector_hosts
        kafka_server_config[
            'kafka.timeline.metrics.port'] = params.metric_collector_port
        kafka_server_config[
            'kafka.timeline.metrics.protocol'] = params.metric_collector_protocol
        kafka_server_config[
            'kafka.timeline.metrics.truststore.path'] = params.metric_truststore_path
        kafka_server_config[
            'kafka.timeline.metrics.truststore.type'] = params.metric_truststore_type
        kafka_server_config[
            'kafka.timeline.metrics.truststore.password'] = params.metric_truststore_password

    rack = "/default-rack"
    i = 0
    if len(params.all_racks) > 0:
        for host in params.all_hosts:
            if host == params.hostname:
                rack = params.all_racks[i]
                break
            i = i + 1

    Directory(
        params.kafka_data_dirs,
        mode=0755,
        cd_access='a',
        owner=params.kafka_user,
        group=params.user_group,
        create_parents=True,
        recursive_ownership=True,
    )

    PropertiesFile(
        "server.properties",
        mode=0640,
        dir=params.conf_dir,
        properties=kafka_server_config,
        owner=params.kafka_user,
        group=params.user_group,
    )

    File(
        format("{conf_dir}/kafka-env.sh"),
        owner=params.kafka_user,
        content=InlineTemplate(params.kafka_env_sh_template))

    if (params.log4j_props != None):
        File(
            format("{conf_dir}/log4j.properties"),
            mode=0644,
            group=params.user_group,
            owner=params.kafka_user,
            content=InlineTemplate(params.log4j_props))

    if (params.kerberos_security_enabled and
            params.kafka_kerberos_enabled) or params.kafka_other_sasl_enabled:
        if params.kafka_jaas_conf_template:
            File(
                format("{conf_dir}/kafka_jaas.conf"),
                owner=params.kafka_user,
                content=InlineTemplate(params.kafka_jaas_conf_template))
        else:
            TemplateConfig(
                format("{conf_dir}/kafka_jaas.conf"), owner=params.kafka_user)

        if params.kafka_client_jaas_conf_template:
            File(
                format("{conf_dir}/kafka_client_jaas.conf"),
                owner=params.kafka_user,
                content=InlineTemplate(params.kafka_client_jaas_conf_template))
        else:
            TemplateConfig(
                format("{conf_dir}/kafka_client_jaas.conf"),
                owner=params.kafka_user)

    # On some OS this folder could be not exists, so we will create it before pushing there files
    Directory(
        params.limits_conf_dir,
        create_parents=True,
        owner='root',
        group='root')

    File(
        os.path.join(params.limits_conf_dir, 'kafka.conf'),
        owner='root',
        group='root',
        mode=0644,
        content=Template("kafka.conf.j2"))

    File(
        os.path.join(params.conf_dir, 'tools-log4j.properties'),
        owner='root',
        group='root',
        mode=0644,
        content=Template("tools-log4j.properties.j2"))

    generate_logfeeder_input_config(
        'kafka', Template(
            "input.config-kafka.json.j2", extra_imports=[default]))

    setup_symlink(params.kafka_managed_pid_dir, params.kafka_pid_dir)
    setup_symlink(params.kafka_managed_log_dir, params.kafka_log_dir)