Example #1
    def install_ranger(self):
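        """
        Install the Ranger plugin configuration for Splice Machine: create the
        conf, audit-spool, and policy-cache directories, then render the
        ranger-splicemachine security and audit XML files into the conf dir.
        """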
        import params
        hbase_user = params.config['configurations']['hbase-env']['hbase_user']
        user_group = params.config['configurations']['cluster-env'][
            "user_group"]
        splicemachine_conf_dir = '/etc/splicemachine/conf'
        hdfs_audit_spool = params.config['configurations'][
            'ranger-splicemachine-audit'][
                'xasecure.audit.destination.hdfs.batch.filespool.dir']
        solr_audit_spool = params.config['configurations'][
            'ranger-splicemachine-audit'][
                'xasecure.audit.destination.solr.batch.filespool.dir']
        policy_cache_dir = params.config['configurations'][
            'ranger-splicemachine-security'][
                'ranger.plugin.splicemachine.policy.cache.dir']
        hdfs_audit_dir = params.config['configurations'][
            'ranger-splicemachine-audit'][
                'xasecure.audit.destination.hdfs.dir']

        Directory(splicemachine_conf_dir,
                  owner=hbase_user,
                  group=user_group,
                  create_parents=True)

        Directory(hdfs_audit_spool,
                  owner=hbase_user,
                  group=user_group,
                  create_parents=True)

        Directory(solr_audit_spool,
                  owner=hbase_user,
                  group=user_group,
                  create_parents=True)

        Directory(policy_cache_dir,
                  owner=hbase_user,
                  group=user_group,
                  create_parents=True)

        XmlConfig(
            "ranger-splicemachine-security.xml",
            conf_dir=splicemachine_conf_dir,
            configurations=params.config['configurations']
            ['ranger-splicemachine-security'],
            configuration_attributes=params.config['configuration_attributes']
            ['ranger-splicemachine-security'],
            owner=hbase_user,
            group=user_group,
        )
        XmlConfig(
            "ranger-splicemachine-audit.xml",
            conf_dir=splicemachine_conf_dir,
            configurations=params.config['configurations']
            ['ranger-splicemachine-audit'],
            configuration_attributes=params.config['configuration_attributes']
            ['ranger-splicemachine-audit'],
            owner=hbase_user,
            group=user_group,
        )
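
For orientation: XmlConfig renders each entry of the configurations dict as a
Hadoop-style <property> element in the target file. A hypothetical
single-entry dict (the property name below is illustrative, not taken from
the example) would come out roughly as:

<configuration>
  <property>
    <name>ranger.plugin.splicemachine.policy.rest.url</name>
    <value>http://ranger-admin:6080</value>
  </property>
</configuration>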
Example #2
def mahout():
    import params

    # ensure that matching LZO libraries are installed for Mahout
    lzo_utils.install_lzo_if_needed()

    Directory(params.mahout_conf_dir,
              create_parents=True,
              owner=params.mahout_user,
              group=params.user_group)

    XmlConfig(
        "yarn-site.xml",
        conf_dir=params.hadoop_conf_dir,
        configurations=params.config['configurations']['yarn-site'],
        configuration_attributes=params.config['configuration_attributes']
        ['yarn-site'],
        owner=params.yarn_user,
        group=params.user_group,
        mode=0644)

    if not is_empty(params.log4j_props):
        File(format("{params.mahout_conf_dir}/log4j.properties"),
             mode=0644,
             group=params.user_group,
             owner=params.mahout_user,
             content=params.log4j_props)
    elif os.path.exists(format("{params.mahout_conf_dir}/log4j.properties")):
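        # no log4j template configured: only normalize ownership and mode of
        # an existing log4j.properties, leaving its content untouched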
        File(format("{params.mahout_conf_dir}/log4j.properties"),
             mode=0644,
             group=params.user_group,
             owner=params.mahout_user)
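
A note on the format helper used above: Ambari's format (from
resource_management.libraries.functions) is not Python's str.format; it
resolves placeholder names from the caller's local variables and from the
params registered via env.set_params(params), which is why both the
{params.mahout_conf_dir} spelling here and the bare {hbase_conf_dir} spelling
in Example #7 work. A minimal pure-Python sketch of the lookup idea (a
simplification, not the real implementation):

import sys

def format_sketch(template):
    # resolve {name} placeholders from the caller's local variables,
    # mimicking how Ambari's format() helper finds its values
    caller_locals = sys._getframe(1).f_locals
    return template.format(**caller_locals)

mahout_conf_dir = '/etc/mahout/conf'  # hypothetical value
print(format_sketch("{mahout_conf_dir}/log4j.properties"))
# -> /etc/mahout/conf/log4j.properties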
Example #3
    def generate_configs(self, env):
        """
    Generates config files and stores them as an archive in tmp_dir
    based on xml_configs_list and env_configs_list from commandParams
    """
        import params
        env.set_params(params)
        xml_configs_list = params.config['commandParams']['xml_configs_list']
        env_configs_list = params.config['commandParams']['env_configs_list']
        conf_tmp_dir = tempfile.mkdtemp()
        output_filename = os.path.join(
            self.get_tmp_dir(), params.config['commandParams']['output_file'])

        Directory(self.get_tmp_dir(), recursive=True)
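        # each list entry is a one-key dict mapping an output filename to the
        # name of the configuration dictionary that should be rendered into it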
        for file_dict in xml_configs_list:
            for filename, dict in file_dict.iteritems():
                XmlConfig(
                    filename,
                    conf_dir=conf_tmp_dir,
                    configurations=params.config['configurations'][dict],
                    configuration_attributes=params.config[
                        'configuration_attributes'][dict],
                )
        for file_dict in env_configs_list:
            for filename, dict in file_dict.iteritems():
                File(os.path.join(conf_tmp_dir, filename),
                     content=InlineTemplate(
                         params.config['configurations'][dict]['content']))
        # arcname="." keeps the archived paths relative to the staging dir;
        # closing() already closes the tar file on exit
        with closing(tarfile.open(output_filename, "w:gz")) as tar:
            tar.add(conf_tmp_dir, arcname=".")
        Directory(conf_tmp_dir, action="delete")
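
For context, the loops above imply a commandParams shape like the following
(hypothetical values):

commandParams = {
    'xml_configs_list': [{'hdfs-site.xml': 'hdfs-site'},
                         {'core-site.xml': 'core-site'}],
    'env_configs_list': [{'hadoop-env.sh': 'hadoop-env'}],
    'output_file': 'HDFS_CLIENT-configs.tar.gz',
}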
Example #4
    def generate_configs(self, env):
        """
    Generates config files and stores them as an archive in tmp_dir
    based on xml_configs_list and env_configs_list from commandParams
    """
        import params
        env.set_params(params)

        config = self.get_config()

        xml_configs_list = config['commandParams']['xml_configs_list']
        env_configs_list = config['commandParams']['env_configs_list']
        properties_configs_list = config['commandParams'][
            'properties_configs_list']

        Directory(self.get_tmp_dir(), create_parents=True)

        conf_tmp_dir = tempfile.mkdtemp(dir=self.get_tmp_dir())
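        # mkdtemp already creates the directory with mode 0700; the explicit
        # chmod below just makes the secrets-protection intent visible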
        os.chmod(conf_tmp_dir, 0700)
        output_filename = os.path.join(self.get_tmp_dir(),
                                       config['commandParams']['output_file'])

        try:
            for file_dict in xml_configs_list:
                for filename, dict in file_dict.iteritems():
                    XmlConfig(filename,
                              conf_dir=conf_tmp_dir,
                              mode=0600,
                              **self.generate_configs_get_xml_file_content(
                                  filename, dict))
            for file_dict in env_configs_list:
                for filename, dicts in file_dict.iteritems():
                    File(os.path.join(conf_tmp_dir, filename),
                         mode=0600,
                         content=InlineTemplate(
                             self.generate_configs_get_template_file_content(
                                 filename, dicts)))

            for file_dict in properties_configs_list:
                for filename, dict in file_dict.iteritems():
                    PropertiesFile(
                        os.path.join(conf_tmp_dir, filename),
                        mode=0600,
                        properties=self.generate_configs_get_xml_file_dict(
                            filename, dict))
            # restrict the archive before writing any sensitive content; as
            # above, closing() handles the final tar.close()
            with closing(tarfile.open(output_filename, "w:gz")) as tar:
                os.chmod(output_filename, 0600)
                tar.add(conf_tmp_dir, arcname=".")

        finally:
            Directory(conf_tmp_dir, action="delete")
Example #5
def hbase(name=None):
    import params
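    # this appears to be the Windows-stack variant: ServiceConfig with
    # action="change_user" re-points the installed Windows service to run
    # as the configured hbase user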
    XmlConfig("hbase-site.xml",
              conf_dir=params.hbase_conf_dir,
              configurations=params.config['configurations']['hbase-site'],
              configuration_attributes=params.config['configurationAttributes']
              ['hbase-site'])

    if name in params.service_map:
        # Manually overriding service logon user & password set by the installation package
        service_name = params.service_map[name]
        ServiceConfig(service_name,
                      action="change_user",
                      username=params.hbase_user,
                      password=Script.get_password(params.hbase_user))
Example #6
File: script.py  Project: leonyux/ambari
    def generate_configs(self, env):
        """
    Generates config files and stores them as an archive in tmp_dir
    based on xml_configs_list and env_configs_list from commandParams
    """
        import params
        env.set_params(params)

        config = self.get_config()

        xml_configs_list = config['commandParams']['xml_configs_list']
        env_configs_list = config['commandParams']['env_configs_list']
        properties_configs_list = config['commandParams'][
            'properties_configs_list']

        Directory(self.get_tmp_dir(), recursive=True)

        conf_tmp_dir = tempfile.mkdtemp(dir=self.get_tmp_dir())
        output_filename = os.path.join(self.get_tmp_dir(),
                                       config['commandParams']['output_file'])

        try:
            for file_dict in xml_configs_list:
                for filename, dict in file_dict.iteritems():
                    XmlConfig(filename,
                              conf_dir=conf_tmp_dir,
                              **self.generate_configs_get_xml_file_content(
                                  filename, dict))
            for file_dict in env_configs_list:
                for filename, dicts in file_dict.iteritems():
                    File(os.path.join(conf_tmp_dir, filename),
                         content=InlineTemplate(
                             self.generate_configs_get_template_file_content(
                                 filename, dicts)))

            for file_dict in properties_configs_list:
                for filename, dict in file_dict.iteritems():
                    PropertiesFile(
                        os.path.join(conf_tmp_dir, filename),
                        properties=self.generate_configs_get_xml_file_dict(
                            filename, dict))
            archive_dir(output_filename, conf_tmp_dir)
        finally:
            Directory(conf_tmp_dir, action="delete")
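
archive_dir here replaces the inline tarfile logic of the earlier variants. A
minimal sketch of equivalent behavior, assuming the helper simply gzip-tars
the staging directory (the real helper may instead shell out to tar on the
agent):

import tarfile
from contextlib import closing

def archive_dir_sketch(output_filename, input_dir):
    # gzip-tar input_dir into output_filename with archive-relative paths
    with closing(tarfile.open(output_filename, "w:gz")) as tar:
        tar.add(input_dir, arcname=".")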
Example #7
def hbase(name=None):
    import params

    # ensure that matching LZO libraries are installed for HBase
    lzo_utils.install_lzo_if_needed()

    Directory(params.etc_prefix_dir, mode=0755)

    Directory(params.hbase_conf_dir,
              owner=params.hbase_user,
              group=params.user_group,
              create_parents=True)

    Directory(params.java_io_tmpdir, create_parents=True, mode=0777)

    # If the bucketcache ioengine parameter points at a file location,
    # make sure the enclosing directory exists with hbase:hadoop ownership.
    ioengine_input = params.ioengine_param
    if ioengine_input is not None:
        if ioengine_input.startswith("file:/"):
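            # drop the 5-character "file:" scheme prefix, keeping the leading
            # slash, e.g. file:/mnt/hbase/bucketcache -> /mnt/hbase/bucketcache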
            ioengine_fullpath = ioengine_input[5:]
            ioengine_dir = os.path.dirname(ioengine_fullpath)
            Directory(ioengine_dir,
                      owner=params.hbase_user,
                      group=params.user_group,
                      create_parents=True,
                      mode=0755)

    parent_dir = os.path.dirname(params.tmp_dir)
    # In case if we have several placeholders in path
    while ("${" in parent_dir):
        parent_dir = os.path.dirname(parent_dir)
    if parent_dir != os.path.abspath(os.sep):
        Directory(
            parent_dir,
            create_parents=True,
            cd_access="a",
        )
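        # 1777 = world-writable with the sticky bit, as for /tmp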
        Execute(("chmod", "1777", parent_dir), sudo=True)

    XmlConfig("hbase-site.xml",
              conf_dir=params.hbase_conf_dir,
              configurations=params.config['configurations']['hbase-site'],
              configuration_attributes=params.config['configurationAttributes']
              ['hbase-site'],
              owner=params.hbase_user,
              group=params.user_group)

    if check_stack_feature(StackFeature.PHOENIX_CORE_HDFS_SITE_REQUIRED,
                           params.version_for_stack_feature_checks):
        XmlConfig(
            "core-site.xml",
            conf_dir=params.hbase_conf_dir,
            configurations=params.config['configurations']['core-site'],
            configuration_attributes=params.config['configurationAttributes']
            ['core-site'],
            owner=params.hbase_user,
            group=params.user_group)
        if 'hdfs-site' in params.config['configurations']:
            XmlConfig(
                "hdfs-site.xml",
                conf_dir=params.hbase_conf_dir,
                configurations=params.config['configurations']['hdfs-site'],
                configuration_attributes=params.config[
                    'configurationAttributes']['hdfs-site'],
                owner=params.hbase_user,
                group=params.user_group)
    else:
        File(format("{params.hbase_conf_dir}/hdfs-site.xml"), action="delete")
        File(format("{params.hbase_conf_dir}/core-site.xml"), action="delete")

    if 'hbase-policy' in params.config['configurations']:
        XmlConfig(
            "hbase-policy.xml",
            conf_dir=params.hbase_conf_dir,
            configurations=params.config['configurations']['hbase-policy'],
            configuration_attributes=params.config['configurationAttributes']
            ['hbase-policy'],
            owner=params.hbase_user,
            group=params.user_group)
    # Manually overriding ownership of file installed by hadoop package
    else:
        File(format("{params.hbase_conf_dir}/hbase-policy.xml"),
             owner=params.hbase_user,
             group=params.user_group)

    File(
        format("{hbase_conf_dir}/hbase-env.sh"),
        owner=params.hbase_user,
        content=InlineTemplate(params.hbase_env_sh_template),
        group=params.user_group,
    )

    # On some OSes this folder may not exist, so create it before writing files into it
    Directory(params.limits_conf_dir,
              create_parents=True,
              owner='root',
              group='root')

    File(os.path.join(params.limits_conf_dir, 'hbase.conf'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("hbase.conf.j2"))

    hbase_TemplateConfig(
        params.metric_prop_file_name,
        tag='GANGLIA-MASTER' if name == 'master' else 'GANGLIA-RS')

    hbase_TemplateConfig('regionservers')

    if params.security_enabled:
        hbase_TemplateConfig(format("hbase_{name}_jaas.conf"))

    if name != "client":
        Directory(
            params.pid_dir,
            owner=params.hbase_user,
            create_parents=True,
            cd_access="a",
            mode=0755,
        )

        Directory(
            params.log_dir,
            owner=params.hbase_user,
            create_parents=True,
            cd_access="a",
            mode=0755,
        )

    if params.log4j_props is not None:
        File(format("{params.hbase_conf_dir}/log4j.properties"),
             mode=0644,
             group=params.user_group,
             owner=params.hbase_user,
             content=InlineTemplate(params.log4j_props))
    elif os.path.exists(format("{params.hbase_conf_dir}/log4j.properties")):
        # no template configured: only normalize ownership and mode of an
        # existing log4j.properties
        File(format("{params.hbase_conf_dir}/log4j.properties"),
             mode=0644,
             group=params.user_group,
             owner=params.hbase_user)
    if name == "master" and params.default_fs:
        if not params.hbase_hdfs_root_dir_protocol or params.hbase_hdfs_root_dir_protocol == urlparse(
                params.default_fs).scheme:
            params.HdfsResource(params.hbase_hdfs_root_dir,
                                type="directory",
                                action="create_on_execute",
                                owner=params.hbase_user)
        params.HdfsResource(params.hbase_staging_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.hbase_user,
                            mode=0711)
        if params.create_hbase_home_directory:
            params.HdfsResource(params.hbase_home_directory,
                                type="directory",
                                action="create_on_execute",
                                owner=params.hbase_user,
                                mode=0755)
        params.HdfsResource(None, action="execute")

    if name in ('master', 'regionserver') and not params.default_fs:
        Directory(
            params.hbase_staging_dir,
            owner=params.hbase_user,
            create_parents=True,
            cd_access="a",
            mode=0711,
        )

    if params.phoenix_enabled:
        Package(
            params.phoenix_package,
            retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
            retry_count=params.agent_stack_retry_count)
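
A worked example of the placeholder walk above, with a hypothetical
unresolved value: if hbase.tmp.dir is still /hadoop/hbase/${user}/tmp, the
loop climbs out of every ${...} segment before permissions are fixed:

import os

parent_dir = os.path.dirname("/hadoop/hbase/${user}/tmp")  # /hadoop/hbase/${user}
while "${" in parent_dir:
    parent_dir = os.path.dirname(parent_dir)
print(parent_dir)  # -> /hadoop/hbase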
Example #8
def setup_spark(env, type, upgrade_type=None, action=None, config_dir=None):
  """
  :param env: Python environment
  :param type: Spark component type
  :param upgrade_type: If in a stack upgrade, either UPGRADE_TYPE_ROLLING or UPGRADE_TYPE_NON_ROLLING
  :param action: Action to perform, such as generate configs
  :param config_dir: Optional config directory to write configs to.
  """

  import params

  # ensure that matching LZO libraries are installed for Spark
  lzo_utils.install_lzo_if_needed()

  if config_dir is None:
    config_dir = params.spark_conf

  Directory([params.spark_pid_dir, params.spark_log_dir],
            owner=params.spark_user,
            group=params.user_group,
            mode=0775,
            create_parents = True,
            cd_access = 'a',
  )
  if type == 'server' and action == 'config':
    params.HdfsResource(params.spark_hdfs_user_dir,
                       type="directory",
                       action="create_on_execute",
                       owner=params.spark_user,
                       mode=0775
    )
    params.HdfsResource(None, action="execute")

  PropertiesFile(os.path.join(config_dir, "spark-defaults.conf"),
    properties = params.config['configurations']['spark-defaults'],
    key_value_delimiter = " ",
    owner=params.spark_user,
    group=params.spark_group,
    mode=0644
  )

  # create spark-env.sh in etc/conf dir
  File(os.path.join(config_dir, 'spark-env.sh'),
       owner=params.spark_user,
       group=params.spark_group,
       content=InlineTemplate(params.spark_env_sh),
       mode=0644,
  )

  #create log4j.properties in etc/conf dir
  File(os.path.join(config_dir, 'log4j.properties'),
       owner=params.spark_user,
       group=params.spark_group,
       content=params.spark_log4j_properties,
       mode=0644,
  )

  #create metrics.properties in etc/conf dir
  File(os.path.join(config_dir, 'metrics.properties'),
       owner=params.spark_user,
       group=params.spark_group,
       content=InlineTemplate(params.spark_metrics_properties),
       mode=0644
  )

  Directory(params.spark_logs_dir,
       owner=params.spark_user,
       group=params.spark_group,
       mode=0755,
  )

  if params.is_hive_installed:
    XmlConfig("hive-site.xml",
          conf_dir=config_dir,
          configurations=params.spark_hive_properties,
          owner=params.spark_user,
          group=params.spark_group,
          mode=0644)

  if params.has_spark_thriftserver:
    PropertiesFile(params.spark_thrift_server_conf_file,
      properties = params.config['configurations']['spark-thrift-sparkconf'],
      owner = params.hive_user,
      group = params.user_group,
      key_value_delimiter = " ",
      mode=0644
    )

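  # during a stack upgrade use the target version; otherwise fall back to the
  # version already computed for stack-feature checks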
  effective_version = params.version if upgrade_type is not None else params.version_for_stack_feature_checks
  if effective_version:
    effective_version = format_stack_version(effective_version)

  if check_stack_feature(StackFeature.SPARK_JAVA_OPTS_SUPPORT, effective_version):
    File(os.path.join(params.spark_conf, 'java-opts'),
      owner=params.spark_user,
      group=params.spark_group,
      content=InlineTemplate(params.spark_javaopts_properties),
      mode=0644
    )
  else:
    File(os.path.join(params.spark_conf, 'java-opts'),
      action="delete"
    )

  if params.spark_thrift_fairscheduler_content and check_stack_feature(StackFeature.SPARK_16PLUS, effective_version):
    # create spark-thrift-fairscheduler.xml
    File(os.path.join(config_dir,"spark-thrift-fairscheduler.xml"),
      owner=params.spark_user,
      group=params.spark_group,
      mode=0755,
      content=InlineTemplate(params.spark_thrift_fairscheduler_content)
    )
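
A hedged usage sketch, assuming a typical call site: a component's Script
subclass would invoke this from its configure step, roughly:

# hypothetical method on a Script subclass, not part of the example above
def configure(self, env, upgrade_type=None):
    import params
    env.set_params(params)
    setup_spark(env, 'server', upgrade_type=upgrade_type, action='config')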
Example #9
def setup_spark(env, type, upgrade_type = None, action = None):
  import params

  # ensure that matching LZO libraries are installed for Spark
  lzo_utils.install_lzo_if_needed()

  Directory([params.spark_pid_dir, params.spark_log_dir],
            owner=params.spark_user,
            group=params.user_group,
            mode=0775,
            create_parents = True,
            cd_access = 'a',
  )
  if type == 'server' and action == 'config':
    params.HdfsResource(params.spark_hdfs_user_dir,
                       type="directory",
                       action="create_on_execute",
                       owner=params.spark_user,
                       mode=0775
    )
    params.HdfsResource(None, action="execute")

  PropertiesFile(format("{spark_conf}/spark-defaults.conf"),
    properties = params.config['configurations']['spark-defaults'],
    key_value_delimiter = " ",
    owner=params.spark_user,
    group=params.spark_group,
    mode=0644
  )

  # create spark-env.sh in etc/conf dir
  File(os.path.join(params.spark_conf, 'spark-env.sh'),
       owner=params.spark_user,
       group=params.spark_group,
       content=InlineTemplate(params.spark_env_sh),
       mode=0644,
  )

  #create log4j.properties in etc/conf dir
  File(os.path.join(params.spark_conf, 'log4j.properties'),
       owner=params.spark_user,
       group=params.spark_group,
       content=params.spark_log4j_properties,
       mode=0644,
  )

  #create metrics.properties in etc/conf dir
  File(os.path.join(params.spark_conf, 'metrics.properties'),
       owner=params.spark_user,
       group=params.spark_group,
       content=InlineTemplate(params.spark_metrics_properties),
       mode=0644
  )

  if params.is_hive_installed:
    XmlConfig("hive-site.xml",
          conf_dir=params.spark_conf,
          configurations=params.spark_hive_properties,
          owner=params.spark_user,
          group=params.spark_group,
          mode=0644)

  if params.has_spark_thriftserver:
    PropertiesFile(params.spark_thrift_server_conf_file,
      properties = params.config['configurations']['spark-thrift-sparkconf'],
      owner = params.hive_user,
      group = params.user_group,
      key_value_delimiter = " ",
      mode=0644
    )

  effective_version = params.version if upgrade_type is not None else params.stack_version_formatted
  if effective_version:
    effective_version = format_stack_version(effective_version)

  if params.spark_thrift_fairscheduler_content and effective_version and check_stack_feature(StackFeature.SPARK_16PLUS, effective_version):
    # create spark-thrift-fairscheduler.xml
    File(os.path.join(params.spark_conf,"spark-thrift-fairscheduler.xml"),
      owner=params.spark_user,
      group=params.spark_group,
      mode=0755,
      content=InlineTemplate(params.spark_thrift_fairscheduler_content)
    )
Example #10
def druid(upgrade_type=None, nodeType=None):
  import params
  ensure_base_directories()

  # Environment Variables
  File(format("{params.druid_conf_dir}/druid-env.sh"),
       owner=params.druid_user,
       content=InlineTemplate(params.druid_env_sh_template)
       )

  # common config
  druid_common_config = mutable_config_dict(params.config['configurations']['druid-common'])
  # Users cannot override the configs below
  druid_common_config['druid.host'] = params.hostname
  druid_common_config['druid.extensions.directory'] = params.druid_extensions_dir
  druid_common_config['druid.extensions.hadoopDependenciesDir'] = params.druid_hadoop_dependencies_dir
  druid_common_config['druid.selectors.indexing.serviceName'] = params.config['configurations']['druid-overlord'][
    'druid.service']
  druid_common_config['druid.selectors.coordinator.serviceName'] = \
    params.config['configurations']['druid-coordinator']['druid.service']

  # derby uses no connector credentials; delete the user and password keys,
  # otherwise derby will fail
  if 'derby' == druid_common_config['druid.metadata.storage.type']:
    del druid_common_config['druid.metadata.storage.connector.user']
    del druid_common_config['druid.metadata.storage.connector.password']

  druid_env_config = mutable_config_dict(params.config['configurations']['druid-env'])

  PropertiesFile("common.runtime.properties",
                 dir=params.druid_common_conf_dir,
                 properties=druid_common_config,
                 owner=params.druid_user,
                 group=params.user_group,
                 )
  Logger.info("Created common.runtime.properties")

  File(format("{params.druid_common_conf_dir}/druid-log4j.xml"),
       mode=0644,
       owner=params.druid_user,
       group=params.user_group,
       content=InlineTemplate(params.log4j_props)
       )
  Logger.info("Created log4j file")

  File("/etc/logrotate.d/druid",
       mode=0644,
       owner='root',
       group='root',
       content=InlineTemplate(params.logrotate_props)
       )

  Logger.info("Created log rotate file")

  # Write Hadoop Configs if configured
  if 'core-site' in params.config['configurations']:
    XmlConfig("core-site.xml",
              conf_dir=params.druid_common_conf_dir,
              configurations=params.config['configurations']['core-site'],
              configuration_attributes=params.config['configuration_attributes']['core-site'],
              owner=params.druid_user,
              group=params.user_group
              )

  if 'mapred-site' in params.config['configurations']:
    XmlConfig("mapred-site.xml",
              conf_dir=params.druid_common_conf_dir,
              configurations=params.config['configurations']['mapred-site'],
              configuration_attributes=params.config['configuration_attributes']['mapred-site'],
              owner=params.druid_user,
              group=params.user_group
              )

  if 'yarn-site' in params.config['configurations']:
    XmlConfig("yarn-site.xml",
              conf_dir=params.druid_common_conf_dir,
              configurations=params.config['configurations']['yarn-site'],
              configuration_attributes=params.config['configuration_attributes']['yarn-site'],
              owner=params.druid_user,
              group=params.user_group
              )

  if 'hdfs-site' in params.config['configurations']:
    XmlConfig("hdfs-site.xml",
              conf_dir=params.druid_common_conf_dir,
              configurations=params.config['configurations']['hdfs-site'],
              configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
              owner=params.druid_user,
              group=params.user_group
              )

  # node specific configs
  for node_type in ['coordinator', 'overlord', 'historical', 'broker', 'middleManager', 'router']:
    node_config_dir = format('{params.druid_conf_dir}/{node_type}')
    node_type_lowercase = node_type.lower()

    # Write runtime.properties file
    node_config = mutable_config_dict(params.config['configurations'][format('druid-{node_type_lowercase}')])
    PropertiesFile("runtime.properties",
                   dir=node_config_dir,
                   properties=node_config,
                   owner=params.druid_user,
                   group=params.user_group,
                   )
    Logger.info(format("Created druid-{node_type_lowercase} runtime.properties"))

    # Write jvm configs
    File(format('{node_config_dir}/jvm.config'),
         owner=params.druid_user,
         group=params.user_group,
         content=InlineTemplate(
           "-server \n-Xms{{node_heap_memory}}m \n-Xmx{{node_heap_memory}}m \n-XX:MaxDirectMemorySize={{node_direct_memory}}m \n-Dlog4j.configurationFile={{log4j_config_file}} \n-Dlog4j.debug \n{{node_jvm_opts}}",
           node_heap_memory=druid_env_config[format('druid.{node_type_lowercase}.jvm.heap.memory')],
           log4j_config_file=format("{params.druid_common_conf_dir}/druid-log4j.xml"),
           node_direct_memory=druid_env_config[
             format('druid.{node_type_lowercase}.jvm.direct.memory')],
           node_jvm_opts=druid_env_config[format('druid.{node_type_lowercase}.jvm.opts')])
         )
    Logger.info(format("Created druid-{node_type_lowercase} jvm.config"))

  # All druid nodes have dependency on hdfs_client
  ensure_hadoop_directories()
  # Pull all required dependencies
  pulldeps()
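
mutable_config_dict converts Ambari's read-only configuration view into a
plain dict so the code above can overwrite and delete keys. A minimal sketch,
assuming it is just a shallow copy:

def mutable_config_dict(immutable_config):
    # shallow-copy the read-only config view into an ordinary mutable dict
    return dict((key, value) for key, value in immutable_config.iteritems())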