Ejemplo n.º 1
0
def get_running_topologies(params):
  """
  Query the Storm REST server for the summary of currently running topologies.

  When security is enabled, kinits as the metron user first and tells curl to
  use SPNEGO negotiation.  Returns an empty dict when the curl call fails or
  the response body is not valid JSON.
  """
  Logger.info('Getting Running Storm Topologies from Storm REST Server')
  Logger.info('Security enabled? ' + str(params.security_enabled))

  # Want to sudo to the metron user and kinit as them so we aren't polluting root with Metron's Kerberos tickets.
  # This is because we need to run a command with a return as the metron user. Sigh
  negotiate = '--negotiate -u : ' if params.security_enabled else ''
  cmd = ambari_format(
    'curl --max-time 3 ' + negotiate + '{storm_rest_addr}/api/v1/topology/summary')

  if params.security_enabled:
    kinit(params.kinit_path_local,
          params.metron_keytab_path,
          params.metron_principal_name,
          execute_user=params.metron_user)

  Logger.info('Running cmd: ' + cmd)
  return_code, stdout, stderr = get_user_call_output(cmd,
                                                     user=params.metron_user,
                                                     is_checked_call=False)

  if return_code != 0:
    return {}

  try:
    stormjson = json.loads(stdout)
  # 'except X as e' is valid on Python 2.6+ and 3; the old 'except X, e'
  # comma form is Python 2 only.
  except ValueError as e:
    Logger.info('Stdout: ' + str(stdout))
    Logger.info('Stderr: ' + str(stderr))
    Logger.exception(str(e))
    return {}
  # NOTE(review): the visible code never returns 'stormjson' on the success
  # path -- presumably the rest of the function is truncated here; verify.
Ejemplo n.º 2
0
    def action_delayed(self, action_name, main_resource):
        """
        Queue the delayed HDFS action, once per configured nameservice, or a
        single time with no nameservice when HA is not configured.

        In a federated cluster (more than one nameservice) a nameservice with
        no active namenode is tolerated and only logged; with a single
        nameservice the failure is propagated to the caller.
        """
        main_resource.assert_parameter_is_set('user')

        if main_resource.resource.security_enabled:
            main_resource.kinit()

        nameservices = namenode_ha_utils.get_nameservices(
            main_resource.resource.hdfs_site)

        # Non-HA setup: run once with no nameservice and we are done.
        if not nameservices:
            self.action_delayed_for_nameservice(None, action_name,
                                                main_resource)
            return

        federated = len(nameservices) > 1
        for ns in nameservices:
            try:
                self.action_delayed_for_nameservice(ns, action_name,
                                                    main_resource)
            except namenode_ha_utils.NoActiveNamenodeException:
                # One nameservice can legitimately be down (e.g. during an
                # initial start) in a federated cluster, so keep going then.
                if not federated:
                    raise
                Logger.exception(
                    "Cannot run HdfsResource for nameservice {0}. Due to no active namenode present"
                    .format(ns))
Ejemplo n.º 3
0
    def _cleanup_past_llap_package_dirs(self):
      """
      Delete 'llap-slider*' package folders from previous runs in the agent
      tmp dir, retaining only the newest 'num_folders_to_retain' of them.

      Best effort: any failure is logged and swallowed so cleanup can never
      break the caller.
      """
      try:
        import params
        Logger.info("Determining previous run 'LLAP package' folder(s) to be deleted ....")
        llap_package_folder_name_prefix = "llap-slider" # Package name is like : llap-sliderYYYY-MM-DD-HH:MM:SS
        num_folders_to_retain = 3  # Hardcoding it as of now, as no considerable use was found to provide an env param.
        file_names = [dir_name for dir_name in os.listdir(Script.get_tmp_dir())
                      if dir_name.startswith(llap_package_folder_name_prefix)]

        # The folder names embed a timestamp, so a lexicographic sort is
        # chronological; dropping the tail keeps the newest folders.
        file_names.sort()
        del file_names[-num_folders_to_retain:] # Ignore 'num_folders_to_retain' latest package folders.
        Logger.info("Previous run 'LLAP package' folder(s) to be deleted = {0}".format(file_names))

        if file_names:
          for path in file_names:
            abs_path = Script.get_tmp_dir()+"/"+path
            Directory(abs_path,
                      action = "delete",
                      ignore_failures = True
            )
        else:
          Logger.info("No '{0}*' folder deleted.".format(llap_package_folder_name_prefix))
      # Was a bare 'except:', which also traps SystemExit/KeyboardInterrupt;
      # 'except Exception' keeps the best-effort behaviour without that.
      except Exception:
        Logger.exception("Exception while doing cleanup for past 'LLAP package(s)':")
 def unlock(self):
   """
   Unlocks the lock file descriptor.

   No-op when locking is disabled.  On a successful unlock the file is also
   closed; when 'skip_fcntl_failures' is set, fcntl errors are logged and
   the lock is left in place instead of being propagated.
   """
   if not self.enabled:
     return
   import fcntl
   Logger.info("Releasing the lock on {0}".format(self.lock_file_name))
   if self.acquired:
     try:
       fcntl.lockf(self.lock_file, fcntl.LOCK_UN)
     # Was a bare 'except:', which would also trap SystemExit and
     # KeyboardInterrupt; narrow to Exception while keeping the same
     # skip/raise decision below.
     except Exception:
       if self.skip_fcntl_failures:
         Logger.exception("Fcntl call raised an exception. The lock was not released. "
                          "Continuing as skip_fcntl_failures is set to True")
       else:
         raise
     else:
       self.acquired = False
       try:
         self.lock_file.close()
         self.lock_file = None
       except IOError:
         Logger.warning("Failed to close {0}".format(self.lock_file_name))
Ejemplo n.º 5
0
  def actionexecute(self, env):
    """
    Run the configured host checks and collect structured output per check.

    NOTE(review): only the CHECK_JAVA_HOME handling is visible here -- the
    remaining checks presumably follow in the full source (truncated view).
    """
    Logger.info("Host checks started.")
    config = Script.get_config()
    tmp_dir = Script.get_tmp_dir()
    report_file_handler_dict = {}

    #print "CONFIG: " + str(config)

    check_execute_list = config['commandParams']['check_execute_list']
    if check_execute_list == '*BEFORE_CLEANUP_HOST_CHECKS*':
      check_execute_list = BEFORE_CLEANUP_HOST_CHECKS
    structured_output = {}

    Logger.info("Check execute list: " + str(check_execute_list))

    # check each of the commands; if an unknown exception wasn't handled
    # by the functions, then produce a generic exit_code : 1
    if CHECK_JAVA_HOME in check_execute_list:
      try:
        java_home_check_structured_output = self.execute_java_home_available_check(config)
        structured_output[CHECK_JAVA_HOME] = java_home_check_structured_output
      # 'except ... as' works on Python 2.6+ and 3; the comma form is Py2-only.
      except Exception as exception:
        Logger.exception("There was an unexpected error while checking for the Java home location: " + str(exception))
        structured_output[CHECK_JAVA_HOME] = {"exit_code" : 1, "message": str(exception)}
Ejemplo n.º 6
0
class CheckHost(Script):
    """
    Ambari custom action that runs a series of host checks (java home,
    database connectivity, ...) and reports structured per-check output.

    NOTE(review): the class appears truncated in this view after the
    CHECK_DB_CONNECTION handling; verify against the full source.
    """

    # Package prefixes that are used to find repos (then repos are used to find other packages)
    PACKAGES = [
        "^hadoop.*$",
        "^zookeeper.*$",
        "^webhcat.*$",
        "^oozie.*$",
        "^ambari.*$",
        "^.+-manager-server-db.*$",
        "^.+-manager-daemons.*$",
        "^mahout[_\-]\d.*$",
        "^spark.*$",
        "^falcon.*$",
        "^hbase.*$",
        "^kafka.*$",
        "^knox.*$",
        "^slider.*$",
        "^sqoop.*$",
        "^storm.*$",
        "^flume.*$",
        "^hcatalog.*$",
        "^phoenix.*$",
        "^ranger.*$",
        "^accumulo.*$",
        "^hive_.*$",
        "^pig[_\-.].*$"  # there's a default 'pigz' package which we should avoid
    ]

    # ignore packages from repos whose names start with these strings
    IGNORE_PACKAGES_FROM_REPOS = ["installed"]

    # ignore required packages
    IGNORE_PACKAGES = [
        "epel-release",
        "ambari-server",
        "ambari-agent",
        "nagios",
        # ganglia related:
        "ganglia",
        "libganglia",
        "libconfuse",
        "perl",
        "rrdtool",
        "python-rrdtool",
        "gmetad",
        "librrd",
        "rrdcached"
    ]

    # Additional packages to look for (search packages that start with these)
    ADDITIONAL_PACKAGES = ["ambari-log4j"]

    # ignore repos from the list of repos to be cleaned
    IGNORE_REPOS = ["HDP-UTILS", "AMBARI", "BASE", "EXTRAS"]

    def __init__(self):
        # Handler that persists check results; package provider resolves the
        # platform-specific package manager.
        self.reportFileHandler = HostCheckReportFileHandler()
        self.pkg_provider = get_provider("Package")

    def actionexecute(self, env):
        """Run each requested check, collecting structured output per check."""
        Logger.info("Host checks started.")
        config = Script.get_config()
        tmp_dir = Script.get_tmp_dir()
        report_file_handler_dict = {}

        #print "CONFIG: " + str(config)

        check_execute_list = config['commandParams']['check_execute_list']
        if check_execute_list == '*BEFORE_CLEANUP_HOST_CHECKS*':
            check_execute_list = BEFORE_CLEANUP_HOST_CHECKS
        structured_output = {}

        Logger.info("Check execute list: " + str(check_execute_list))

        # check each of the commands; if an unknown exception wasn't handled
        # by the functions, then produce a generic exit_code : 1
        if CHECK_JAVA_HOME in check_execute_list:
            try:
                java_home_check_structured_output = self.execute_java_home_available_check(
                    config)
                structured_output[
                    CHECK_JAVA_HOME] = java_home_check_structured_output
            # 'except ... as' replaces the Python-2-only comma form.
            except Exception as exception:
                Logger.exception(
                    "There was an unexpected error while checking for the Java home location: "
                    + str(exception))
                structured_output[CHECK_JAVA_HOME] = {
                    "exit_code": 1,
                    "message": str(exception)
                }

        if CHECK_DB_CONNECTION in check_execute_list:
            try:
                db_connection_check_structured_output = self.execute_db_connection_check(
                    config, tmp_dir)
                structured_output[
                    CHECK_DB_CONNECTION] = db_connection_check_structured_output
            except Exception as exception:
                Logger.exception(
                    "There was an unknown error while checking database connectivity: "
                    + str(exception))
                structured_output[CHECK_DB_CONNECTION] = {
                    "exit_code": 1,
                    "message": str(exception)
                }
Ejemplo n.º 7
0
    def execute_db_connection_check(self, config, tmp_dir):
        """
        Verify JDBC connectivity to the configured database.

        Resolves the JDBC driver for the configured db type, ensures a usable
        JDK is present (downloading/installing one from the Ambari server when
        missing), and returns a structured-output dict with 'exit_code' and
        'message' keys on the failure paths.

        NOTE(review): the visible portion ends right after JDK installation;
        the actual connection attempt presumably follows (truncated view).
        """
        Logger.info("DB connection check started.")

        # initialize needed data

        ambari_server_hostname = config['commandParams']['ambari_server_host']
        check_db_connection_jar_name = "DBConnectionVerification.jar"
        jdk_location = config['commandParams']['jdk_location']
        java_home = config['commandParams']['java_home']
        db_name = config['commandParams']['db_name']
        no_jdbc_error_message = None

        # Resolve the driver jar name, its download URL and driver class for
        # the configured database type; any missing driver produces a
        # user-facing error message instead.
        if db_name == DB_MYSQL:
            jdbc_driver_mysql_name = default(
                "/hostLevelParams/custom_mysql_jdbc_name", None)
            if not jdbc_driver_mysql_name:
                no_jdbc_error_message = "The MySQL JDBC driver has not been set. Please ensure that you have executed 'ambari-server setup --jdbc-db=mysql --jdbc-driver=/path/to/jdbc_driver'."
            else:
                jdbc_url = jdk_location + jdbc_driver_mysql_name
                jdbc_driver_class = JDBC_DRIVER_CLASS_MYSQL
                jdbc_name = jdbc_driver_mysql_name
        elif db_name == DB_ORACLE:
            jdbc_driver_oracle_name = default(
                "/hostLevelParams/custom_oracle_jdbc_name", None)
            if not jdbc_driver_oracle_name:
                no_jdbc_error_message = "The Oracle JDBC driver has not been set. Please ensure that you have executed 'ambari-server setup --jdbc-db=oracle --jdbc-driver=/path/to/jdbc_driver'."
            else:
                jdbc_url = jdk_location + jdbc_driver_oracle_name
                jdbc_driver_class = JDBC_DRIVER_CLASS_ORACLE
                jdbc_name = jdbc_driver_oracle_name
        elif db_name == DB_POSTGRESQL:
            jdbc_driver_postgres_name = default(
                "/hostLevelParams/custom_postgres_jdbc_name", None)
            if not jdbc_driver_postgres_name:
                no_jdbc_error_message = "The Postgres JDBC driver has not been set. Please ensure that you have executed 'ambari-server setup --jdbc-db=postgres --jdbc-driver=/path/to/jdbc_driver'."
            else:
                jdbc_url = jdk_location + jdbc_driver_postgres_name
                jdbc_driver_class = JDBC_DRIVER_CLASS_POSTGRESQL
                jdbc_name = jdbc_driver_postgres_name
        elif db_name == DB_MSSQL:
            jdbc_driver_mssql_name = default(
                "/hostLevelParams/custom_mssql_jdbc_name", None)
            if not jdbc_driver_mssql_name:
                no_jdbc_error_message = "The MSSQL JDBC driver has not been set. Please ensure that you have executed 'ambari-server setup --jdbc-db=mssql --jdbc-driver=/path/to/jdbc_driver'."
            else:
                jdbc_url = jdk_location + jdbc_driver_mssql_name
                jdbc_driver_class = JDBC_DRIVER_CLASS_MSSQL
                jdbc_name = jdbc_driver_mssql_name
        elif db_name == DB_SQLA:
            jdbc_driver_sqla_name = default(
                "/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
            if not jdbc_driver_sqla_name:
                no_jdbc_error_message = "The SQLAnywhere JDBC driver has not been set. Please ensure that you have executed 'ambari-server setup --jdbc-db=sqlanywhere --jdbc-driver=/path/to/jdbc_driver'."
            else:
                jdbc_url = jdk_location + jdbc_driver_sqla_name
                jdbc_driver_class = JDBC_DRIVER_CLASS_SQLA
                jdbc_name = jdbc_driver_sqla_name
        else:
            no_jdbc_error_message = format(
                "'{db_name}' database type not supported.")

        if no_jdbc_error_message:
            Logger.warning(no_jdbc_error_message)
            db_connection_check_structured_output = {
                "exit_code": 1,
                "message": no_jdbc_error_message
            }
            return db_connection_check_structured_output

        db_connection_url = config['commandParams']['db_connection_url']
        user_name = config['commandParams']['user_name']
        user_passwd = config['commandParams']['user_passwd']
        agent_cache_dir = os.path.abspath(
            config["hostLevelParams"]["agentCacheDir"])
        check_db_connection_url = jdk_location + check_db_connection_jar_name
        jdbc_path = os.path.join(agent_cache_dir, jdbc_name)
        class_path_delimiter = ":"
        # SQLAnywhere ships as an archive: the jar and its native libs live
        # at fixed paths inside the extracted archive.
        if db_name == DB_SQLA:
            jdbc_jar_path = agent_cache_dir + JDBC_DRIVER_SQLA_JAR_PATH_IN_ARCHIVE
            java_library_path = agent_cache_dir + JARS_PATH_IN_ARCHIVE_SQLA + class_path_delimiter + agent_cache_dir + \
                                LIBS_PATH_IN_ARCHIVE_SQLA
        else:
            jdbc_jar_path = jdbc_path
            java_library_path = agent_cache_dir

        check_db_connection_path = os.path.join(agent_cache_dir,
                                                check_db_connection_jar_name)

        java_bin = "java"
        if OSCheck.is_windows_family():
            java_bin = "java.exe"
            class_path_delimiter = ";"

        java_exec = os.path.join(java_home, "bin", java_bin)

        # No custom JDK configured and none installed locally: fail with a
        # clear message rather than attempting a download.
        if ('jdk_name' not in config['commandParams']
                or config['commandParams']['jdk_name'] is None
                or config['commandParams']['jdk_name'] == '') and not os.path.isfile(java_exec):
            message = "Custom java is not available on host. Please install it. Java home should be the same as on server. " \
                      "\n"
            Logger.warning(message)
            db_connection_check_structured_output = {
                "exit_code": 1,
                "message": message
            }
            return db_connection_check_structured_output

        environment = {"no_proxy": format("{ambari_server_hostname}")}
        # download and install java if it doesn't exists
        if not os.path.isfile(java_exec):
            jdk_name = config['commandParams']['jdk_name']
            jdk_url = "{0}/{1}".format(jdk_location, jdk_name)
            jdk_download_target = os.path.join(agent_cache_dir, jdk_name)
            java_dir = os.path.dirname(java_home)
            try:
                download_file(jdk_url, jdk_download_target)
            # 'except ... as' replaces the Python-2-only comma form.
            except Exception as e:
                message = "Error downloading JDK from Ambari Server resources. Check network access to " \
                          "Ambari Server.\n" + str(e)
                Logger.exception(message)
                db_connection_check_structured_output = {
                    "exit_code": 1,
                    "message": message
                }
                return db_connection_check_structured_output

            # Windows JDK installers are .exe; run silently into java_home.
            if jdk_name.endswith(".exe"):
                install_cmd = "{0} /s INSTALLDIR={1} STATIC=1 WEB_JAVA=0 /L \\var\\log\\ambari-agent".format(
                    os_utils.quote_path(jdk_download_target),
                    os_utils.quote_path(java_home),
                )
                install_path = [java_dir]
                try:
                    Execute(install_cmd, path=install_path)
                except Exception as e:
                    message = "Error installing java.\n" + str(e)
                    Logger.exception(message)
                    db_connection_check_structured_output = {
                        "exit_code": 1,
                        "message": message
                    }
                    return db_connection_check_structured_output
Ejemplo n.º 8
0
                    "There was an unknown error while checking database connectivity: "
                    + str(exception))
                structured_output[CHECK_DB_CONNECTION] = {
                    "exit_code": 1,
                    "message": str(exception)
                }

        if CHECK_HOST_RESOLUTION in check_execute_list:
            try:
                host_resolution_structured_output = self.execute_host_resolution_check(
                    config)
                structured_output[
                    CHECK_HOST_RESOLUTION] = host_resolution_structured_output
            except Exception, exception:
                Logger.exception(
                    "There was an unknown error while checking IP address lookups: "
                    + str(exception))
                structured_output[CHECK_HOST_RESOLUTION] = {
                    "exit_code": 1,
                    "message": str(exception)
                }
        if CHECK_LAST_AGENT_ENV in check_execute_list:
            try:
                last_agent_env_structured_output = self.execute_last_agent_env_check(
                )
                structured_output[
                    CHECK_LAST_AGENT_ENV] = last_agent_env_structured_output
            except Exception, exception:
                Logger.exception(
                    "There was an unknown error while checking last host environment details: "
                    + str(exception))
Ejemplo n.º 9
0
def setup_ranger_yarn():
    """
    Configure the Ranger plugin for YARN.

    Creates the HDFS audit directories when HDFS auditing is enabled, then
    invokes setup_ranger_plugin with the YARN-specific repo, policy and
    audit/security configurations.  No-op when the plugin is disabled.
    """
    import params

    if params.enable_ranger_yarn:

        if params.retryAble:
            Logger.info(
                "YARN: Setup ranger: command retry enables thus retrying if ranger admin is down !"
            )
        else:
            Logger.info(
                "YARN: Setup ranger: command retry not enabled thus skipping if ranger admin is down !"
            )

        if params.xa_audit_hdfs_is_enabled:
            try:
                # 0o755/0o700 are the octal literals valid on both
                # Python 2.6+ and Python 3 (0755 is Python-2 only).
                params.HdfsResource("/ranger/audit",
                                    type="directory",
                                    action="create_on_execute",
                                    owner=params.hdfs_user,
                                    group=params.hdfs_user,
                                    mode=0o755,
                                    recursive_chmod=True)
                params.HdfsResource("/ranger/audit/yarn",
                                    type="directory",
                                    action="create_on_execute",
                                    owner=params.yarn_user,
                                    group=params.yarn_user,
                                    mode=0o700,
                                    recursive_chmod=True)
                params.HdfsResource(None, action="execute")
            # 'except ... as' replaces the Python-2-only comma form.
            except Exception as err:
                Logger.exception(
                    "Audit directory creation in HDFS for YARN Ranger plugin failed with error:\n{0}"
                    .format(err))

        setup_ranger_plugin(
            'hadoop-yarn-resourcemanager',
            'yarn',
            None,
            None,
            None,
            None,
            params.java64_home,
            params.repo_name,
            params.yarn_ranger_plugin_repo,
            params.ranger_env,
            params.ranger_plugin_properties,
            params.policy_user,
            params.policymgr_mgr_url,
            params.enable_ranger_yarn,
            conf_dict=params.hadoop_conf_dir,
            component_user=params.yarn_user,
            component_group=params.user_group,
            cache_service_list=['yarn'],
            plugin_audit_properties=params.config['configurations']['ranger-yarn-audit'],
            plugin_audit_attributes=params.config['configurationAttributes']['ranger-yarn-audit'],
            plugin_security_properties=params.config['configurations']['ranger-yarn-security'],
            plugin_security_attributes=params.config['configurationAttributes']['ranger-yarn-security'],
            plugin_policymgr_ssl_properties=params.config['configurations']['ranger-yarn-policymgr-ssl'],
            plugin_policymgr_ssl_attributes=params.config['configurationAttributes']['ranger-yarn-policymgr-ssl'],
            component_list=['hadoop-yarn-resourcemanager'],
            audit_db_is_enabled=params.xa_audit_db_is_enabled,
            credential_file=params.credential_file,
            xa_audit_db_password=params.xa_audit_db_password,
            ssl_truststore_password=params.ssl_truststore_password,
            ssl_keystore_password=params.ssl_keystore_password,
            api_version='v2',
            skip_if_rangeradmin_down=not params.retryAble,
            is_security_enabled=params.security_enabled,
            is_stack_supports_ranger_kerberos=params.stack_supports_ranger_kerberos,
            component_user_principal=params.rm_principal_name if params.security_enabled else None,
            component_user_keytab=params.rm_keytab if params.security_enabled else None)
Ejemplo n.º 10
0
def setup_ranger_kafka():
    """
    Configure the Ranger plugin for Kafka.

    Creates the HDFS audit directories (when a namenode is present and HDFS
    auditing is enabled), invokes setup_ranger_plugin with the Kafka-specific
    configuration, installs the ranger env helper script, and creates
    core-site.xml for the plugin on stacks that support it.  No-op when the
    plugin is disabled.
    """
    import params

    if params.enable_ranger_kafka:

        from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_ranger_plugin

        if params.retryAble:
            Logger.info(
                "Kafka: Setup ranger: command retry enables thus retrying if ranger admin is down !"
            )
        else:
            Logger.info(
                "Kafka: Setup ranger: command retry not enabled thus skipping if ranger admin is down !"
            )

        if params.has_namenode and params.xa_audit_hdfs_is_enabled:
            try:
                # 0o755/0o700 are the octal literals valid on both
                # Python 2.6+ and Python 3 (0755 is Python-2 only).
                params.HdfsResource("/ranger/audit",
                                    type="directory",
                                    action="create_on_execute",
                                    owner=params.hdfs_user,
                                    group=params.hdfs_user,
                                    mode=0o755,
                                    recursive_chmod=True)
                params.HdfsResource("/ranger/audit/kafka",
                                    type="directory",
                                    action="create_on_execute",
                                    owner=params.kafka_user,
                                    group=params.kafka_user,
                                    mode=0o700,
                                    recursive_chmod=True)
                params.HdfsResource(None, action="execute")
                if params.is_ranger_kms_ssl_enabled:
                    Logger.info(
                        'Ranger KMS is ssl enabled, configuring ssl-client for hdfs audits.'
                    )
                    setup_configuration_file_for_required_plugins(
                        component_user=params.kafka_user,
                        component_group=params.user_group,
                        create_core_site_path=params.conf_dir,
                        configurations=params.config['configurations']['ssl-client'],
                        configuration_attributes=params.config['configurationAttributes']['ssl-client'],
                        file_name='ssl-client.xml')
                else:
                    Logger.info(
                        'Ranger KMS is not ssl enabled, skipping ssl-client for hdfs audits.'
                    )
            # 'except ... as' replaces the Python-2-only comma form, and the
            # original message's "DDPS" typo is corrected to "HDFS".
            except Exception as err:
                Logger.exception(
                    "Audit directory creation in HDFS for KAFKA Ranger plugin failed with error:\n{0}"
                    .format(err))

        setup_ranger_plugin(
            'kafka-broker',
            'kafka',
            params.previous_jdbc_jar,
            params.downloaded_custom_connector,
            params.driver_curl_source,
            params.driver_curl_target,
            params.java64_home,
            params.repo_name,
            params.kafka_ranger_plugin_repo,
            params.ranger_env,
            params.ranger_plugin_properties,
            params.policy_user,
            params.policymgr_mgr_url,
            params.enable_ranger_kafka,
            conf_dict=params.conf_dir,
            component_user=params.kafka_user,
            component_group=params.user_group,
            cache_service_list=['kafka'],
            plugin_audit_properties=params.ranger_kafka_audit,
            plugin_audit_attributes=params.ranger_kafka_audit_attrs,
            plugin_security_properties=params.ranger_kafka_security,
            plugin_security_attributes=params.ranger_kafka_security_attrs,
            plugin_policymgr_ssl_properties=params.ranger_kafka_policymgr_ssl,
            plugin_policymgr_ssl_attributes=params.ranger_kafka_policymgr_ssl_attrs,
            component_list=['kafka-broker'],
            audit_db_is_enabled=params.xa_audit_db_is_enabled,
            credential_file=params.credential_file,
            xa_audit_db_password=params.xa_audit_db_password,
            ssl_truststore_password=params.ssl_truststore_password,
            ssl_keystore_password=params.ssl_keystore_password,
            api_version='v2',
            skip_if_rangeradmin_down=not params.retryAble,
            is_security_enabled=params.kerberos_security_enabled,
            is_stack_supports_ranger_kerberos=params.stack_supports_ranger_kerberos,
            component_user_principal=params.kafka_jaas_principal if params.kerberos_security_enabled else None,
            component_user_keytab=params.kafka_keytab_path if params.kerberos_security_enabled else None)

        # The enclosing 'if params.enable_ranger_kafka' already holds here,
        # so the original redundant re-check of that flag was dropped.
        Execute(('cp', '--remove-destination',
                 params.setup_ranger_env_sh_source,
                 params.setup_ranger_env_sh_target),
                not_if=format("test -f {setup_ranger_env_sh_target}"),
                sudo=True)
        File(params.setup_ranger_env_sh_target,
             owner=params.kafka_user,
             group=params.user_group,
             mode=0o755)

        if params.stack_supports_core_site_for_ranger_plugin and params.enable_ranger_kafka and params.kerberos_security_enabled:
            # sometimes this is a link for missing /etc/hdp directory, just remove link/file and create regular file.
            Execute(
                ('rm', '-f', os.path.join(params.conf_dir, "core-site.xml")),
                sudo=True)

            if params.has_namenode:
                Logger.info(
                    "Stack supports core-site.xml creation for Ranger plugin and Namenode is installed, creating create core-site.xml from namenode configurations"
                )
                setup_configuration_file_for_required_plugins(
                    component_user=params.kafka_user,
                    component_group=params.user_group,
                    create_core_site_path=params.conf_dir,
                    configurations=params.config['configurations']['core-site'],
                    configuration_attributes=params.config['configurationAttributes']['core-site'],
                    file_name='core-site.xml',
                    xml_include_file=params.mount_table_xml_inclusion_file_full_path,
                    xml_include_file_content=params.mount_table_content)
            else:
                Logger.info(
                    "Stack supports core-site.xml creation for Ranger plugin and Namenode is not installed, creating create core-site.xml from default configurations"
                )
                setup_configuration_file_for_required_plugins(
                    component_user=params.kafka_user,
                    component_group=params.user_group,
                    create_core_site_path=params.conf_dir,
                    configurations={
                        'hadoop.security.authentication':
                        'kerberos'
                        if params.kerberos_security_enabled else 'simple'
                    },
                    configuration_attributes={},
                    file_name='core-site.xml')
        else:
            Logger.info(
                "Stack does not support core-site.xml creation for Ranger plugin, skipping core-site.xml configurations"
            )
Ejemplo n.º 11
0
def setup_ranger_hbase(upgrade_type=None, service_name="hbase-master"):
    """
    Configure the Ranger plugin for HBase.

    Creates the HDFS audit directories for master and regionserver (only when
    invoked for the hbase-master service with HDFS auditing enabled), then
    invokes setup_ranger_plugin with the HBase-specific configuration.
    No-op when the plugin is disabled.

    :param upgrade_type: present in the signature for caller compatibility;
                         unused in the visible code.
    :param service_name: component this is being run for; audit directories
                         are only created for 'hbase-master'.
    """
    import params

    if params.enable_ranger_hbase:

        if params.retryAble:
            Logger.info(
                "HBase: Setup ranger: command retry enables thus retrying if ranger admin is down !"
            )
        else:
            Logger.info(
                "HBase: Setup ranger: command retry not enabled thus skipping if ranger admin is down !"
            )

        if params.xa_audit_hdfs_is_enabled and service_name == 'hbase-master':
            try:
                # 0o755/0o700 are the octal literals valid on both
                # Python 2.6+ and Python 3 (0755 is Python-2 only).
                params.HdfsResource(
                    "/ranger/audit",
                    type="directory",
                    action="create_on_execute",
                    owner=params.hdfs_user,
                    group=params.hdfs_user,
                    mode=0o755,
                    recursive_chmod=True)
                params.HdfsResource(
                    "/ranger/audit/hbaseMaster",
                    type="directory",
                    action="create_on_execute",
                    owner=params.hbase_user,
                    group=params.hbase_user,
                    mode=0o700,
                    recursive_chmod=True)
                params.HdfsResource(
                    "/ranger/audit/hbaseRegional",
                    type="directory",
                    action="create_on_execute",
                    owner=params.hbase_user,
                    group=params.hbase_user,
                    mode=0o700,
                    recursive_chmod=True)
                params.HdfsResource(None, action="execute")
            # 'except ... as' replaces the Python-2-only comma form.
            except Exception as err:
                Logger.exception(
                    "Audit directory creation in HDFS for HBASE Ranger plugin failed with error:\n{0}"
                    .format(err))

        api_version = 'v2'

        setup_ranger_plugin(
            'hbase-client',
            'hbase',
            None,
            None,
            None,
            None,
            params.java64_home,
            params.repo_name,
            params.hbase_ranger_plugin_repo,
            params.ranger_env,
            params.ranger_plugin_properties,
            params.policy_user,
            params.policymgr_mgr_url,
            params.enable_ranger_hbase,
            conf_dict=params.hbase_conf_dir,
            component_user=params.hbase_user,
            component_group=params.user_group,
            cache_service_list=['hbaseMaster', 'hbaseRegional'],
            plugin_audit_properties=params.config['configurations']['ranger-hbase-audit'],
            plugin_audit_attributes=params.config['configurationAttributes']['ranger-hbase-audit'],
            plugin_security_properties=params.config['configurations']['ranger-hbase-security'],
            plugin_security_attributes=params.config['configurationAttributes']['ranger-hbase-security'],
            plugin_policymgr_ssl_properties=params.config['configurations']['ranger-hbase-policymgr-ssl'],
            plugin_policymgr_ssl_attributes=params.config['configurationAttributes']['ranger-hbase-policymgr-ssl'],
            component_list=[
                'hbase-client', 'hbase-master', 'hbase-regionserver'
            ],
            audit_db_is_enabled=False,
            credential_file=params.credential_file,
            xa_audit_db_password=None,
            ssl_truststore_password=params.ssl_truststore_password,
            ssl_keystore_password=params.ssl_keystore_password,
            skip_if_rangeradmin_down=not params.retryAble,
            api_version=api_version,
            is_security_enabled=params.security_enabled,
            # NOTE(review): unlike the YARN/Kafka variants, this passes None
            # for is_stack_supports_ranger_kerberos when security is off --
            # confirm this conditional is intentional.
            is_stack_supports_ranger_kerberos=params.stack_supports_ranger_kerberos
            if params.security_enabled else None,
            component_user_principal=params.ranger_hbase_principal
            if params.security_enabled else None,
            component_user_keytab=params.ranger_hbase_keytab
            if params.security_enabled else None)
def setup_ranger_hive_interactive(upgrade_type=None):
    import params

    if params.enable_ranger_hive:

        stack_version = None

        if upgrade_type is not None:
            stack_version = params.version

        if params.retryAble:
            Logger.info(
                "Hive2: Setup ranger: command retry enabled thus retrying if ranger admin is down !"
            )
        else:
            Logger.info(
                "Hive2: Setup ranger: command retry not enabled thus skipping if ranger admin is down !"
            )

        if params.xa_audit_hdfs_is_enabled:
            try:
                params.HdfsResource("/ranger/audit",
                                    type="directory",
                                    action="create_on_execute",
                                    owner=params.hdfs_user,
                                    group=params.hdfs_user,
                                    mode=0755,
                                    recursive_chmod=True)
                params.HdfsResource("/ranger/audit/hive2",
                                    type="directory",
                                    action="create_on_execute",
                                    owner=params.hive_user,
                                    group=params.hive_user,
                                    mode=0700,
                                    recursive_chmod=True)
                params.HdfsResource(None, action="execute")
            except Exception, err:
                Logger.exception(
                    "Audit directory creation in HDFS for HIVE2 Ranger plugin failed with error:\n{0}"
                    .format(err))

        from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_ranger_plugin
        setup_ranger_plugin(
            'hive-server2',
            'hive',
            params.ranger_previous_jdbc_jar,
            params.ranger_downloaded_custom_connector,
            params.ranger_driver_curl_source,
            params.ranger_driver_curl_target,
            params.java64_home,
            params.repo_name,
            params.hive_ranger_plugin_repo,
            params.ranger_env,
            params.ranger_plugin_properties,
            params.policy_user,
            params.policymgr_mgr_url,
            params.enable_ranger_hive,
            conf_dict=params.hive_server_interactive_conf_dir,
            component_user=params.hive_user,
            component_group=params.user_group,
            cache_service_list=['hive-server2'],
            plugin_audit_properties=params.config['configurations']
            ['ranger-hive-audit'],
            plugin_audit_attributes=params.config['configurationAttributes']
            ['ranger-hive-audit'],
            plugin_security_properties=params.config['configurations']
            ['ranger-hive-security'],
            plugin_security_attributes=params.config['configurationAttributes']
            ['ranger-hive-security'],
            plugin_policymgr_ssl_properties=params.config['configurations']
            ['ranger-hive-policymgr-ssl'],
            plugin_policymgr_ssl_attributes=params.
            config['configurationAttributes']['ranger-hive-policymgr-ssl'],
            component_list=['hive-client', 'hive-metastore', 'hive-server2'],
            audit_db_is_enabled=False,
            credential_file=params.credential_file,
            xa_audit_db_password=None,
            ssl_truststore_password=params.ssl_truststore_password,
            ssl_keystore_password=params.ssl_keystore_password,
            stack_version_override=stack_version,
            skip_if_rangeradmin_down=not params.retryAble,
            api_version='v2',
            is_security_enabled=params.security_enabled,
            is_stack_supports_ranger_kerberos=params.
            stack_supports_ranger_kerberos,
            component_user_principal=params.hive_principal
            if params.security_enabled else None,
            component_user_keytab=params.hive_server2_keytab
            if params.security_enabled else None)
Ejemplo n.º 13
0
def setup_ranger_knox(upgrade_type=None):
  import params

  if params.enable_ranger_knox:

    stack_version = None
    if upgrade_type is not None:
      stack_version = params.version

    if params.retryAble:
      Logger.info("Knox: Setup ranger: command retry enables thus retrying if ranger admin is down !")
    else:
      Logger.info("Knox: Setup ranger: command retry not enabled thus skipping if ranger admin is down !")

    if params.xa_audit_hdfs_is_enabled:
      if params.has_namenode:
        try:
          params.HdfsResource("/ranger/audit",
                             type="directory",
                             action="create_on_execute",
                             owner=params.hdfs_user,
                             group=params.hdfs_user,
                             mode=0755,
                             recursive_chmod=True
          )
          params.HdfsResource("/ranger/audit/knox",
                             type="directory",
                             action="create_on_execute",
                             owner=params.knox_user,
                             group=params.knox_user,
                             mode=0700,
                             recursive_chmod=True
          )
          params.HdfsResource(None, action="execute")
        except Exception, err:
          Logger.exception("Audit directory creation in DDPS for KNOX Ranger plugin failed with error:\n{0}".format(err))

        if params.namenode_hosts is not None and len(params.namenode_hosts) > 1:
          Logger.info('Ranger Knox plugin is enabled in NameNode HA environment along with audit to Hdfs enabled, creating hdfs-site.xml')
          XmlConfig("hdfs-site.xml",
            conf_dir=params.knox_conf_dir,
            configurations=params.config['configurations']['hdfs-site'],
            configuration_attributes=params.config['configurationAttributes']['hdfs-site'],
            owner=params.knox_user,
            group=params.knox_group,
            mode=0644
          )
        else:
          File(format('{knox_conf_dir}/hdfs-site.xml'), action="delete")

    api_version = 'v2'

    setup_ranger_plugin('knox-server', 'knox', params.previous_jdbc_jar,
                        params.downloaded_custom_connector, params.driver_curl_source,
                        params.driver_curl_target, params.java_home,
                        params.repo_name, params.knox_ranger_plugin_repo,
                        params.ranger_env, params.ranger_plugin_properties,
                        params.policy_user, params.policymgr_mgr_url,
                        params.enable_ranger_knox, conf_dict=params.knox_conf_dir,
                        component_user=params.knox_user, component_group=params.knox_group, cache_service_list=['knox'],
                        plugin_audit_properties=params.config['configurations']['ranger-knox-audit'], plugin_audit_attributes=params.config['configurationAttributes']['ranger-knox-audit'],
                        plugin_security_properties=params.config['configurations']['ranger-knox-security'], plugin_security_attributes=params.config['configurationAttributes']['ranger-knox-security'],
                        plugin_policymgr_ssl_properties=params.config['configurations']['ranger-knox-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configurationAttributes']['ranger-knox-policymgr-ssl'],
                        component_list=['knox-server'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
                        credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password,
                        ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
                        stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble,api_version=api_version,
                        is_security_enabled = params.security_enabled,
                        is_stack_supports_ranger_kerberos = params.stack_supports_ranger_kerberos,
                        component_user_principal=params.knox_principal_name if params.security_enabled else None,
                        component_user_keytab=params.knox_keytab_path if params.security_enabled else None)

    if params.stack_supports_core_site_for_ranger_plugin and params.enable_ranger_knox and params.security_enabled:
      if params.has_namenode:
        Logger.info("Stack supports core-site.xml creation for Ranger plugin and Namenode is installed, creating create core-site.xml from namenode configurations")
        setup_configuration_file_for_required_plugins(component_user = params.knox_user, component_group = params.knox_group,
                                             create_core_site_path = params.knox_conf_dir, configurations = params.config['configurations']['core-site'],
                                             configuration_attributes = params.config['configurationAttributes']['core-site'], file_name='core-site.xml',
                                             xml_include_file=params.mount_table_xml_inclusion_file_full_path, xml_include_file_content=params.mount_table_content)
      else:
        Logger.info("Stack supports core-site.xml creation for Ranger plugin and Namenode is not installed, creating create core-site.xml from default configurations")
        setup_configuration_file_for_required_plugins(component_user = params.knox_user, component_group = params.knox_group,
                                             create_core_site_path = params.knox_conf_dir, configurations = { 'hadoop.security.authentication' : 'kerberos' if params.security_enabled else 'simple' },
                                             configuration_attributes = {}, file_name='core-site.xml')
    else:
      Logger.info("Stack does not support core-site.xml creation for Ranger plugin, skipping core-site.xml configurations")
Ejemplo n.º 14
0
def setup_ranger_atlas(upgrade_type=None):
    import params

    if params.enable_ranger_atlas:

        from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_ranger_plugin

        if params.retry_enabled:
            Logger.info(
                "ATLAS: Setup ranger: command retry enables thus retrying if ranger admin is down !"
            )
        else:
            Logger.info(
                "ATLAS: Setup ranger: command retry not enabled thus skipping if ranger admin is down !"
            )

        if params.has_namenode and params.xa_audit_hdfs_is_enabled:
            try:
                params.HdfsResource("/ranger/audit",
                                    type="directory",
                                    action="create_on_execute",
                                    owner=params.metadata_user,
                                    group=params.user_group,
                                    mode=0755,
                                    recursive_chmod=True)
                params.HdfsResource("/ranger/audit/atlas",
                                    type="directory",
                                    action="create_on_execute",
                                    owner=params.metadata_user,
                                    group=params.user_group,
                                    mode=0700,
                                    recursive_chmod=True)
                params.HdfsResource(None, action="execute")
                if params.is_ranger_kms_ssl_enabled:
                    Logger.info(
                        'Ranger KMS is ssl enabled, configuring ssl-client for hdfs audits.'
                    )
                    setup_configuration_file_for_required_plugins(
                        component_user=params.metadata_user,
                        component_group=params.user_group,
                        create_core_site_path=params.conf_dir,
                        configurations=params.config['configurations']
                        ['ssl-client'],
                        configuration_attributes=params.
                        config['configurationAttributes']['ssl-client'],
                        file_name='ssl-client.xml')
                else:
                    Logger.info(
                        'Ranger KMS is not ssl enabled, skipping ssl-client for hdfs audits.'
                    )
            except Exception, err:
                Logger.exception(
                    "Audit directory creation in HDFS for ATLAS Ranger plugin failed with error:\n{0}"
                    .format(err))

        setup_ranger_plugin(
            'atlas-server',
            'atlas',
            None,
            params.downloaded_custom_connector,
            params.driver_curl_source,
            params.driver_curl_target,
            params.java64_home,
            params.repo_name,
            params.atlas_ranger_plugin_repo,
            params.ranger_env,
            params.ranger_plugin_properties,
            params.policy_user,
            params.policymgr_mgr_url,
            params.enable_ranger_atlas,
            conf_dict=params.conf_dir,
            component_user=params.metadata_user,
            component_group=params.user_group,
            cache_service_list=['atlas'],
            plugin_audit_properties=params.config['configurations']
            ['ranger-atlas-audit'],
            plugin_audit_attributes=params.config['configurationAttributes']
            ['ranger-atlas-audit'],
            plugin_security_properties=params.config['configurations']
            ['ranger-atlas-security'],
            plugin_security_attributes=params.config['configurationAttributes']
            ['ranger-atlas-security'],
            plugin_policymgr_ssl_properties=params.config['configurations']
            ['ranger-atlas-policymgr-ssl'],
            plugin_policymgr_ssl_attributes=params.
            config['configurationAttributes']['ranger-atlas-policymgr-ssl'],
            component_list=['atlas-server'],
            audit_db_is_enabled=False,
            credential_file=params.credential_file,
            xa_audit_db_password=None,
            ssl_truststore_password=params.ssl_truststore_password,
            ssl_keystore_password=params.ssl_keystore_password,
            api_version='v2',
            skip_if_rangeradmin_down=not params.retry_enabled,
            is_security_enabled=params.security_enabled,
            is_stack_supports_ranger_kerberos=params.
            stack_supports_ranger_kerberos,
            component_user_principal=params.atlas_jaas_principal
            if params.security_enabled else None,
            component_user_keytab=params.atlas_keytab_path
            if params.security_enabled else None)
Ejemplo n.º 15
0
def setup_ranger_storm(upgrade_type=None):
    """
  :param upgrade_type: Upgrade Type such as "rolling" or "nonrolling"
  """
    import params
    if params.enable_ranger_storm and params.security_enabled:
        site_files_create_path = format(
            '{storm_component_home_dir}/extlib-daemon/ranger-storm-plugin-impl/conf'
        )
        Directory(site_files_create_path,
                  owner=params.storm_user,
                  group=params.user_group,
                  mode=0775,
                  create_parents=True,
                  cd_access='a')

        stack_version = None
        if upgrade_type is not None:
            stack_version = params.version

        if params.retryAble:
            Logger.info(
                "Storm: Setup ranger: command retry enables thus retrying if ranger admin is down !"
            )
        else:
            Logger.info(
                "Storm: Setup ranger: command retry not enabled thus skipping if ranger admin is down !"
            )

        if params.has_namenode and params.xa_audit_hdfs_is_enabled:
            try:
                params.HdfsResource("/ranger/audit",
                                    type="directory",
                                    action="create_on_execute",
                                    owner=params.hdfs_user,
                                    group=params.hdfs_user,
                                    mode=0755,
                                    recursive_chmod=True)
                params.HdfsResource("/ranger/audit/storm",
                                    type="directory",
                                    action="create_on_execute",
                                    owner=params.storm_user,
                                    group=params.storm_user,
                                    mode=0700,
                                    recursive_chmod=True)
                params.HdfsResource(None, action="execute")
                if params.is_ranger_kms_ssl_enabled:
                    Logger.info(
                        'Ranger KMS is ssl enabled, configuring ssl-client for hdfs audits.'
                    )
                    setup_configuration_file_for_required_plugins(
                        component_user=params.storm_user,
                        component_group=params.user_group,
                        create_core_site_path=site_files_create_path,
                        configurations=params.config['configurations']
                        ['ssl-client'],
                        configuration_attributes=params.
                        config['configurationAttributes']['ssl-client'],
                        file_name='ssl-client.xml')
                else:
                    Logger.info(
                        'Ranger KMS is not ssl enabled, skipping ssl-client for hdfs audits.'
                    )
            except Exception, err:
                Logger.exception(
                    "Audit directory creation in DDPS for STORM Ranger plugin failed with error:\n{0}"
                    .format(err))

        api_version = 'v2'
        setup_ranger_plugin(
            'storm-nimbus',
            'storm',
            params.previous_jdbc_jar,
            params.downloaded_custom_connector,
            params.driver_curl_source,
            params.driver_curl_target,
            params.java64_home,
            params.repo_name,
            params.storm_ranger_plugin_repo,
            params.ranger_env,
            params.ranger_plugin_properties,
            params.policy_user,
            params.policymgr_mgr_url,
            params.enable_ranger_storm,
            conf_dict=params.conf_dir,
            component_user=params.storm_user,
            component_group=params.user_group,
            cache_service_list=['storm'],
            plugin_audit_properties=params.config['configurations']
            ['ranger-storm-audit'],
            plugin_audit_attributes=params.config['configurationAttributes']
            ['ranger-storm-audit'],
            plugin_security_properties=params.config['configurations']
            ['ranger-storm-security'],
            plugin_security_attributes=params.config['configurationAttributes']
            ['ranger-storm-security'],
            plugin_policymgr_ssl_properties=params.config['configurations']
            ['ranger-storm-policymgr-ssl'],
            plugin_policymgr_ssl_attributes=params.
            config['configurationAttributes']['ranger-storm-policymgr-ssl'],
            component_list=['storm-client', 'storm-nimbus'],
            audit_db_is_enabled=params.xa_audit_db_is_enabled,
            credential_file=params.credential_file,
            xa_audit_db_password=params.xa_audit_db_password,
            ssl_truststore_password=params.ssl_truststore_password,
            ssl_keystore_password=params.ssl_keystore_password,
            stack_version_override=stack_version,
            skip_if_rangeradmin_down=not params.retryAble,
            api_version=api_version,
            is_security_enabled=params.security_enabled,
            is_stack_supports_ranger_kerberos=params.
            stack_supports_ranger_kerberos,
            component_user_principal=params.ranger_storm_principal
            if params.security_enabled else None,
            component_user_keytab=params.ranger_storm_keytab
            if params.security_enabled else None)

        if params.stack_supports_core_site_for_ranger_plugin and params.enable_ranger_storm and params.security_enabled:
            if params.has_namenode:

                mount_table_xml_inclusion_file_full_path = None
                mount_table_content = None
                if 'viewfs-mount-table' in params.config['configurations']:
                    xml_inclusion_file_name = 'viewfs-mount-table.xml'
                    mount_table = params.config['configurations'][
                        'viewfs-mount-table']

                    if 'content' in mount_table and mount_table[
                            'content'].strip():
                        mount_table_xml_inclusion_file_full_path = os.path.join(
                            site_files_create_path, xml_inclusion_file_name)
                        mount_table_content = mount_table['content']

                Logger.info(
                    "Stack supports core-site.xml creation for Ranger plugin and Namenode is installed, creating create core-site.xml from namenode configurations"
                )
                setup_configuration_file_for_required_plugins(
                    component_user=params.storm_user,
                    component_group=params.user_group,
                    create_core_site_path=site_files_create_path,
                    configurations=params.config['configurations']
                    ['core-site'],
                    configuration_attributes=params.
                    config['configuration_attributes']['core-site'],
                    file_name='core-site.xml',
                    xml_include_file=mount_table_xml_inclusion_file_full_path,
                    xml_include_file_content=mount_table_content)
            else:
                Logger.info(
                    "Stack supports core-site.xml creation for Ranger plugin and Namenode is not installed, creating create core-site.xml from default configurations"
                )
                setup_configuration_file_for_required_plugins(
                    component_user=params.storm_user,
                    component_group=params.user_group,
                    create_core_site_path=site_files_create_path,
                    configurations={
                        'hadoop.security.authentication':
                        'kerberos' if params.security_enabled else 'simple'
                    },
                    configuration_attributes={},
                    file_name='core-site.xml')

            if len(params.namenode_hosts) > 1:
                Logger.info(
                    'Ranger Storm plugin is enabled along with security and NameNode is HA , creating hdfs-site.xml'
                )
                setup_configuration_file_for_required_plugins(
                    component_user=params.storm_user,
                    component_group=params.user_group,
                    create_core_site_path=site_files_create_path,
                    configurations=params.config['configurations']
                    ['hdfs-site'],
                    configuration_attributes=params.
                    config['configurationAttributes']['hdfs-site'],
                    file_name='hdfs-site.xml')
            else:
                Logger.info(
                    'Ranger Storm plugin is not enabled or security is disabled, removing hdfs-site.xml'
                )
                File(format('{site_files_create_path}/hdfs-site.xml'),
                     action="delete")
        else:
            Logger.info(
                "Stack does not support core-site.xml creation for Ranger plugin, skipping core-site.xml configurations"
            )
Ejemplo n.º 16
0
  def execute(self):
    """
    Sets up logging;
    Parses command parameters and executes method relevant to command type
    """
    parser = OptionParser()
    parser.add_option("-o", "--out-files-logging", dest="log_out_files", action="store_true",
                      help="use this option to enable outputting *.out files of the service pre-start")
    (self.options, args) = parser.parse_args()

    self.log_out_files = self.options.log_out_files

    # parse arguments
    if len(args) < 6:
     print "Script expects at least 6 arguments"
     print USAGE.format(os.path.basename(sys.argv[0])) # print to stdout
     sys.exit(1)

    self.command_name = str.lower(sys.argv[1])
    self.command_data_file = sys.argv[2]
    self.basedir = sys.argv[3]
    self.stroutfile = sys.argv[4]
    self.load_structured_out()
    self.logging_level = sys.argv[5]
    Script.tmp_dir = sys.argv[6]
    # optional script arguments for forcing https protocol and ca_certs file
    if len(sys.argv) >= 8:
      Script.force_https_protocol = sys.argv[7]
    if len(sys.argv) >= 9:
      Script.ca_cert_file_path = sys.argv[8]

    logging_level_str = logging._levelNames[self.logging_level]
    Logger.initialize_logger(__name__, logging_level=logging_level_str)

    # on windows we need to reload some of env variables manually because there is no default paths for configs(like
    # /etc/something/conf on linux. When this env vars created by one of the Script execution, they can not be updated
    # in agent, so other Script executions will not be able to access to new env variables
    if OSCheck.is_windows_family():
      reload_windows_env()

    # !!! status commands re-use structured output files; if the status command doesn't update the
    # the file (because it doesn't have to) then we must ensure that the file is reset to prevent
    # old, stale structured output from a prior status command from being used
    if self.command_name == "status":
      Script.structuredOut = {}
      self.put_structured_out({})

    # make sure that script has forced https protocol and ca_certs file passed from agent
    ensure_ssl_using_protocol(Script.get_force_https_protocol_name(), Script.get_ca_cert_file_path())

    try:
      with open(self.command_data_file) as f:
        pass
        Script.config = ConfigDictionary(json.load(f))
        # load passwords here(used on windows to impersonate different users)
        Script.passwords = {}
        for k, v in _PASSWORD_MAP.iteritems():
          if get_path_from_configuration(k, Script.config) and get_path_from_configuration(v, Script.config):
            Script.passwords[get_path_from_configuration(k, Script.config)] = get_path_from_configuration(v, Script.config)

    except IOError:
      Logger.logger.exception("Can not read json file with command parameters: ")
      sys.exit(1)

    from resource_management.libraries.functions import lzo_utils

    repo_tags_to_skip = set()
    if not lzo_utils.is_gpl_license_accepted():
      repo_tags_to_skip.add("GPL")

    Script.repository_util = RepositoryUtil(Script.config, repo_tags_to_skip)

    # Run class method depending on a command type
    try:
      method = self.choose_method_to_execute(self.command_name)
      with Environment(self.basedir, tmp_dir=Script.tmp_dir) as env:
        env.config.download_path = Script.tmp_dir

        if not self.is_hook():
          self.execute_prefix_function(self.command_name, 'pre', env)

        method(env)

        if not self.is_hook():
          self.execute_prefix_function(self.command_name, 'post', env)

    except Fail as ex:
      ex.pre_raise()
      raise
    finally:
      try:
        if self.should_expose_component_version(self.command_name):
          self.save_component_version_to_structured_out(self.command_name)
      except:
        Logger.exception("Reporting component version failed")
Ejemplo n.º 17
0
def enable_kms_plugin():
    """Enable the Ranger KMS plugin.

    Verifies (or creates) the Ranger service for KMS, writes the plugin's
    audit/security/SSL XML configs (with plain-text passwords masked),
    provisions the JCEKS credential store entries, and prepares the HDFS
    audit directories when HDFS auditing is enabled.

    All inputs come from the ``params`` module; no arguments, no return
    value. Python 2 code (octal literals such as ``0644`` and the
    ``except Exception, err`` form).
    """
    import params

    # Everything below is only meaningful when a Ranger Admin exists in
    # the cluster; otherwise this function is a no-op.
    if params.has_ranger_admin:

        ranger_flag = False

        # On Kerberized clusters (when the stack supports Kerberos for
        # Ranger), authenticate the service-check call with the KMS
        # principal if one is configured, otherwise fall back to the
        # SPNEGO credentials.
        # NOTE(review): 'spengo_keytab' appears to be a historical typo
        # for 'spnego_keytab' in the params module — confirm against
        # params before renaming anything.
        if params.stack_supports_ranger_kerberos and params.security_enabled:
            if not is_empty(params.rangerkms_principal
                            ) and params.rangerkms_principal != '':
                ranger_flag = check_ranger_service_support_kerberos(
                    params.kms_user, params.rangerkms_keytab,
                    params.rangerkms_principal)
            else:
                ranger_flag = check_ranger_service_support_kerberos(
                    params.kms_user, params.spengo_keytab,
                    params.spnego_principal)
        else:
            ranger_flag = check_ranger_service()

        # NOTE(review): a failed Get/Create is only logged; execution
        # deliberately continues and still writes the plugin configs.
        if not ranger_flag:
            Logger.error('Error in Get/Create service for Ranger Kms.')

        # The timestamp doubles as the "enabled" marker content in
        # ranger-security.xml below.
        current_datetime = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

        File(format('{kms_conf_dir}/ranger-security.xml'),
             owner=params.kms_user,
             group=params.kms_group,
             mode=0644,
             content=format(
                 '<ranger>\n<enabled>{current_datetime}</enabled>\n</ranger>'))

        # Policy-cache directories used by the Ranger plugin to store a
        # local copy of policies fetched from Ranger Admin.
        Directory([
            os.path.join('/etc', 'ranger', params.repo_name),
            os.path.join('/etc', 'ranger', params.repo_name, 'policycache')
        ],
                  owner=params.kms_user,
                  group=params.kms_group,
                  mode=0775,
                  create_parents=True)

        # Ensure the per-repo policy-cache file exists with the right
        # ownership/permissions (content is managed by the plugin itself).
        File(os.path.join('/etc', 'ranger', params.repo_name, 'policycache',
                          format('kms_{repo_name}.json')),
             owner=params.kms_user,
             group=params.kms_group,
             mode=0644)

        # remove plain-text password from xml configs: work on a copy of
        # the ranger-kms-audit config and mask the password value before
        # it is rendered to disk.
        plugin_audit_properties_copy = {}
        plugin_audit_properties_copy.update(
            params.config['configurations']['ranger-kms-audit'])

        if params.plugin_audit_password_property in plugin_audit_properties_copy:
            plugin_audit_properties_copy[
                params.plugin_audit_password_property] = "crypted"

        XmlConfig(
            "ranger-kms-audit.xml",
            conf_dir=params.kms_conf_dir,
            configurations=plugin_audit_properties_copy,
            configuration_attributes=params.config['configurationAttributes']
            ['ranger-kms-audit'],
            owner=params.kms_user,
            group=params.kms_group,
            mode=0744)

        XmlConfig(
            "ranger-kms-security.xml",
            conf_dir=params.kms_conf_dir,
            configurations=params.config['configurations']
            ['ranger-kms-security'],
            configuration_attributes=params.config['configurationAttributes']
            ['ranger-kms-security'],
            owner=params.kms_user,
            group=params.kms_group,
            mode=0744)

        # remove plain-text password from xml configs: same masking
        # treatment for every password property in the policymgr-ssl
        # config before writing it out.
        ranger_kms_policymgr_ssl_copy = {}
        ranger_kms_policymgr_ssl_copy.update(
            params.config['configurations']['ranger-kms-policymgr-ssl'])

        for prop in params.kms_plugin_password_properties:
            if prop in ranger_kms_policymgr_ssl_copy:
                ranger_kms_policymgr_ssl_copy[prop] = "crypted"

        XmlConfig(
            "ranger-policymgr-ssl.xml",
            conf_dir=params.kms_conf_dir,
            configurations=ranger_kms_policymgr_ssl_copy,
            configuration_attributes=params.config['configurationAttributes']
            ['ranger-kms-policymgr-ssl'],
            owner=params.kms_user,
            group=params.kms_group,
            mode=0744)

        # The real (unmasked) passwords go into the credential store via
        # the credential-setup CLI (cred_setup_prefix): one entry for the
        # audit DB (when enabled), plus SSL keystore and truststore.
        if params.xa_audit_db_is_enabled:
            cred_setup = params.cred_setup_prefix + (
                '-f', params.credential_file, '-k', 'auditDBCred', '-v',
                PasswordString(params.xa_audit_db_password), '-c', '1')
            Execute(cred_setup,
                    environment={'JAVA_HOME': params.java_home},
                    logoutput=True,
                    sudo=True)

        cred_setup = params.cred_setup_prefix + (
            '-f', params.credential_file, '-k', 'sslKeyStore', '-v',
            PasswordString(params.ssl_keystore_password), '-c', '1')
        Execute(cred_setup,
                environment={'JAVA_HOME': params.java_home},
                logoutput=True,
                sudo=True)

        cred_setup = params.cred_setup_prefix + (
            '-f', params.credential_file, '-k', 'sslTrustStore', '-v',
            PasswordString(params.ssl_truststore_password), '-c', '1')
        Execute(cred_setup,
                environment={'JAVA_HOME': params.java_home},
                logoutput=True,
                sudo=True)

        # Tighten ownership/permissions on the credential file — but only
        # if it actually exists (the Execute above may have failed).
        File(params.credential_file,
             owner=params.kms_user,
             group=params.kms_group,
             only_if=format("test -e {credential_file}"),
             mode=0640)

        # The JCEKS provider writes a hidden ".<name>.crc" checksum file
        # alongside the store; give it the same ownership/permissions.
        dot_jceks_crc_file_path = os.path.join(
            os.path.dirname(params.credential_file),
            "." + os.path.basename(params.credential_file) + ".crc")

        File(dot_jceks_crc_file_path,
             owner=params.kms_user,
             group=params.kms_group,
             only_if=format("test -e {dot_jceks_crc_file_path}"),
             mode=0640)

        # create ranger kms audit directory in HDFS (best-effort: failures
        # are logged, not raised, so plugin enablement still completes).
        if params.xa_audit_hdfs_is_enabled and params.has_namenode and params.has_hdfs_client_on_node:
            try:
                params.HdfsResource("/ranger/audit",
                                    type="directory",
                                    action="create_on_execute",
                                    owner=params.hdfs_user,
                                    group=params.hdfs_user,
                                    mode=0755,
                                    recursive_chmod=True)
                params.HdfsResource("/ranger/audit/kms",
                                    type="directory",
                                    action="create_on_execute",
                                    owner=params.kms_user,
                                    group=params.kms_group,
                                    mode=0750,
                                    recursive_chmod=True)
                params.HdfsResource(None, action="execute")
            except Exception, err:
                Logger.exception(
                    "Audit directory creation in HDFS for RANGER KMS Ranger plugin failed with error:\n{0}"
                    .format(err))

        # In a NameNode HA setup (more than one NameNode host) the plugin
        # needs a local hdfs-site.xml to resolve the nameservice; in the
        # non-HA case any stale copy is removed instead.
        if params.xa_audit_hdfs_is_enabled and len(params.namenode_host) > 1:
            Logger.info(
                'Audit to Hdfs enabled in NameNode HA environment, creating hdfs-site.xml'
            )
            XmlConfig(
                "hdfs-site.xml",
                conf_dir=params.kms_conf_dir,
                configurations=params.config['configurations']['hdfs-site'],
                configuration_attributes=params.
                config['configurationAttributes']['hdfs-site'],
                owner=params.kms_user,
                group=params.kms_group,
                mode=0644)
        else:
            File(format('{kms_conf_dir}/hdfs-site.xml'), action="delete")
Ejemplo n.º 18
0
    def install_packages(self, env):
        """
    List of packages that are required by service is received from the server
    as a command parameter. The method installs all packages
    from this list
    
    exclude_packages - list of regexes (possibly raw strings as well), the
    packages which match the regex won't be installed.
    NOTE: regexes don't have Python syntax, but simple package regexes which support only * and .* and ?
    """
        config = self.get_config()

        # Sys-prepped hosts arrive with all packages pre-installed, so
        # skip installation entirely.
        if 'host_sys_prepped' in config['hostLevelParams']:
            # do not install anything on sys-prepped host
            if config['hostLevelParams']['host_sys_prepped'] is True:
                Logger.info("Node has all packages pre-installed. Skipping.")
                return
            pass
        try:
            # package_list is sent by the server as a JSON string.
            package_list_str = config['hostLevelParams']['package_list']
            # Retry behaviour when a repo is temporarily unavailable.
            agent_stack_retry_on_unavailability = bool(
                config['hostLevelParams']
                ['agent_stack_retry_on_unavailability'])
            agent_stack_retry_count = int(
                config['hostLevelParams']['agent_stack_retry_count'])
            pkg_provider = get_provider("Package")
            # Best-effort: if repo enumeration fails we still attempt the
            # install with an empty availability list.
            try:
                available_packages_in_repos = pkg_provider.get_available_packages_in_repos(
                    config['repositoryFile']['repositories'])
            except Exception as err:
                Logger.exception("Unable to load available packages")
                available_packages_in_repos = []
            # basestring check: Python 2 (covers both str and unicode).
            if isinstance(package_list_str,
                          basestring) and len(package_list_str) > 0:
                package_list = json.loads(package_list_str)
                for package in package_list:
                    # check_package_condition applies the server-side
                    # skip/condition rules for this package entry.
                    if self.check_package_condition(package):
                        # Resolve the abstract package name against what the
                        # configured repos actually provide.
                        name = self.get_package_from_available(
                            package['name'], available_packages_in_repos)
                        # HACK: On Windows, only install ambari-metrics packages using Choco Package Installer
                        # TODO: Update this once choco packages for hadoop are created. This is because, service metainfo.xml support
                        # <osFamily>any<osFamily> which would cause installation failure on Windows.
                        if OSCheck.is_windows_family():
                            if "ambari-metrics" in name:
                                Package(name)
                        else:
                            Package(name,
                                    retry_on_repo_unavailability=
                                    agent_stack_retry_on_unavailability,
                                    retry_count=agent_stack_retry_count)
        # Missing hostLevelParams keys mean the server sent no package
        # list for this command — nothing to install.
        except KeyError:
            pass  # No reason to worry

        if OSCheck.is_windows_family():
            #TODO hacky install of windows msi, remove it or move to old(2.1) stack definition when component based install will be implemented
            hadoop_user = config["configurations"]["cluster-env"][
                "hadoop.user.name"]
            install_windows_msi(
                config['hostLevelParams']['jdk_location'],
                config["hostLevelParams"]["agentCacheDir"], [
                    "hdp-2.3.0.0.winpkg.msi", "hdp-2.3.0.0.cab",
                    "hdp-2.3.0.0-01.cab"
                ], hadoop_user, self.get_password(hadoop_user),
                str(config['hostLevelParams']['stack_version']))
            reload_windows_env()