def config_validator():
  """
  config_validator() -> [ (config_variable, error_message) ]
  Called by core check_config() view.
  """
  from hadoop.fs import hadoopfs
  from hadoop import job_tracker

  res = []

  # The local Hadoop install layout must exist before anything else can work.
  res.extend(validate_path(HADOOP_HOME, is_dir=True))
  res.extend(validate_path(HADOOP_BIN, is_dir=False))

  # JARs: even though these are private, we need them to run jobsub.
  for jar_conf in (HADOOP_EXAMPLES_JAR, HADOOP_STREAMING_JAR):
    res.extend(validate_path(jar_conf, is_dir=False))

  # HDFS_CLUSTERS: probe each configured filesystem.
  for cluster_name in HDFS_CLUSTERS.keys():
    res.extend(hadoopfs.test_fs_configuration(HDFS_CLUSTERS[cluster_name], HADOOP_BIN))

  # MR_CLUSTERS: probe each configured job tracker.
  for cluster_name in MR_CLUSTERS.keys():
    res.extend(job_tracker.test_jt_configuration(MR_CLUSTERS[cluster_name]))

  return res
def config_validator():
  """
  config_validator() -> [ (config_variable, error_message) ]
  Called by core check_config() view.
  """
  from hadoop.fs import hadoopfs
  from hadoop import job_tracker

  errors = []

  # HADOOP_HOME / HADOOP_BIN must point at a real installation.
  errors.extend(validate_path(HADOOP_HOME, is_dir=True))
  errors.extend(validate_path(HADOOP_BIN, is_dir=False))

  # JARs: even though these are private, we need them to run jobsub.
  errors.extend(validate_path(HADOOP_EXAMPLES_JAR, is_dir=False))
  errors.extend(validate_path(HADOOP_STREAMING_JAR, is_dir=False))

  # Check every configured HDFS cluster is reachable.
  for key in HDFS_CLUSTERS.keys():
    errors.extend(hadoopfs.test_fs_configuration(HDFS_CLUSTERS[key], HADOOP_BIN))

  # Check every configured MapReduce cluster is reachable.
  for key in MR_CLUSTERS.keys():
    errors.extend(job_tracker.test_jt_configuration(MR_CLUSTERS[key]))

  return errors
def config_validator():
  """
  config_validator() -> [ (config_variable, error_message) ]
  Called by core check_config() view.
  """
  from hadoop.cluster import get_all_hdfs
  from liboozie.oozie_api import get_oozie

  res = []

  # Best-effort ping of the Oozie server; any failure means "down".
  status = 'down'
  try:
    status = str(get_oozie().get_oozie_status())
  except Exception:
    # Bug fix: the original bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; only operational errors should read as "server down".
    pass
  if 'NORMAL' not in status:
    res.append((status, _('The Oozie server is not available')))

  class ConfigMock:
    """Duck-types a Hue config entry so validate_path() can check a literal path."""
    def __init__(self, value):
      self.value = value
    def get(self):
      return self.value
    def get_fully_qualifying_key(self):
      return self.value

  # Both the workflow deployment dir and the share lib must exist on every HDFS.
  for cluster in get_all_hdfs().values():
    res.extend(validate_path(REMOTE_DEPLOYMENT_DIR, is_dir=True, fs=cluster,
               message=_('The deployment directory of Oozie workflows does not exist. '
                         'Please run "Setup App" on the Oozie workflow page.')))
    res.extend(validate_path(ConfigMock('/user/oozie/share/lib'), is_dir=True, fs=cluster,
               message=_('Oozie Share Lib not installed in default location.')))

  return res
def config_validator():
  """
  config_validator() -> [ (config_variable, error_message) ]
  Called by core check_config() view.
  """
  from hadoop.cluster import get_all_hdfs
  # NOTE: the Oozie server availability check is intentionally disabled below.
  # from liboozie.oozie_api import get_oozie

  res = []

  # status = 'down'
  # try:
  #   status = str(get_oozie().get_oozie_status())
  # except:
  #   pass
  # if 'NORMAL' not in status:
  #   res.append((status, _('The Oozie server is not available')))

  class ConfigMock:
    """Minimal stand-in for a config entry so validate_path() can check a literal path."""
    def __init__(self, value):
      self.value = value
    def get(self):
      return self.value
    def get_fully_qualifying_key(self):
      return self.value

  # Verify the deployment dir and the Oozie share lib on every HDFS cluster.
  for hdfs_cluster in get_all_hdfs().values():
    res.extend(validate_path(REMOTE_DEPLOYMENT_DIR, is_dir=True, fs=hdfs_cluster,
               message=_('The deployment directory of Oozie workflows does not exist. '
                         'Run "Setup App" on the Oozie workflow page.')))
    res.extend(validate_path(ConfigMock('/user/oozie/share/lib'), is_dir=True, fs=hdfs_cluster,
               message=_('Oozie Share Lib not installed in default location.')))

  return res
def config_validator(user): """ config_validator() -> [ (config_variable, error_message) ] Called by core check_config() view. """ from desktop.lib import i18n res = [] if not get_secret_key(): res.append(( SECRET_KEY, unicode( _("Secret key should be configured as a random string. All sessions will be lost on restart" )))) # Validate SSL setup if SSL_CERTIFICATE.get(): res.extend(validate_path(SSL_CERTIFICATE, is_dir=False)) if not SSL_PRIVATE_KEY.get(): res.append(( SSL_PRIVATE_KEY, unicode( _("SSL private key file should be set to enable HTTPS.")))) else: res.extend(validate_path(SSL_PRIVATE_KEY, is_dir=False)) # Validate encoding if not i18n.validate_encoding(DEFAULT_SITE_ENCODING.get()): res.append( (DEFAULT_SITE_ENCODING, unicode(_("Encoding not supported.")))) # Validate kerberos if KERBEROS.HUE_KEYTAB.get() is not None: res.extend(validate_path(KERBEROS.HUE_KEYTAB, is_dir=False)) # Keytab should not be world or group accessible kt_stat = os.stat(KERBEROS.HUE_KEYTAB.get()) if stat.S_IMODE(kt_stat.st_mode) & 0077: res.append( (KERBEROS.HUE_KEYTAB, force_unicode( _("Keytab should have 0600 permissions (has %o).") % stat.S_IMODE(kt_stat.st_mode)))) res.extend(validate_path(KERBEROS.KINIT_PATH, is_dir=False)) res.extend(validate_path(KERBEROS.CCACHE_PATH, is_dir=False)) if LDAP.LDAP_SERVERS.get(): for ldap_record_key in LDAP.LDAP_SERVERS.get(): res.extend( validate_ldap(user, LDAP.LDAP_SERVERS.get()[ldap_record_key])) else: res.extend(validate_ldap(user, LDAP)) # Validate MYSQL storage engine of all tables res.extend(validate_mysql_storage()) return res
def test_validate_path():
  # '/' is a real directory and must validate cleanly; the second path is
  # bogus and must produce at least one error.
  for path, should_pass in (('/', True), ('/tmm/does_not_exist', False)):
    reset = desktop.conf.SSL_PRIVATE_KEY.set_for_testing(path)
    errors = validate_path(desktop.conf.SSL_PRIVATE_KEY, is_dir=True)
    if should_pass:
      assert_equal([], errors)
    else:
      assert_not_equal([], errors)
    reset()
def config_validator(): """ config_validator() -> [ (config_variable, error_message) ] Called by core check_config() view. """ from desktop.lib import i18n res = [ ] if not SECRET_KEY.get(): res.append((SECRET_KEY, unicode(_("Secret key should be configured as a random string.")))) # Validate SSL setup if SSL_CERTIFICATE.get(): res.extend(validate_path(SSL_CERTIFICATE, is_dir=False)) if not SSL_PRIVATE_KEY.get(): res.append((SSL_PRIVATE_KEY, unicode(_("SSL private key file should be set to enable HTTPS.")))) else: res.extend(validate_path(SSL_PRIVATE_KEY, is_dir=False)) # Validate encoding if not i18n.validate_encoding(DEFAULT_SITE_ENCODING.get()): res.append((DEFAULT_SITE_ENCODING, unicode(_("Encoding not supported.")))) # Validate kerberos if KERBEROS.HUE_KEYTAB.get() is not None: res.extend(validate_path(KERBEROS.HUE_KEYTAB, is_dir=False)) # Keytab should not be world or group accessible kt_stat = os.stat(KERBEROS.HUE_KEYTAB.get()) if stat.S_IMODE(kt_stat.st_mode) & 0077: res.append((KERBEROS.HUE_KEYTAB, unicode(_("Keytab should have 0600 permissions (has %o).") % stat.S_IMODE(kt_stat.st_mode)))) res.extend(validate_path(KERBEROS.KINIT_PATH, is_dir=False)) res.extend(validate_path(KERBEROS.CCACHE_PATH, is_dir=False)) if LDAP.NT_DOMAIN.get() is not None or \ LDAP.LDAP_USERNAME_PATTERN.get() is not None: if LDAP.LDAP_URL.get() is None: res.append((LDAP.LDAP_URL, unicode(_("LDAP is only partially configured. An LDAP URL must be provided.")))) if LDAP.LDAP_URL.get() is not None: if LDAP.NT_DOMAIN.get() is None and \ LDAP.LDAP_USERNAME_PATTERN.get() is None: res.append(LDAP.LDAP_URL, unicode(_("LDAP is only partially configured. An NT Domain or username " "search pattern must be provided."))) if LDAP.LDAP_USERNAME_PATTERN.get() is not None and \ '<username>' not in LDAP.LDAP_USERNAME_PATTERN.get(): res.append(LDAP.LDAP_USERNAME_PATTERN, unicode(_("The LDAP username pattern should contain the special" "<username> replacement string for authentication."))) return res
def config_validator(): """ config_validator() -> [ (config_variable, error_message) ] Called by core check_config() view. """ from desktop.lib import i18n res = [ ] if not SECRET_KEY.get(): res.append((SECRET_KEY, unicode(_("Secret key should be configured as a random string.")))) # Validate SSL setup if SSL_CERTIFICATE.get(): res.extend(validate_path(SSL_CERTIFICATE, is_dir=False)) if not SSL_PRIVATE_KEY.get(): res.append((SSL_PRIVATE_KEY, unicode(_("SSL private key file should be set to enable HTTPS.")))) else: res.extend(validate_path(SSL_PRIVATE_KEY, is_dir=False)) # Validate encoding if not i18n.validate_encoding(DEFAULT_SITE_ENCODING.get()): res.append((DEFAULT_SITE_ENCODING, unicode(_("Encoding not supported.")))) # Validate kerberos if KERBEROS.HUE_KEYTAB.get() is not None: res.extend(validate_path(KERBEROS.HUE_KEYTAB, is_dir=False)) # Keytab should not be world or group accessible kt_stat = os.stat(KERBEROS.HUE_KEYTAB.get()) if stat.S_IMODE(kt_stat.st_mode) & 0077: res.append((KERBEROS.HUE_KEYTAB, unicode(_("Keytab should have 0600 permissions (has %o).") % stat.S_IMODE(kt_stat.st_mode)))) res.extend(validate_path(KERBEROS.KINIT_PATH, is_dir=False)) res.extend(validate_path(KERBEROS.CCACHE_PATH, is_dir=False)) if LDAP.NT_DOMAIN.get() is not None or \ LDAP.LDAP_USERNAME_PATTERN.get() is not None: if LDAP.LDAP_URL.get() is None: res.append((LDAP.LDAP_URL, unicode(_("LDAP is only partially configured. An LDAP URL must be provided.")))) if LDAP.LDAP_URL.get() is not None: if LDAP.NT_DOMAIN.get() is None and \ LDAP.LDAP_USERNAME_PATTERN.get() is None: res.append((LDAP.LDAP_URL, unicode(_("LDAP is only partially configured. An NT Domain or username " "search pattern must be provided.")))) if LDAP.LDAP_USERNAME_PATTERN.get() is not None and \ '<username>' not in LDAP.LDAP_USERNAME_PATTERN.get(): res.append(LDAP.LDAP_USERNAME_PATTERN, unicode(_("The LDAP username pattern should contain the special" "<username> replacement string for authentication."))) return res
def config_validator(user): """ config_validator() -> [ (config_variable, error_message) ] Called by core check_config() view. """ from desktop.lib import i18n res = [] if not SECRET_KEY.get(): res.append( ( SECRET_KEY, unicode(_("Secret key should be configured as a random string. All sessions will be lost on restart")), ) ) # Validate SSL setup if SSL_CERTIFICATE.get(): res.extend(validate_path(SSL_CERTIFICATE, is_dir=False)) if not SSL_PRIVATE_KEY.get(): res.append((SSL_PRIVATE_KEY, unicode(_("SSL private key file should be set to enable HTTPS.")))) else: res.extend(validate_path(SSL_PRIVATE_KEY, is_dir=False)) # Validate encoding if not i18n.validate_encoding(DEFAULT_SITE_ENCODING.get()): res.append((DEFAULT_SITE_ENCODING, unicode(_("Encoding not supported.")))) # Validate kerberos if KERBEROS.HUE_KEYTAB.get() is not None: res.extend(validate_path(KERBEROS.HUE_KEYTAB, is_dir=False)) # Keytab should not be world or group accessible kt_stat = os.stat(KERBEROS.HUE_KEYTAB.get()) if stat.S_IMODE(kt_stat.st_mode) & 0077: res.append( ( KERBEROS.HUE_KEYTAB, force_unicode(_("Keytab should have 0600 permissions (has %o).") % stat.S_IMODE(kt_stat.st_mode)), ) ) res.extend(validate_path(KERBEROS.KINIT_PATH, is_dir=False)) res.extend(validate_path(KERBEROS.CCACHE_PATH, is_dir=False)) if LDAP.LDAP_SERVERS.get(): for ldap_record_key in LDAP.LDAP_SERVERS.get(): res.extend(validate_ldap(user, LDAP.LDAP_SERVERS.get()[ldap_record_key])) else: res.extend(validate_ldap(user, LDAP)) # Validate MYSQL storage engine of all tables res.extend(validate_mysql_storage()) return res
def test_validate_path(): with tempfile.NamedTemporaryFile() as local_file: reset = desktop.conf.SSL_PRIVATE_KEY.set_for_testing(local_file.name) assert_equal([], validate_path(desktop.conf.SSL_PRIVATE_KEY, is_dir=False)) reset() try: reset = desktop.conf.SSL_PRIVATE_KEY.set_for_testing('/tmm/does_not_exist') assert_not_equal([], validate_path(desktop.conf.SSL_PRIVATE_KEY, is_dir=True)) assert_true(False) except Exception, ex: assert_true('does not exist' in str(ex), ex)
def config_validator():
  """
  config_validator() -> [ (config_variable, error_message) ]
  Called by core check_config() view.
  """
  from hadoop.fs import webhdfs
  from hadoop import job_tracker

  res = []
  submit_to = []

  # HDFS_CLUSTERS
  has_default = False
  for name in HDFS_CLUSTERS.keys():
    cluster = HDFS_CLUSTERS[name]
    res.extend(validate_path(cluster.HADOOP_HDFS_HOME, is_dir=True))
    res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
    res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
    res.extend(webhdfs.test_fs_configuration(cluster))
    if name == 'default':
      has_default = True
  if not has_default:
    # Bug fix: append() was called with two positional arguments, which raises
    # TypeError at runtime — the (config, message) pair must be a single tuple.
    res.append(("hadoop.hdfs_clusters", "You should have an HDFS called 'default'."))

  # MR_CLUSTERS
  for name in MR_CLUSTERS.keys():
    cluster = MR_CLUSTERS[name]
    res.extend(validate_path(cluster.HADOOP_MAPRED_HOME, is_dir=True))
    res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
    res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
    res.extend(job_tracker.test_jt_configuration(cluster))
    if cluster.SUBMIT_TO.get():
      submit_to.append('mapred_clusters.' + name)

  # YARN_CLUSTERS
  for name in YARN_CLUSTERS.keys():
    cluster = YARN_CLUSTERS[name]
    res.extend(validate_path(cluster.HADOOP_MAPRED_HOME, is_dir=True))
    res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
    res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
    if cluster.SUBMIT_TO.get():
      submit_to.append('yarn_clusters.' + name)

  # Exactly one cluster must have submit_to enabled.
  if len(submit_to) > 1:
    res.append(("hadoop", "Only one cluster may enable 'submit_to'. "
                "But it is enabled in the following clusters: " + ', '.join(submit_to)))
  elif len(submit_to) == 0:
    res.append(("hadoop", "Please designate one of the MapReduce or "
                "Yarn clusters with `submit_to=true' in order to run jobs."))

  return res
def config_validator(user):
  """
  config_validator() -> [ (config_variable, error_message) ]
  Called by core check_config() view.
  """
  from hadoop.fs import webhdfs
  from hadoop import job_tracker

  res = []
  submit_to = []

  # HDFS_CLUSTERS
  has_default = False
  for name in HDFS_CLUSTERS.keys():
    cluster = HDFS_CLUSTERS[name]
    res.extend(validate_path(cluster.HADOOP_HDFS_HOME, is_dir=True))
    res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
    res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
    res.extend(webhdfs.test_fs_configuration(cluster))
    if name == 'default':
      has_default = True
  if not has_default:
    # Bug fix: append() was called with two positional arguments, which raises
    # TypeError at runtime — the (config, message) pair must be a single tuple.
    res.append(("hadoop.hdfs_clusters", "You should have an HDFS called 'default'."))

  # MR_CLUSTERS
  for name in MR_CLUSTERS.keys():
    cluster = MR_CLUSTERS[name]
    res.extend(validate_path(cluster.HADOOP_MAPRED_HOME, is_dir=True))
    res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
    res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
    res.extend(job_tracker.test_jt_configuration(cluster))
    if cluster.SUBMIT_TO.get():
      submit_to.append('mapred_clusters.' + name)

  # YARN_CLUSTERS
  for name in YARN_CLUSTERS.keys():
    cluster = YARN_CLUSTERS[name]
    res.extend(validate_path(cluster.HADOOP_MAPRED_HOME, is_dir=True))
    res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
    res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
    if cluster.SUBMIT_TO.get():
      submit_to.append('yarn_clusters.' + name)

  # Exactly one cluster must have submit_to enabled.
  if len(submit_to) > 1:
    res.append(("hadoop", "Only one cluster may enable 'submit_to'. "
                "But it is enabled in the following clusters: " + ', '.join(submit_to)))
  elif len(submit_to) == 0:
    res.append(("hadoop", "Please designate one of the MapReduce or "
                "Yarn clusters with `submit_to=true' in order to run jobs."))

  return res
def config_validator(user):
  """
  config_validator() -> [ (config_variable, error_message) ]
  Called by core check_config() view.
  """
  from hadoop.fs import webhdfs
  from hadoop import job_tracker

  res = []
  submit_to = []

  # HDFS_CLUSTERS
  has_default = False
  for name in HDFS_CLUSTERS.keys():
    cluster = HDFS_CLUSTERS[name]
    res.extend(validate_path(cluster.HADOOP_HDFS_HOME, is_dir=True))
    res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
    res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
    res.extend(webhdfs.test_fs_configuration(cluster))
    if name == 'default':
      has_default = True
  if not has_default:
    # Bug fix: append() was called with two positional arguments, which raises
    # TypeError at runtime — the (config, message) pair must be a single tuple.
    res.append(("hadoop.hdfs_clusters", "You should have an HDFS called 'default'."))

  # MR_CLUSTERS: only clusters marked submit_to are validated.
  mr_down = []
  for name in MR_CLUSTERS.keys():
    cluster = MR_CLUSTERS[name]
    if cluster.SUBMIT_TO.get():
      res.extend(validate_path(cluster.HADOOP_MAPRED_HOME, is_dir=True))
      res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
      res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
      mr_down.extend(job_tracker.test_jt_configuration(cluster))
      submit_to.append('mapred_clusters.' + name)
  # If HA still failing: only report JT errors when every JT is down.
  if mr_down and len(mr_down) == len(MR_CLUSTERS.keys()):
    res.extend(mr_down)

  # YARN_CLUSTERS
  for name in YARN_CLUSTERS.keys():
    cluster = YARN_CLUSTERS[name]
    if cluster.SUBMIT_TO.get():
      res.extend(validate_path(cluster.HADOOP_MAPRED_HOME, is_dir=True))
      res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
      res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
      submit_to.append('yarn_clusters.' + name)

  if not submit_to:
    res.append(("hadoop", "Please designate one of the MapReduce or "
                "Yarn clusters with `submit_to=true' in order to run jobs."))

  return res
def config_validator(user):
  """
  config_validator() -> [ (config_variable, error_message) ]
  Called by core check_config() view.
  """
  from hadoop.cluster import get_all_hdfs

  res = []

  # Report an unhealthy/unreachable Oozie server.
  status = get_oozie_status()
  if 'NORMAL' not in status:
    res.append((status, _('The Oozie server is not available')))

  class ConfigMock:
    """Duck-types a config entry so validate_path() can check a literal HDFS path."""
    def __init__(self, value):
      self.value = value
    def get(self):
      return self.value
    def get_fully_qualifying_key(self):
      return self.value

  # The default Oozie share lib location must exist on every HDFS cluster.
  for hdfs_cluster in get_all_hdfs().values():
    res.extend(validate_path(ConfigMock('/user/oozie/share/lib'), is_dir=True, fs=hdfs_cluster,
               message=_('Oozie Share Lib not installed in default location.')))

  return res
def config_validator(user):
  """
  config_validator() -> [ (config_variable, error_message) ]
  Called by core check_config() view.
  """
  from hadoop.cluster import get_all_hdfs

  res = []

  # Only validate when an Oozie server is configured at all.
  if OOZIE_URL.get():
    status = get_oozie_status(user)
    if 'NORMAL' not in status:
      res.append((status, _('The Oozie server is not available')))

    class ConfigMock:
      """Duck-types a config entry so validate_path() can check a literal HDFS path."""
      def __init__(self, value):
        self.value = value
      def get(self):
        return self.value
      def get_fully_qualifying_key(self):
        return self.value

    # The default Oozie share lib location must exist on every HDFS cluster.
    for hdfs_cluster in get_all_hdfs().values():
      res.extend(validate_path(ConfigMock('/user/oozie/share/lib'), is_dir=True, fs=hdfs_cluster,
                 message=_('Oozie Share Lib not installed in default location.')))

  return res
def config_validator(user):
  """
  config_validator() -> [ (config_variable, error_message) ]
  Called by core check_config() view.
  """
  from hadoop.fs import webhdfs
  from hadoop import job_tracker

  res = []
  submit_to = []

  # HDFS_CLUSTERS
  has_default = False
  for name in HDFS_CLUSTERS.keys():
    cluster = HDFS_CLUSTERS[name]
    res.extend(validate_path(cluster.HADOOP_HDFS_HOME, is_dir=True))
    res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
    res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
    res.extend(webhdfs.test_fs_configuration(cluster))
    if name == 'default':
      has_default = True
  if not has_default:
    # Bug fix: append() was called with two positional arguments, which raises
    # TypeError at runtime — the (config, message) pair must be a single tuple.
    res.append(("hadoop.hdfs_clusters", "You should have an HDFS called 'default'."))

  # MR_CLUSTERS
  mr_down = []
  for name in MR_CLUSTERS.keys():
    cluster = MR_CLUSTERS[name]
    res.extend(validate_path(cluster.HADOOP_MAPRED_HOME, is_dir=True))
    res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
    res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
    mr_down.extend(job_tracker.test_jt_configuration(cluster))
    if cluster.SUBMIT_TO.get():
      submit_to.append('mapred_clusters.' + name)
  # If HA still failing: only report JT errors when every JT is down.
  if mr_down and len(mr_down) == len(MR_CLUSTERS.keys()):
    res.extend(mr_down)

  # YARN_CLUSTERS
  for name in YARN_CLUSTERS.keys():
    cluster = YARN_CLUSTERS[name]
    res.extend(validate_path(cluster.HADOOP_MAPRED_HOME, is_dir=True))
    res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
    res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
    if cluster.SUBMIT_TO.get():
      submit_to.append('yarn_clusters.' + name)

  if not submit_to:
    res.append(("hadoop", "Please designate one of the MapReduce or "
                "Yarn clusters with `submit_to=true' in order to run jobs."))

  return res
def config_validator(user): """ config_validator() -> [ (config_variable, error_message) ] Called by core check_config() view. """ from desktop.lib import i18n res = [ ] if not SECRET_KEY.get(): res.append((SECRET_KEY, unicode(_("Secret key should be configured as a random string.")))) # Validate SSL setup if SSL_CERTIFICATE.get(): res.extend(validate_path(SSL_CERTIFICATE, is_dir=False)) if not SSL_PRIVATE_KEY.get(): res.append((SSL_PRIVATE_KEY, unicode(_("SSL private key file should be set to enable HTTPS.")))) else: res.extend(validate_path(SSL_PRIVATE_KEY, is_dir=False)) # Validate encoding if not i18n.validate_encoding(DEFAULT_SITE_ENCODING.get()): res.append((DEFAULT_SITE_ENCODING, unicode(_("Encoding not supported.")))) # Validate kerberos if KERBEROS.HUE_KEYTAB.get() is not None: res.extend(validate_path(KERBEROS.HUE_KEYTAB, is_dir=False)) # Keytab should not be world or group accessible kt_stat = os.stat(KERBEROS.HUE_KEYTAB.get()) if stat.S_IMODE(kt_stat.st_mode) & 0077: res.append((KERBEROS.HUE_KEYTAB, unicode(_("Keytab should have 0600 permissions (has %o).") % stat.S_IMODE(kt_stat.st_mode)))) res.extend(validate_path(KERBEROS.KINIT_PATH, is_dir=False)) res.extend(validate_path(KERBEROS.CCACHE_PATH, is_dir=False)) if LDAP.LDAP_URL.get() is None: res.append((LDAP.LDAP_URL, unicode(_("LDAP is only partially configured. An LDAP URL must be provided.")))) if LDAP.BIND_DN.get(): if LDAP.BIND_PASSWORD.get() is None: res.append((LDAP.BIND_PASSWORD, unicode(_("If you set bind_dn, then you must set bind_password.")))) return res
def config_validator(): """ config_validator() -> [ (config_variable, error_message) ] Called by core check_config() view. """ from desktop.lib import i18n res = [] if not SECRET_KEY.get(): res.append((SECRET_KEY, "Secret key should be configured as a random string.")) # Validate SSL setup if SSL_CERTIFICATE.get(): res.extend(validate_path(SSL_CERTIFICATE, is_dir=False)) if not SSL_PRIVATE_KEY.get(): res.append((SSL_PRIVATE_KEY, "SSL private key file should be set to enable HTTPS.")) else: res.extend(validate_path(SSL_PRIVATE_KEY, is_dir=False)) # Validate encoding if not i18n.validate_encoding(DEFAULT_SITE_ENCODING.get()): res.append((DEFAULT_SITE_ENCODING, "Encoding not supported.")) # Validate kerberos if KERBEROS.HUE_KEYTAB.get() is not None: res.extend(validate_path(KERBEROS.HUE_KEYTAB, is_dir=False)) # Keytab should not be world or group accessible kt_stat = os.stat(KERBEROS.HUE_KEYTAB.get()) if stat.S_IMODE(kt_stat.st_mode) & 0077: res.append((KERBEROS.HUE_KEYTAB, "Keytab should have 0600 permissions (has %o)" % stat.S_IMODE(kt_stat.st_mode))) res.extend(validate_path(KERBEROS.KINIT_PATH, is_dir=False)) res.extend(validate_path(KERBEROS.CCACHE_PATH, is_dir=False)) for broken_app in appmanager.BROKEN_APPS: res.append(('Working Hadoop', 'App %s requires Hadoop but Hadoop is not present.' % (broken_app, ))) return res
def config_validator(): """ config_validator() -> [ (config_variable, error_message) ] Called by core check_config() view. """ from desktop.lib import i18n res = [ ] if not SECRET_KEY.get(): res.append((SECRET_KEY, "Secret key should be configured as a random string.")) # Validate SSL setup if SSL_CERTIFICATE.get(): res.extend(validate_path(SSL_CERTIFICATE, is_dir=False)) if not SSL_PRIVATE_KEY.get(): res.append((SSL_PRIVATE_KEY, "SSL private key file should be set to enable HTTPS.")) else: res.extend(validate_path(SSL_PRIVATE_KEY, is_dir=False)) # Validate encoding if not i18n.validate_encoding(DEFAULT_SITE_ENCODING.get()): res.append((DEFAULT_SITE_ENCODING, "Encoding not supported.")) # Validate kerberos if KERBEROS.HUE_KEYTAB.get() is not None: res.extend(validate_path(KERBEROS.HUE_KEYTAB, is_dir=False)) # Keytab should not be world or group accessible kt_stat = os.stat(KERBEROS.HUE_KEYTAB.get()) if stat.S_IMODE(kt_stat.st_mode) & 0077: res.append((KERBEROS.HUE_KEYTAB, "Keytab should have 0600 permissions (has %o)" % stat.S_IMODE(kt_stat.st_mode))) res.extend(validate_path(KERBEROS.KINIT_PATH, is_dir=False)) res.extend(validate_path(KERBEROS.CCACHE_PATH, is_dir=False)) for broken_app in appmanager.BROKEN_APPS: res.append(('Working Hadoop', 'App %s requires Hadoop but Hadoop is not present.' % (broken_app,))) return res
def config_validator():
  """
  config_validator() -> [ (config_variable, error_message) ]
  Called by core check_config() view.
  """
  from hadoop.cluster import get_all_hdfs

  res = []

  class ConfigMock:
    """Duck-types a config entry so validate_path() can check a literal HDFS path."""
    def __init__(self, value):
      self.value = value
    def get(self):
      return self.value
    def get_fully_qualifying_key(self):
      return self.value

  # Both the workflow deployment dir and the share lib must exist on every HDFS.
  for hdfs in get_all_hdfs().values():
    res.extend(validate_path(REMOTE_DEPLOYMENT_DIR, is_dir=True, fs=hdfs,
               message=_('The deployment directory of Oozie workflows does not exist. '
                         'Run "Setup Examples" on the Oozie workflow page.')))
    res.extend(validate_path(ConfigMock('/user/oozie/share/lib'), is_dir=True, fs=hdfs,
               message=_('Oozie Share Lib not installed in default location.')))

  return res
def config_validator():
  """
  config_validator() -> [ (config_variable, error_message) ]
  Called by core check_config() view.
  """
  from hadoop.cluster import get_all_hdfs

  errors = []

  class ConfigMock:
    """Minimal stand-in for a config entry so validate_path() accepts a literal path."""
    def __init__(self, value):
      self.value = value
    def get(self):
      return self.value
    def get_fully_qualifying_key(self):
      return self.value

  # Check the deployment dir and the default share lib location per HDFS cluster.
  for cluster in get_all_hdfs().values():
    errors.extend(validate_path(REMOTE_DEPLOYMENT_DIR, is_dir=True, fs=cluster,
                  message=_('The deployment directory of Oozie workflows does not exist. '
                            'Run "Setup Examples" on the Oozie workflow page.')))
    errors.extend(validate_path(ConfigMock('/user/oozie/share/lib'), is_dir=True, fs=cluster,
                  message=_('Oozie Share Lib not installed in default location.')))

  return errors
def config_validator(user):
  """
  config_validator() -> [ (config_variable, error_message) ]
  Called by core check_config() view.
  """
  from hadoop.cluster import get_all_hdfs
  from hadoop.fs.hadoopfs import Hdfs
  from liboozie.oozie_api import get_oozie

  res = []

  if OOZIE_URL.get():
    status = get_oozie_status(user)
    if 'NORMAL' not in status:
      res.append((status, _('The Oozie server is not available')))

    # Ask the server where its share lib lives via the instrumentation endpoint.
    api = get_oozie(user)
    instrumentation = api.get_instrumentation()
    sharelib_url = [
      param['value']
      for group in instrumentation['variables']
      for param in group['data']
      if param['name'] == 'sharelib.system.libpath'
    ]
    if sharelib_url:
      # Keep only the path component of the reported URL.
      sharelib_url = Hdfs.urlsplit(sharelib_url[0])[2]
    if not sharelib_url:
      res.append((status, _('Oozie Share Lib path is not available')))

    class ConfigMock:
      """Duck-types a config entry so validate_path() can check a literal path."""
      def __init__(self, value):
        self.value = value
      def get(self):
        return self.value
      def get_fully_qualifying_key(self):
        return self.value

    # The share lib path must exist on every HDFS cluster.
    for cluster in get_all_hdfs().values():
      res.extend(validate_path(ConfigMock(sharelib_url), is_dir=True, fs=cluster,
                 message=_('Oozie Share Lib not installed in default location.')))

  return res
def config_validator(user):
  """
  config_validator() -> [ (config_variable, error_message) ]
  Called by core check_config() view.

  Only checks that SPARK_HOME points at an existing directory.
  """
  return list(validate_path(SPARK_HOME, is_dir=True))
for param in group['data'] if param['name'] == 'sharelib.system.libpath' ] if sharelib_url: sharelib_url = Hdfs.urlsplit(sharelib_url[0])[2] if not sharelib_url: res.append((status, _('Oozie Share Lib path is not available'))) class ConfigMock: def __init__(self, value): self.value = value def get(self): return self.value def get_fully_qualifying_key(self): return self.value for cluster in get_all_hdfs().values(): res.extend( validate_path( ConfigMock(sharelib_url), is_dir=True, fs=cluster, message=_( 'Oozie Share Lib not installed in default location.'))) return res
def config_validator(user):
  """
  config_validator() -> [ (config_variable, error_message) ]
  Called by core check_config() view.
  """
  from hadoop.cluster import get_all_hdfs
  from hadoop.fs.hadoopfs import Hdfs
  from liboozie.oozie_api import get_oozie

  res = []

  if OOZIE_URL.get():
    status = get_oozie_status(user)
    if "NORMAL" not in status:
      res.append((status, _("The Oozie server is not available")))

    api = get_oozie(user, api_version="v2")
    configuration = api.get_configuration()
    enabled_services = [c.strip() for c in configuration.get("oozie.services.ext", "").split(",")]

    # Newer servers expose the share lib path through the metrics service,
    # older ones through the instrumentation endpoint.
    if "org.apache.oozie.service.MetricsInstrumentationService" in enabled_services:
      metrics = api.get_metrics()
      if "gauges" in metrics and "libs.sharelib.system.libpath" in metrics["gauges"]:
        sharelib_url = [metrics["gauges"]["libs.sharelib.system.libpath"]["value"]]
      else:
        sharelib_url = []
    else:
      instrumentation = api.get_instrumentation()
      sharelib_url = [
        param["value"]
        for group in instrumentation["variables"]
        for param in group["data"]
        if param["name"] == "sharelib.system.libpath"
      ]

    if sharelib_url:
      # Keep only the path component of the reported URL.
      sharelib_url = Hdfs.urlsplit(sharelib_url[0])[2]
    if not sharelib_url:
      res.append((status, _("Oozie Share Lib path is not available")))

    class ConfigMock:
      """Duck-types a config entry so validate_path() can check a literal path."""
      def __init__(self, value):
        self.value = value
      def get(self):
        return self.value
      def get_fully_qualifying_key(self):
        return self.value

    # The share lib path must exist on every HDFS cluster.
    for cluster in get_all_hdfs().values():
      res.extend(validate_path(ConfigMock(sharelib_url), is_dir=True, fs=cluster,
                 message=_("Oozie Share Lib not installed in default location.")))

  return res
def config_validator(user):
  """
  config_validator() -> [ (config_variable, error_message) ]
  Called by core check_config() view.
  """
  from desktop.lib.fsmanager import get_filesystem
  from hadoop.cluster import get_all_hdfs
  from hadoop.fs.hadoopfs import Hdfs
  from liboozie.oozie_api import get_oozie

  res = []

  # The Oozie app is optional; without it there is nothing to validate.
  try:
    from oozie.conf import REMOTE_SAMPLE_DIR
  except Exception as e:
    LOG.warn('Config check failed because Oozie app not installed: %s' % e)
    return res

  if OOZIE_URL.get():
    status = get_oozie_status(user)
    if 'NORMAL' not in status:
      res.append((status, _('The Oozie server is not available')))

    fs = get_filesystem()
    NICE_NAME = 'Oozie'

    # The examples workspace must be writable by group or others.
    if fs.do_as_superuser(fs.exists, REMOTE_SAMPLE_DIR.get()):
      stats = fs.do_as_superuser(fs.stats, REMOTE_SAMPLE_DIR.get())
      mode = oct(stats.mode)
      # Inspect the group and "other" write bits of the octal mode string.
      group_has_write = int(mode[-2]) & 2
      others_has_write = int(mode[-1]) & 2
      if not group_has_write and not others_has_write:
        res.append((NICE_NAME, "The permissions of workspace '%s' are too restrictive" % REMOTE_SAMPLE_DIR.get()))

    api = get_oozie(user, api_version="v2")
    configuration = api.get_configuration()
    enabled_services = [c.strip() for c in configuration.get('oozie.services.ext', '').split(',')]

    # Newer servers expose the share lib path through the metrics service,
    # older ones through the instrumentation endpoint.
    if 'org.apache.oozie.service.MetricsInstrumentationService' in enabled_services:
      metrics = api.get_metrics()
      if 'gauges' in metrics and 'libs.sharelib.system.libpath' in metrics['gauges']:
        sharelib_url = [metrics['gauges']['libs.sharelib.system.libpath']['value']]
      else:
        sharelib_url = []
    else:
      instrumentation = api.get_instrumentation()
      sharelib_url = [
        param['value']
        for group in instrumentation['variables']
        for param in group['data']
        if param['name'] == 'sharelib.system.libpath'
      ]

    if sharelib_url:
      # Keep only the path component of the reported URL.
      sharelib_url = Hdfs.urlsplit(sharelib_url[0])[2]
    if not sharelib_url:
      res.append((status, _('Oozie Share Lib path is not available')))

    class ConfigMock(object):
      """Duck-types a config entry so validate_path() can check a literal path."""
      def __init__(self, value):
        self.value = value
      def get(self):
        return self.value
      def get_fully_qualifying_key(self):
        return self.value

    # The share lib path must exist on every HDFS cluster.
    for cluster in list(get_all_hdfs().values()):
      res.extend(validate_path(ConfigMock(sharelib_url), is_dir=True, fs=cluster,
                 message=_('Oozie Share Lib not installed in default location.')))

  return res