Example #1
def config_validator(user):
  """
  config_validator() -> [ (config_variable, error_message) ]

  Called by core check_config() view.
  """
  from hadoop.fs import webhdfs

  res = []
  submit_to = []

  # HDFS_CLUSTERS
  has_default = False
  for name in HDFS_CLUSTERS.keys():
    cluster = HDFS_CLUSTERS[name]
    res.extend(webhdfs.test_fs_configuration(cluster))
    if name == 'default':
      has_default = True
  if not has_default:
    res.append("hadoop.hdfs_clusters", "You should have an HDFS called 'default'.")

  # YARN_CLUSTERS
  for name in YARN_CLUSTERS.keys():
    cluster = YARN_CLUSTERS[name]
    if cluster.SUBMIT_TO.get():
      res.extend(test_yarn_configurations())
      submit_to.append('yarn_clusters.' + name)

  if not submit_to:
    res.append(("hadoop", "Please designate one of the MapReduce or "
                "Yarn clusters with `submit_to=true' in order to run jobs."))

  return res
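
The docstring above spells out the contract shared by every variant on this page: the function returns a list of (config_variable, error_message) tuples, which Hue's check_config() view renders for the administrator. As a rough, hypothetical illustration of how such a return value might be consumed (the real view renders the pairs in a template):

# Hypothetical consumer of config_validator(); illustrates the
# [(config_variable, error_message)] contract from the docstring above.
def print_config_errors(user):
    errors = config_validator(user)
    if not errors:
        print('All configuration checks passed.')
        return
    for variable, message in errors:
        # Each entry names the offending config section plus a human-readable hint.
        print('%s: %s' % (variable, message))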
Example #2
def config_validator(user):
    """
  config_validator() -> [ (config_variable, error_message) ]

  Called by core check_config() view.
  """
    from hadoop.fs import webhdfs

    res = []
    submit_to = []

    # HDFS_CLUSTERS
    has_default = False
    for name in HDFS_CLUSTERS.keys():
        cluster = HDFS_CLUSTERS[name]
        res.extend(webhdfs.test_fs_configuration(cluster))
        if name == 'default':
            has_default = True
    if not has_default:
        res.append("hadoop.hdfs_clusters",
                   "You should have an HDFS called 'default'.")

    # YARN_CLUSTERS
    for name in YARN_CLUSTERS.keys():
        cluster = YARN_CLUSTERS[name]
        if cluster.SUBMIT_TO.get():
            res.extend(test_yarn_configurations())
            submit_to.append('yarn_clusters.' + name)

    if not submit_to:
        res.append(
            ("hadoop", "Please designate one of the MapReduce or "
             "Yarn clusters with `submit_to=true' in order to run jobs."))

    return res
Example #3
def config_validator():
    """
  config_validator() -> [ (config_variable, error_message) ]

  Called by core check_config() view.
  """
    from hadoop.fs import webhdfs
    from hadoop import job_tracker
    res = []
    submit_to = []

    # HDFS_CLUSTERS
    has_default = False
    for name in HDFS_CLUSTERS.keys():
        cluster = HDFS_CLUSTERS[name]
        res.extend(validate_path(cluster.HADOOP_HDFS_HOME, is_dir=True))
        res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
        res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
        res.extend(webhdfs.test_fs_configuration(cluster))
        if name == 'default':
            has_default = True
    if not has_default:
        res.append("hadoop.hdfs_clusters",
                   "You should have an HDFS called 'default'.")

    # MR_CLUSTERS
    for name in MR_CLUSTERS.keys():
        cluster = MR_CLUSTERS[name]
        res.extend(validate_path(cluster.HADOOP_MAPRED_HOME, is_dir=True))
        res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
        res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
        res.extend(job_tracker.test_jt_configuration(cluster))
        if cluster.SUBMIT_TO.get():
            submit_to.append('mapred_clusters.' + name)

    # YARN_CLUSTERS
    for name in YARN_CLUSTERS.keys():
        cluster = YARN_CLUSTERS[name]
        res.extend(validate_path(cluster.HADOOP_MAPRED_HOME, is_dir=True))
        res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
        res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
        if cluster.SUBMIT_TO.get():
            submit_to.append('yarn_clusters.' + name)

    # Only one cluster should have submit_to
    if len(submit_to) > 1:
        res.append(("hadoop", "Only one cluster may enable 'submit_to'. "
                    "But it is enabled in the following clusters: " +
                    ', '.join(submit_to)))
    elif len(submit_to) == 0:
        res.append(
            ("hadoop", "Please designate one of the MapReduce or "
             "Yarn clusters with `submit_to=true' in order to run jobs."))

    return res
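
Examples #3 through #6 additionally run each cluster's home directory, conf dir, and binary through validate_path(), whose implementation is not shown on this page. A minimal sketch of such a helper, assuming it takes a Hue Config object exposing .get() and returns the same (config_variable, error_message) pairs:

import os

# Sketch of a validate_path-style helper. The signature and the str()
# rendering of the config variable are assumptions; the real Hue helper
# is defined elsewhere and is not shown in these examples.
def validate_path(conf_variable, is_dir):
    path = conf_variable.get()
    if path is None or not os.path.exists(path):
        return [(str(conf_variable), 'Path does not exist on the server: %s' % path)]
    if os.path.isdir(path) != is_dir:
        kind = 'directory' if is_dir else 'file'
        return [(str(conf_variable), 'Path is not a %s: %s' % (kind, path))]
    return []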
Example #4
def config_validator(user):
    """
    config_validator() -> [ (config_variable, error_message) ]

    Called by core check_config() view.
    """
    from hadoop.fs import webhdfs
    from hadoop import job_tracker

    res = []
    submit_to = []

    # HDFS_CLUSTERS
    has_default = False
    for name in HDFS_CLUSTERS.keys():
        cluster = HDFS_CLUSTERS[name]
        res.extend(validate_path(cluster.HADOOP_HDFS_HOME, is_dir=True))
        res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
        res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
        res.extend(webhdfs.test_fs_configuration(cluster))
        if name == 'default':
            has_default = True
    if not has_default:
        res.append("hadoop.hdfs_clusters", "You should have an HDFS called 'default'.")

    # MR_CLUSTERS
    for name in MR_CLUSTERS.keys():
        cluster = MR_CLUSTERS[name]
        res.extend(validate_path(cluster.HADOOP_MAPRED_HOME, is_dir=True))
        res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
        res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
        res.extend(job_tracker.test_jt_configuration(cluster))
        if cluster.SUBMIT_TO.get():
            submit_to.append('mapred_clusters.' + name)

    # YARN_CLUSTERS
    for name in YARN_CLUSTERS.keys():
        cluster = YARN_CLUSTERS[name]
        res.extend(validate_path(cluster.HADOOP_MAPRED_HOME, is_dir=True))
        res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
        res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
        if cluster.SUBMIT_TO.get():
            submit_to.append('yarn_clusters.' + name)

    # Only one cluster should have submit_to
    if len(submit_to) > 1:
        res.append(("hadoop", "Only one cluster may enable 'submit_to'. "
                              "But it is enabled in the following clusters: " +
                              ', '.join(submit_to)))
    elif len(submit_to) == 0:
        res.append(("hadoop", "Please designate one of the MapReduce or "
                              "Yarn clusters with `submit_to=true' in order to run jobs."))

    return res
Example #5
def config_validator(user):
    """
  config_validator() -> [ (config_variable, error_message) ]

  Called by core check_config() view.
  """
    from hadoop.fs import webhdfs
    from hadoop import job_tracker
    res = []
    submit_to = []

    # HDFS_CLUSTERS
    has_default = False
    for name in HDFS_CLUSTERS.keys():
        cluster = HDFS_CLUSTERS[name]
        res.extend(validate_path(cluster.HADOOP_HDFS_HOME, is_dir=True))
        res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
        res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
        res.extend(webhdfs.test_fs_configuration(cluster))
        if name == 'default':
            has_default = True
    if not has_default:
        res.append("hadoop.hdfs_clusters",
                   "You should have an HDFS called 'default'.")

    # MR_CLUSTERS
    mr_down = []
    for name in MR_CLUSTERS.keys():
        cluster = MR_CLUSTERS[name]
        if cluster.SUBMIT_TO.get():
            res.extend(validate_path(cluster.HADOOP_MAPRED_HOME, is_dir=True))
            res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
            res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
            mr_down.extend(job_tracker.test_jt_configuration(cluster))
            submit_to.append('mapred_clusters.' + name)
    # If HA still failing
    if mr_down and len(mr_down) == len(MR_CLUSTERS.keys()):
        res.extend(mr_down)

    # YARN_CLUSTERS
    for name in YARN_CLUSTERS.keys():
        cluster = YARN_CLUSTERS[name]
        if cluster.SUBMIT_TO.get():
            res.extend(validate_path(cluster.HADOOP_MAPRED_HOME, is_dir=True))
            res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
            res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
            submit_to.append('yarn_clusters.' + name)

    if not submit_to:
        res.append(
            ("hadoop", "Please designate one of the MapReduce or "
             "Yarn clusters with `submit_to=true' in order to run jobs."))

    return res
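
Unlike example #4, this variant collects JobTracker errors into mr_down and surfaces them only when there are as many errors as configured MR clusters, so a single down member of a high-availability pair does not raise a false alarm. The same pattern in isolation, as an illustrative sketch (check_cluster is a hypothetical probe returning a list of error tuples):

# Collect per-cluster errors, but surface them only when no cluster in the
# group passed; a live standby makes the whole group healthy under HA.
# Assumes each failed check contributes exactly one error tuple, matching
# the len(mr_down) == len(MR_CLUSTERS.keys()) comparison above.
def check_ha_group(clusters, check_cluster):
    down = []
    for cluster in clusters:
        down.extend(check_cluster(cluster))
    if down and len(down) == len(clusters):
        return down
    return []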
Example #6
File: conf.py Project: chiehwen/hue
def config_validator(user):
  """
  config_validator() -> [ (config_variable, error_message) ]

  Called by core check_config() view.
  """
  from hadoop.fs import webhdfs
  from hadoop import job_tracker
  res = []
  submit_to = []

  # HDFS_CLUSTERS
  has_default = False
  for name in HDFS_CLUSTERS.keys():
    cluster = HDFS_CLUSTERS[name]
    res.extend(validate_path(cluster.HADOOP_HDFS_HOME, is_dir=True))
    res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
    res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
    res.extend(webhdfs.test_fs_configuration(cluster))
    if name == 'default':
      has_default = True
  if not has_default:
    res.append("hadoop.hdfs_clusters", "You should have an HDFS called 'default'.")

  # MR_CLUSTERS
  mr_down = []
  for name in MR_CLUSTERS.keys():
    cluster = MR_CLUSTERS[name]
    res.extend(validate_path(cluster.HADOOP_MAPRED_HOME, is_dir=True))
    res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
    res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
    mr_down.extend(job_tracker.test_jt_configuration(cluster))
    if cluster.SUBMIT_TO.get():
      submit_to.append('mapred_clusters.' + name)
  # If HA still failing
  if mr_down and len(mr_down) == len(MR_CLUSTERS.keys()):
    res.extend(mr_down)

  # YARN_CLUSTERS
  for name in YARN_CLUSTERS.keys():
    cluster = YARN_CLUSTERS[name]
    res.extend(validate_path(cluster.HADOOP_MAPRED_HOME, is_dir=True))
    res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
    res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
    if cluster.SUBMIT_TO.get():
      submit_to.append('yarn_clusters.' + name)

  if not submit_to:
    res.append(("hadoop", "Please designate one of the MapReduce or "
                "Yarn clusters with `submit_to=true' in order to run jobs."))

  return res
Example #7
def config_validator(user):
    """
  config_validator() -> [ (config_variable, error_message) ]

  Called by core check_config() view.
  """
    from hadoop.fs import webhdfs

    res = []
    submit_to = []

    # HDFS_CLUSTERS
    has_default = False
    for name in list(HDFS_CLUSTERS.keys()):
        cluster = HDFS_CLUSTERS[name]
        res.extend(webhdfs.test_fs_configuration(cluster))
        if name == 'default':
            has_default = True
    if HDFS_CLUSTERS.keys() and not has_default:
        res.append(("hadoop.hdfs_clusters",
                    "You should have an HDFS called 'default'."))

    # YARN_CLUSTERS
    for name in list(YARN_CLUSTERS.keys()):
        cluster = YARN_CLUSTERS[name]
        if cluster.SUBMIT_TO.get():
            submit_to.append('yarn_clusters.' + name)

    if YARN_CLUSTERS.keys() and not submit_to:
        res.append((
            "hadoop",
            "Please designate one of the MapReduce or Yarn clusters with `submit_to=true' in order to run jobs."
        ))
    else:
        res.extend(test_yarn_configurations(user))

    if get_spark_history_server_from_cm():
        status = test_spark_configuration(user)
        if status != 'OK':
            res.append((
                "Spark_history_server",
                "Spark job can't retrieve logs of driver and executors without a running Spark history server"
            ))

    return res