Code example #1
def config_validator():
  """
  config_validator() -> [ (config_variable, error_message) ]

  Called by core check_config() view.
  """
  from hadoop.fs import hadoopfs
  from hadoop import job_tracker
  res = [ ]

  # HADOOP_HOME
  res.extend(validate_path(HADOOP_HOME, is_dir=True))
  # HADOOP_BIN
  res.extend(validate_path(HADOOP_BIN, is_dir=False))

  # JARs: even though these are private, we need them to run jobsub
  res.extend(validate_path(HADOOP_EXAMPLES_JAR, is_dir=False))
  res.extend(validate_path(HADOOP_STREAMING_JAR, is_dir=False))

  # HDFS_CLUSTERS
  for name in HDFS_CLUSTERS.keys():
    cluster = HDFS_CLUSTERS[name]
    res.extend(hadoopfs.test_fs_configuration(cluster, HADOOP_BIN))

  # MR_CLUSTERS
  for name in MR_CLUSTERS.keys():
    cluster = MR_CLUSTERS[name]
    res.extend(job_tracker.test_jt_configuration(cluster))

  return res
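
Every version of this validator leans on a validate_path helper that the page never shows. Below is a minimal sketch of what such a helper could look like, assuming each config variable exposes its current value through a .get() method (consistent with the SUBMIT_TO.get() calls in the later examples); the body is a guess for illustration, not Hue's actual implementation:

import os

def validate_path(conf_variable, is_dir):
  """
  Hypothetical sketch: return [(conf_variable, error_message)] when the
  configured path is missing or of the wrong type, else [].
  """
  path = conf_variable.get()
  if path is None or not os.path.exists(path):
    return [(conf_variable, 'Path does not exist on the filesystem.')]
  if is_dir and not os.path.isdir(path):
    return [(conf_variable, 'Not a directory.')]
  if not is_dir and not os.path.isfile(path):
    return [(conf_variable, 'Not a file.')]
  return []

Returning a (possibly empty) list is what lets every caller write res.extend(validate_path(...)) without branching on the result.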
Code example #2
def config_validator():
    """
  config_validator() -> [ (config_variable, error_message) ]

  Called by core check_config() view.
  """
    from hadoop.fs import hadoopfs
    from hadoop import job_tracker
    res = []

    # HADOOP_HOME
    res.extend(validate_path(HADOOP_HOME, is_dir=True))
    # HADOOP_BIN
    res.extend(validate_path(HADOOP_BIN, is_dir=False))

    # JARs: even though these are private, we need them to run jobsub
    res.extend(validate_path(HADOOP_EXAMPLES_JAR, is_dir=False))
    res.extend(validate_path(HADOOP_STREAMING_JAR, is_dir=False))

    # HDFS_CLUSTERS
    for name in HDFS_CLUSTERS.keys():
        cluster = HDFS_CLUSTERS[name]
        res.extend(hadoopfs.test_fs_configuration(cluster, HADOOP_BIN))

    # MR_CLUSTERS
    for name in MR_CLUSTERS.keys():
        cluster = MR_CLUSTERS[name]
        res.extend(job_tracker.test_jt_configuration(cluster))

    return res
Code example #3
def config_validator():
    """
  config_validator() -> [ (config_variable, error_message) ]

  Called by core check_config() view.
  """
    from hadoop.fs import webhdfs
    from hadoop import job_tracker
    res = []
    submit_to = []

    # HDFS_CLUSTERS
    has_default = False
    for name in HDFS_CLUSTERS.keys():
        cluster = HDFS_CLUSTERS[name]
        res.extend(validate_path(cluster.HADOOP_HDFS_HOME, is_dir=True))
        res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
        res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
        res.extend(webhdfs.test_fs_configuration(cluster))
        if name == 'default':
            has_default = True
    if not has_default:
        res.append("hadoop.hdfs_clusters",
                   "You should have an HDFS called 'default'.")

    # MR_CLUSTERS
    for name in MR_CLUSTERS.keys():
        cluster = MR_CLUSTERS[name]
        res.extend(validate_path(cluster.HADOOP_MAPRED_HOME, is_dir=True))
        res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
        res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
        res.extend(job_tracker.test_jt_configuration(cluster))
        if cluster.SUBMIT_TO.get():
            submit_to.append('mapred_clusters.' + name)

    # YARN_CLUSTERS
    for name in YARN_CLUSTERS.keys():
        cluster = YARN_CLUSTERS[name]
        res.extend(validate_path(cluster.HADOOP_MAPRED_HOME, is_dir=True))
        res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
        res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
        if cluster.SUBMIT_TO.get():
            submit_to.append('yarn_clusters.' + name)

    # Only one cluster should have submit_to
    if len(submit_to) > 1:
        res.append(("hadoop", "Only one cluster may enable 'submit_to'. "
                    "But it is enabled in the following clusters: " +
                    ', '.join(submit_to)))
    elif len(submit_to) == 0:
        res.append(
            ("hadoop", "Please designate one of the MapReduce or "
             "Yarn clusters with `submit_to=true' in order to run jobs."))

    return res
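
Example #3 is the first version that spans MapReduce and YARN and enforces that exactly one cluster sets submit_to. The aggregation relies only on each cluster's SUBMIT_TO config object answering .get(); a tiny stand-in (the FakeBool class below is made up for illustration, not a Hue type) shows the logic in isolation:

class FakeBool(object):
  # Stand-in for a bound boolean config; only .get() matters here.
  def __init__(self, value):
    self._value = value
  def get(self):
    return self._value

submit_to = []
clusters = {'default': FakeBool(True), 'backup': FakeBool(True)}
for name, cluster in clusters.items():
  if cluster.get():
    submit_to.append('mapred_clusters.' + name)

# Two enabled clusters: config_validator() would append the "hadoop" error.
assert len(submit_to) > 1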
Code example #4
def config_validator(user):
    """
    config_validator(user) -> [ (config_variable, error_message) ]

    Called by core check_config() view.
    """
    from hadoop.fs import webhdfs
    from hadoop import job_tracker

    res = []
    submit_to = []

    # HDFS_CLUSTERS
    has_default = False
    for name in HDFS_CLUSTERS.keys():
        cluster = HDFS_CLUSTERS[name]
        res.extend(validate_path(cluster.HADOOP_HDFS_HOME, is_dir=True))
        res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
        res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
        res.extend(webhdfs.test_fs_configuration(cluster))
        if name == 'default':
            has_default = True
    if not has_default:
        res.append("hadoop.hdfs_clusters", "You should have an HDFS called 'default'.")

    # MR_CLUSTERS
    for name in MR_CLUSTERS.keys():
        cluster = MR_CLUSTERS[name]
        res.extend(validate_path(cluster.HADOOP_MAPRED_HOME, is_dir=True))
        res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
        res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
        res.extend(job_tracker.test_jt_configuration(cluster))
        if cluster.SUBMIT_TO.get():
            submit_to.append('mapred_clusters.' + name)

    # YARN_CLUSTERS
    for name in YARN_CLUSTERS.keys():
        cluster = YARN_CLUSTERS[name]
        res.extend(validate_path(cluster.HADOOP_MAPRED_HOME, is_dir=True))
        res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
        res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
        if cluster.SUBMIT_TO.get():
            submit_to.append('yarn_clusters.' + name)

    # Only one cluster should have submit_to
    if len(submit_to) > 1:
        res.append(("hadoop", "Only one cluster may enable 'submit_to'. "
                              "But it is enabled in the following clusters: " +
                              ', '.join(submit_to)))
    elif len(submit_to) == 0:
        res.append(("hadoop", "Please designate one of the MapReduce or "
                              "Yarn clusters with `submit_to=true' in order to run jobs."))

    return res
Code example #5
def config_validator(user):
    """
  config_validator() -> [ (config_variable, error_message) ]

  Called by core check_config() view.
  """
    from hadoop.fs import webhdfs
    from hadoop import job_tracker
    res = []
    submit_to = []

    # HDFS_CLUSTERS
    has_default = False
    for name in HDFS_CLUSTERS.keys():
        cluster = HDFS_CLUSTERS[name]
        res.extend(validate_path(cluster.HADOOP_HDFS_HOME, is_dir=True))
        res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
        res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
        res.extend(webhdfs.test_fs_configuration(cluster))
        if name == 'default':
            has_default = True
    if not has_default:
        res.append("hadoop.hdfs_clusters",
                   "You should have an HDFS called 'default'.")

    # MR_CLUSTERS
    mr_down = []
    for name in MR_CLUSTERS.keys():
        cluster = MR_CLUSTERS[name]
        if cluster.SUBMIT_TO.get():
            res.extend(validate_path(cluster.HADOOP_MAPRED_HOME, is_dir=True))
            res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
            res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
            mr_down.extend(job_tracker.test_jt_configuration(cluster))
            submit_to.append('mapred_clusters.' + name)
    # Report JobTracker errors only when every MR cluster is down (HA failover did not help)
    if mr_down and len(mr_down) == len(MR_CLUSTERS.keys()):
        res.extend(mr_down)

    # YARN_CLUSTERS
    for name in YARN_CLUSTERS.keys():
        cluster = YARN_CLUSTERS[name]
        if cluster.SUBMIT_TO.get():
            res.extend(validate_path(cluster.HADOOP_MAPRED_HOME, is_dir=True))
            res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
            res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
            submit_to.append('yarn_clusters.' + name)

    if not submit_to:
        res.append(
            ("hadoop", "Please designate one of the MapReduce or "
             "Yarn clusters with `submit_to=true' in order to run jobs."))

    return res
Code example #6
File: conf.py  Project: chiehwen/hue
def config_validator(user):
  """
  config_validator(user) -> [ (config_variable, error_message) ]

  Called by core check_config() view.
  """
  from hadoop.fs import webhdfs
  from hadoop import job_tracker
  res = []
  submit_to = []

  # HDFS_CLUSTERS
  has_default = False
  for name in HDFS_CLUSTERS.keys():
    cluster = HDFS_CLUSTERS[name]
    res.extend(validate_path(cluster.HADOOP_HDFS_HOME, is_dir=True))
    res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
    res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
    res.extend(webhdfs.test_fs_configuration(cluster))
    if name == 'default':
      has_default = True
  if not has_default:
    res.append("hadoop.hdfs_clusters", "You should have an HDFS called 'default'.")

  # MR_CLUSTERS
  mr_down = []
  for name in MR_CLUSTERS.keys():
    cluster = MR_CLUSTERS[name]
    res.extend(validate_path(cluster.HADOOP_MAPRED_HOME, is_dir=True))
    res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
    res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
    mr_down.extend(job_tracker.test_jt_configuration(cluster))
    if cluster.SUBMIT_TO.get():
      submit_to.append('mapred_clusters.' + name)
  # Report JobTracker errors only when every MR cluster is down (HA failover did not help)
  if mr_down and len(mr_down) == len(MR_CLUSTERS.keys()):
    res.extend(mr_down)

  # YARN_CLUSTERS
  for name in YARN_CLUSTERS.keys():
    cluster = YARN_CLUSTERS[name]
    res.extend(validate_path(cluster.HADOOP_MAPRED_HOME, is_dir=True))
    res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
    res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
    if cluster.SUBMIT_TO.get():
      submit_to.append('yarn_clusters.' + name)

  if not submit_to:
    res.append(("hadoop", "Please designate one of the MapReduce or "
                "Yarn clusters with `submit_to=true' in order to run jobs."))

  return res
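
All six versions keep the same contract: a flat list of (config_variable, error_message) pairs consumed by the core check_config() view. A minimal sketch of such a consumer (illustrative only; Hue's real view renders a template, and only versions #4-#6 take the user argument):

def check_config(user=None):
  errors = config_validator(user)  # versions #1-#3 take no argument
  if not errors:
    return 'All configuration checks passed.'
  # Each entry pairs the offending config variable (a bound config object
  # or a dotted string) with a human-readable message.
  return '\n'.join('%s: %s' % (variable, message)
                   for variable, message in errors)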