Example #1
0
File: tests.py Project: yhanwen/hue
def test_config_validator_more():
    # TODO: Setup DN to not load the plugin, which is a common user error.

    # The mini cluster is never touched directly; bringing it up installs the
    # configuration that this check runs against.
    _cluster = pseudo_hdfs4.shared_cluster()
    client = make_logged_in_client()

    restorers = (
        conf.MR_CLUSTERS['default'].HOST.set_for_testing('localhost'),
        conf.MR_CLUSTERS['default'].JT_THRIFT_PORT.set_for_testing(23),
    )
    saved = cluster.clear_caches()
    try:
        response = client.get('/debug/check_config')

        # Filesystem checks should all pass...
        for message in ('Failed to access filesystem root',
                        'Failed to create',
                        'Failed to chown',
                        'Failed to delete'):
            assert_false(message in response.content)
        # ...but the deliberately broken JT port must be reported.
        assert_true('Failed to contact JobTracker plugin' in response.content)
    finally:
        for restore in restorers:
            restore()
        cluster.restore_caches(saved)
Example #2
0
def test_config_validator_more():
  """check_config must flag the unreachable JobTracker while HDFS checks pass."""
  # TODO: Setup DN to not load the plugin, which is a common user error.

  # The shared mini cluster is only started for its side effect: it writes
  # the configuration that forms the test basis.
  unused_cluster = pseudo_hdfs4.shared_cluster()
  http_client = make_logged_in_client()

  undo = (
    conf.MR_CLUSTERS['default'].HOST.set_for_testing('localhost'),
    conf.MR_CLUSTERS['default'].JT_THRIFT_PORT.set_for_testing(23),
  )
  cache_state = cluster.clear_caches()
  try:
    content = http_client.get('/debug/check_config').content

    assert_false('Failed to access filesystem root' in content)
    assert_false('Failed to create' in content)
    assert_false('Failed to chown' in content)
    assert_false('Failed to delete' in content)
    assert_true('Failed to contact JobTracker plugin' in content)
  finally:
    for undo_one in undo:
      undo_one()
    cluster.restore_caches(cache_state)
Example #3
0
def test_update_properties():
  """Submission._update_properties must fill in jobTracker/nameNode correctly.

  Covers three cases: no fs/jt attached (properties unchanged), fs/jt
  attached without logical names (physical addresses used), and fs/jt
  attached with HA logical names (logical names take precedence).
  """
  finish = []
  finish.append(MR_CLUSTERS['default'].SUBMIT_TO.set_for_testing(True))
  finish.append(YARN_CLUSTERS['default'].SUBMIT_TO.set_for_testing(True))
  try:
    properties = {
      'user.name': 'hue',
      'test.1': 'http://localhost/test?test1=test&test2=test'
    }

    # Without a filesystem or jobtracker attached, nothing should be added.
    final_properties = properties.copy()
    submission = Submission(None, properties=properties, oozie_id='test')
    assert_equal(properties, submission.properties)
    submission._update_properties('jtaddress', 'deployment-directory')
    assert_equal(final_properties, submission.properties)

    # With fs/jt but no logical names, the physical addresses are injected.
    cluster.clear_caches()
    fs = cluster.get_hdfs()
    jt = cluster.get_next_ha_mrcluster()[1]
    final_properties = properties.copy()
    final_properties.update({
      'jobTracker': 'jtaddress',
      'nameNode': fs.fs_defaultfs
    })
    submission = Submission(None, properties=properties, oozie_id='test', fs=fs, jt=jt)
    assert_equal(properties, submission.properties)
    submission._update_properties('jtaddress', 'deployment-directory')
    assert_equal(final_properties, submission.properties)

    # With logical names configured, they win over the physical addresses.
    finish.append(HDFS_CLUSTERS['default'].LOGICAL_NAME.set_for_testing('namenode'))
    finish.append(MR_CLUSTERS['default'].LOGICAL_NAME.set_for_testing('jobtracker'))
    cluster.clear_caches()
    fs = cluster.get_hdfs()
    jt = cluster.get_next_ha_mrcluster()[1]
    final_properties = properties.copy()
    final_properties.update({
      'jobTracker': 'jobtracker',
      'nameNode': 'namenode'
    })
    submission = Submission(None, properties=properties, oozie_id='test', fs=fs, jt=jt)
    assert_equal(properties, submission.properties)
    submission._update_properties('jtaddress', 'deployment-directory')
    assert_equal(final_properties, submission.properties)
  finally:
    # Drop the cluster objects cached with the test-only logical names so
    # they cannot leak into subsequent tests, then restore the config.
    cluster.clear_caches()
    for reset in finish:
      reset()
Example #4
0
def test_non_default_cluster():
    """Hue must work when the only configured cluster is not named 'default'."""
    name = 'non_default'
    cluster.clear_caches()
    undo = (
        conf.HDFS_CLUSTERS.set_for_testing({name: {}}),
        conf.MR_CLUSTERS.set_for_testing({name: {}}),
    )
    try:
        # The non-default cluster is the one and only hdfs/mr cluster.
        assert_equal(1, len(cluster.get_all_hdfs()))
        assert_equal(1, len(cluster.all_mrclusters()))
        assert_true(cluster.get_hdfs(name))
        assert_true(cluster.get_mrcluster(name))

        # Reaching any view without an error proves the middlewares are happy.
        make_logged_in_client().get('/about')
    finally:
        for restore in undo:
            restore()
Example #5
0
File: tests.py Project: abayer/hue
def test_non_default_cluster():
  # Configure a single cluster whose name is deliberately not 'default'.
  NON_DEFAULT = 'non_default'
  cluster.clear_caches()
  restorers = [
    conf.HDFS_CLUSTERS.set_for_testing({NON_DEFAULT: {}}),
    conf.MR_CLUSTERS.set_for_testing({NON_DEFAULT: {}}),
  ]
  try:
    # It should be the sole hdfs/mr cluster, and addressable by its name.
    assert_equal(1, len(cluster.get_all_hdfs()))
    assert_equal(1, len(cluster.all_mrclusters()))
    assert_true(cluster.get_hdfs(NON_DEFAULT))
    assert_true(cluster.get_mrcluster(NON_DEFAULT))

    web_client = make_logged_in_client()
    # A successful page fetch shows the middlewares tolerate the naming.
    web_client.get('/about')
  finally:
    for restorer in restorers:
      restorer()
Example #6
0
  def test_massage_uri(self):
    """_massage_uri behaviour with a logical name vs. a plain fs_defaultfs."""
    # Phase 1: a LOGICAL_NAME is configured, so scheme-less paths and the
    # bare hdfs:// scheme must be rewritten onto it.
    finish = HDFS_CLUSTERS['default'].LOGICAL_NAME.set_for_testing('namenode')
    cluster.clear_caches()
    try:
      for expected, uri in (
          ('', ''),
          ('namenode/data', 'hdfs:///data'),
          ('hdfs://nn:11/data', 'hdfs://nn:11/data'),
          ('hdfs://logical/data', 'hdfs://logical/data'),
          ('namenode/data', '/data'),
          ('file:///data', 'file:///data'),
      ):
        assert_equal(expected, _massage_uri(uri))
    finally:
      finish()

    # Phase 2: the logical name was restored above, so bare paths must be
    # prefixed with FS_DEFAULTFS instead.
    finish = HDFS_CLUSTERS['default'].FS_DEFAULTFS.set_for_testing('hdfs://fs_defaultfs:8021')
    cluster.clear_caches()
    try:
      for expected, uri in (
          ('', ''),
          ('hdfs://fs_defaultfs:8021/data', 'hdfs:///data'),
          ('hdfs://nn:11/data', 'hdfs://nn:11/data'),
          ('hdfs://logical/data', 'hdfs://logical/data'),
          ('hdfs://fs_defaultfs:8021/data', '/data'),
          ('file:///data', 'file:///data'),
      ):
        assert_equal(expected, _massage_uri(uri))
    finally:
      finish()
Example #7
0
def test_config_validator_basic():
  """check_config must complain about an unreachable WebHDFS URL."""
  undo = (
    conf.HDFS_CLUSTERS['default'].WEBHDFS_URL.set_for_testing('http://not.the.re:50070/'),
    conf.MR_CLUSTERS['default'].JT_THRIFT_PORT.set_for_testing(70000),
  )
  cache_state = cluster.clear_caches()
  try:
    response = make_logged_in_client().get('/debug/check_config')
    # The broken WebHDFS endpoint must be called out by its config key.
    assert_true('hadoop.hdfs_clusters.default.webhdfs_url' in response.content)
  finally:
    for restore in undo:
      restore()
    cluster.restore_caches(cache_state)
Example #8
0
def test_update_properties():
    """Exercise Submission._update_properties with and without HA logical names."""
    restorers = []
    restorers.append(MR_CLUSTERS['default'].SUBMIT_TO.set_for_testing(True))
    restorers.append(YARN_CLUSTERS['default'].SUBMIT_TO.set_for_testing(True))
    try:
        props = {
            'user.name': 'hue',
            'test.1': 'http://localhost/test?test1=test&test2=test'
        }

        # No fs/jt attached: the property dict must come through untouched.
        expected = props.copy()
        sub = Submission(None, properties=props, oozie_id='test')
        assert_equal(props, sub.properties)
        sub._update_properties('jtaddress', 'deployment-directory')
        assert_equal(expected, sub.properties)

        # fs/jt attached, no logical names: physical addresses are injected.
        cluster.clear_caches()
        fs = cluster.get_hdfs()
        jt = cluster.get_next_ha_mrcluster()[1]
        expected = props.copy()
        expected.update({'jobTracker': 'jtaddress', 'nameNode': fs.fs_defaultfs})
        sub = Submission(None, properties=props, oozie_id='test', fs=fs, jt=jt)
        assert_equal(props, sub.properties)
        sub._update_properties('jtaddress', 'deployment-directory')
        assert_equal(expected, sub.properties)

        # Logical names configured: they win over the physical addresses.
        restorers.append(HDFS_CLUSTERS['default'].LOGICAL_NAME.set_for_testing('namenode'))
        restorers.append(MR_CLUSTERS['default'].LOGICAL_NAME.set_for_testing('jobtracker'))
        cluster.clear_caches()
        fs = cluster.get_hdfs()
        jt = cluster.get_next_ha_mrcluster()[1]
        expected = props.copy()
        expected.update({'jobTracker': 'jobtracker', 'nameNode': 'namenode'})
        sub = Submission(None, properties=props, oozie_id='test', fs=fs, jt=jt)
        assert_equal(props, sub.properties)
        sub._update_properties('jtaddress', 'deployment-directory')
        assert_equal(expected, sub.properties)
    finally:
        # Drop cached cluster objects before restoring the configuration.
        cluster.clear_caches()
        for restore in restorers:
            restore()
Example #9
0
def test_config_validator_basic():
    # Point WebHDFS at a host that does not exist and use an illegal JT port.
    broken = (
        conf.HDFS_CLUSTERS['default'].WEBHDFS_URL.set_for_testing(
            'http://not.the.re:50070/'),
        conf.MR_CLUSTERS['default'].JT_THRIFT_PORT.set_for_testing(70000),
    )
    previous_caches = cluster.clear_caches()
    try:
        client = make_logged_in_client()
        body = client.get('/debug/check_config').content
        # The checker must mention the offending config key.
        assert_true('hadoop.hdfs_clusters.default.webhdfs_url' in body)
    finally:
        for fix in broken:
            fix()
        cluster.restore_caches(previous_caches)
Example #10
0
def test_config_validator_basic():
    """The config checker must flag a bad WebHDFS URL and an illegal JT port."""
    restorers = (
        conf.HDFS_CLUSTERS["default"].WEBHDFS_URL.set_for_testing("http://not.the.re:50070/"),
        conf.MR_CLUSTERS["default"].JT_THRIFT_PORT.set_for_testing(70000),
    )
    saved = cluster.clear_caches()
    try:
        content = make_logged_in_client().get("/debug/check_config").content
        # Every complaint below must show up in the rendered page.
        for expected in (
            "hadoop.hdfs_clusters.default.webhdfs_url",
            "hadoop.mapred_clusters.default.thrift_port",
            "Port should be",
        ):
            assert_true(expected in content)
    finally:
        for restore in restorers:
            restore()
        cluster.restore_caches(saved)
Example #11
0
def test_config_validator_more():
  # TODO: Setup DN to not load the plugin, which is a common user error.

  # The mini cluster itself is unused; it exists to lay down the
  # configuration that forms the test basis.
  _mini = pseudo_hdfs4.shared_cluster()
  browser = make_logged_in_client()

  saved_caches = cluster.clear_caches()
  try:
    page = browser.get('/debug/check_config').content

    # None of the filesystem checks may fail against the mini cluster.
    for failure in ('Failed to access filesystem root',
                    'Failed to create',
                    'Failed to chown',
                    'Failed to delete'):
      assert_false(failure in page)
  finally:
    cluster.restore_caches(saved_caches)
Example #12
0
def test_config_validator_more():
    # TODO: Setup DN to not load the plugin, which is a common user error.

    # The shared mini cluster is started purely for its configuration side
    # effects; the returned object is never used.
    unused = pseudo_hdfs4.shared_cluster()
    http = make_logged_in_client()

    previous = cluster.clear_caches()
    try:
        body = http.get('/debug/check_config').content

        assert_false('Failed to access filesystem root' in body)
        assert_false('Failed to create' in body)
        assert_false('Failed to chown' in body)
        assert_false('Failed to delete' in body)
    finally:
        cluster.restore_caches(previous)
Example #13
0
def clear_sys_caches():
  """Reset the cluster and filesystem caches; return their prior states."""
  cluster_state = cluster.clear_caches()
  fs_state = fsmanager.clear_cache()
  return cluster_state, fs_state
Example #14
0
def clear_sys_caches():
    """Clear the cluster and filesystem caches, handing back both old states."""
    states = cluster.clear_caches(), fsmanager.clear_cache()
    return states
Example #15
0
def clear_sys_caches():
    """Clear the cluster cache, returning the previous state in a list."""
    previous = cluster.clear_caches()
    return [previous]