def test_live_jobtracker():
  """
  Checks that LiveJobTracker never raises exceptions for most of its calls.
  """
  cluster = mini_cluster.shared_cluster()
  try:
    jt = cluster.jt
    # Each of these accessors must neither raise nor return a falsy value
    # on a freshly started cluster.
    # Not covered here: task_tracker, get_job_counters, get_job_xml.
    accessors = (
      jt.queues,
      jt.cluster_status,
      jt.all_task_trackers,
      jt.active_trackers,
      jt.blacklisted_trackers,
      jt.running_jobs,
      jt.completed_jobs,
      jt.failed_jobs,
      jt.all_jobs,
      jt.get_current_time,
    )
    for accessor in accessors:
      assert_true(accessor())
  finally:
    cluster.shutdown()
def test_config_validator_more(): # TODO: Setup DN to not load the plugin, which is a common user error. # We don't actually use the mini_cluster. But the cluster sets up the correct # configuration that forms the test basis. cluster = mini_cluster.shared_cluster() if not cluster.fs.exists('/tmp'): cluster.fs.setuser(cluster.fs.superuser) cluster.fs.mkdir('/tmp', 0777) cli = make_logged_in_client() reset = ( conf.HADOOP_BIN.set_for_testing(cluster.fs.hadoop_bin_path), conf.HDFS_CLUSTERS['default'].NN_HOST.set_for_testing('localhost'), conf.HDFS_CLUSTERS['default'].NN_HDFS_PORT.set_for_testing(22), conf.HDFS_CLUSTERS["default"].NN_THRIFT_PORT.set_for_testing( cluster.fs.thrift_port), conf.MR_CLUSTERS["default"].JT_HOST.set_for_testing("localhost"), conf.MR_CLUSTERS['default'].JT_THRIFT_PORT.set_for_testing(23), ) try: resp = cli.get('/debug/check_config') assert_false('Failed to contact Namenode plugin' in resp.content) assert_false('Failed to see HDFS root' in resp.content) assert_true('Failed to upload files' in resp.content) assert_true('Failed to contact JobTracker plugin' in resp.content) finally: for old_conf in reset: old_conf() cluster.shutdown()
# NOTE(review): this function is a duplicate of the test_config_validator_more
# defined immediately above (identical body). Because Python binds the name at
# definition time, this second definition shadows the first, so only one copy
# is ever collected/run. One of the two copies should be removed — confirm
# which with the original author before deleting.
def test_config_validator_more():
  # TODO: Setup DN to not load the plugin, which is a common user error.
  # We don't actually use the mini_cluster. But the cluster sets up the correct
  # configuration that forms the test basis.
  cluster = mini_cluster.shared_cluster()
  if not cluster.fs.exists('/tmp'):
    cluster.fs.setuser(cluster.fs.superuser)
    # 0777 is a Python 2 octal literal (world-writable /tmp, as on a real cluster).
    cluster.fs.mkdir('/tmp', 0777)
  cli = make_logged_in_client()
  # Port 22 for HDFS and 23 for the JT thrift port are presumably deliberate
  # bad values, so the config check below reports the matching failures.
  # Each set_for_testing() result appears to be a restore callable — it is
  # invoked in the finally block to undo the override.
  reset = (
    conf.HADOOP_BIN.set_for_testing(cluster.fs.hadoop_bin_path),
    conf.HDFS_CLUSTERS['default'].NN_HOST.set_for_testing('localhost'),
    conf.HDFS_CLUSTERS['default'].NN_HDFS_PORT.set_for_testing(22),
    conf.HDFS_CLUSTERS["default"].NN_THRIFT_PORT.set_for_testing(cluster.fs.thrift_port),
    conf.MR_CLUSTERS["default"].JT_HOST.set_for_testing("localhost"),
    conf.MR_CLUSTERS['default'].JT_THRIFT_PORT.set_for_testing(23),
  )
  try:
    resp = cli.get('/debug/check_config')
    # The namenode plugin is reachable via the real thrift port, so these
    # two messages must be absent.
    assert_false('Failed to contact Namenode plugin' in resp.content)
    assert_false('Failed to see HDFS root' in resp.content)
    # The broken ports must cause these two failure messages to appear.
    assert_true('Failed to upload files' in resp.content)
    assert_true('Failed to contact JobTracker plugin' in resp.content)
  finally:
    # Undo every config override, then tear down the shared cluster.
    for old_conf in reset:
      old_conf()
    cluster.shutdown()