def test_local_jar(self):
    """A local jar path should be passed straight to ``hadoop jar``."""
    jar_path = os.path.join(self.tmp_dir, 'fake.jar')
    with open(jar_path, 'w'):
        pass  # only the file's existence matters

    job = MRJustAJar(['-r', 'hadoop', '--jar', jar_path])
    job.sandbox()

    with job.make_runner() as runner:
        runner.run()

        all_cmd_args = get_mock_hadoop_cmd_args()
        jar_invocations = [
            cmd for cmd in all_cmd_args if cmd and cmd[0] == 'jar']

        # exactly one `hadoop jar` call, with the jar path unmodified
        self.assertEqual(len(jar_invocations), 1)
        self.assertEqual(jar_invocations[0], ['jar', jar_path])
def test_input_output_interpolation(self):
    """INPUT/OUTPUT placeholders in jar step args should be interpolated,
    and the jar step's output should feed the following streaming step."""
    fake_jar = os.path.join(self.tmp_dir, 'fake.jar')
    input1 = os.path.join(self.tmp_dir, 'input1')
    input2 = os.path.join(self.tmp_dir, 'input2')
    for path in (fake_jar, input1, input2):
        with open(path, 'w'):
            pass  # empty files are fine

    job = MRJarAndStreaming(
        ['-r', 'hadoop', '--jar', fake_jar, input1, input2])
    job.sandbox()

    add_mock_hadoop_output([b''])  # need this for streaming step

    with job.make_runner() as runner:
        runner.run()

        all_cmd_args = get_mock_hadoop_cmd_args()
        jar_invocations = [
            cmd for cmd in all_cmd_args if cmd and cmd[0] == 'jar']
        self.assertEqual(len(jar_invocations), 2)

        jar_args, streaming_args = jar_invocations

        self.assertEqual(len(jar_args), 5)
        self.assertEqual(jar_args[0], 'jar')
        self.assertEqual(jar_args[1], fake_jar)
        self.assertEqual(jar_args[2], 'stuff')

        # check input is interpolated (comma-joined upload URIs)
        expected_input = ','.join(
            runner._upload_mgr.uri(path) for path in (input1, input2))
        self.assertEqual(jar_args[3], expected_input)

        # check output of jar is input of next step
        streaming_input = streaming_args[
            streaming_args.index('-input') + 1]
        self.assertEqual(jar_args[4], streaming_input)
def test_hdfs_jar_uri(self):
    """A jar given as an hdfs:// URI is passed through untouched."""
    # this could change, but for now, we pass URIs straight through
    mock_hdfs_jar = os.path.join(get_mock_hdfs_root(), 'fake.jar')
    with open(mock_hdfs_jar, 'w'):
        pass  # just create the file in the mock HDFS root

    jar_uri = 'hdfs:///fake.jar'

    job = MRJustAJar(['-r', 'hadoop', '--jar', jar_uri])
    job.sandbox()

    with job.make_runner() as runner:
        with logger_disabled('mrjob.hadoop'):
            # `hadoop jar` doesn't actually accept URIs
            self.assertRaises(StepFailedException, runner.run)

        all_cmd_args = get_mock_hadoop_cmd_args()
        jar_invocations = [
            cmd for cmd in all_cmd_args if cmd and cmd[0] == 'jar']

        self.assertEqual(len(jar_invocations), 1)
        self.assertEqual(jar_invocations[0], ['jar', jar_uri])
def test_input_output_interpolation(self):
    """The jar step should see interpolated input/output args, and its
    output should be wired into the streaming step that follows it."""
    fake_jar = os.path.join(self.tmp_dir, 'fake.jar')
    open(fake_jar, 'w').close()
    input1 = os.path.join(self.tmp_dir, 'input1')
    open(input1, 'w').close()
    input2 = os.path.join(self.tmp_dir, 'input2')
    open(input2, 'w').close()

    job = MRJarAndStreaming(
        ['-r', 'hadoop', '--jar', fake_jar, input1, input2])
    job.sandbox()

    add_mock_hadoop_output([b''])  # need this for streaming step

    with job.make_runner() as runner:
        runner.run()

        cmd_history = get_mock_hadoop_cmd_args()
        jar_cmds = [c for c in cmd_history if c and c[0] == 'jar']
        self.assertEqual(len(jar_cmds), 2)

        jar_args, streaming_args = jar_cmds[0], jar_cmds[1]

        self.assertEqual(len(jar_args), 5)
        self.assertEqual(jar_args[:3], ['jar', fake_jar, 'stuff'])

        # check input is interpolated
        uris = [runner._upload_mgr.uri(p) for p in (input1, input2)]
        self.assertEqual(jar_args[3], ','.join(uris))

        # check output of jar is input of next step
        input_idx = streaming_args.index('-input')
        self.assertEqual(jar_args[4], streaming_args[input_idx + 1])
def _test_end_to_end(self, args=()):
    """Run MRTwoStepJob against the mock Hadoop binary and check the
    whole lifecycle: input upload, step commands, counters, output,
    and cleanup.

    :param args: extra command-line switches to splice into the job's
                 argument list (used by wrapper tests to vary options).
    """
    # read from STDIN, a local file, and a remote file
    stdin = BytesIO(b'foo\nbar\n')

    local_input_path = os.path.join(self.tmp_dir, 'input')
    with open(local_input_path, 'w') as local_input_file:
        local_input_file.write('bar\nqux\n')

    # stage a file into mock HDFS via `hadoop fs -put`
    input_to_upload = os.path.join(self.tmp_dir, 'remote_input')
    with open(input_to_upload, 'w') as input_to_upload_file:
        input_to_upload_file.write('foo\n')
    remote_input_path = 'hdfs:///data/foo'
    check_call([self.hadoop_bin,
                'fs', '-put', input_to_upload, remote_input_path])

    # add counters (one dict per step)
    add_mock_hadoop_counters({'foo': {'bar': 23}})
    add_mock_hadoop_counters({'baz': {'qux': 42}})

    # doesn't matter what the intermediate output is; just has to exist.
    add_mock_hadoop_output([b''])
    add_mock_hadoop_output([b'1\t"qux"\n2\t"bar"\n',
                            b'2\t"foo"\n5\tnull\n'])

    mr_job = MRTwoStepJob(['-r', 'hadoop', '-v',
                           '--no-conf', '--hadoop-arg', '-libjar',
                           '--hadoop-arg', 'containsJars.jar'] + list(args)
                          + ['-', local_input_path, remote_input_path]
                          + ['--jobconf', 'x=y'])
    mr_job.sandbox(stdin=stdin)

    local_tmp_dir = None
    results = []

    with mr_job.make_runner() as runner:
        assert isinstance(runner, HadoopJobRunner)
        runner.run()

        for line in runner.stream_output():
            key, value = mr_job.parse_output_line(line)
            results.append((key, value))

        local_tmp_dir = runner._get_local_tmp_dir()
        # make sure cleanup hasn't happened yet
        assert os.path.exists(local_tmp_dir)
        assert any(runner.fs.ls(runner.get_output_dir()))

        # make sure we're writing to the correct path in HDFS
        hdfs_root = get_mock_hdfs_root()
        self.assertEqual(sorted(os.listdir(hdfs_root)), ['data', 'user'])
        home_dir = os.path.join(hdfs_root, 'user', getpass.getuser())
        self.assertEqual(os.listdir(home_dir), ['tmp'])
        self.assertEqual(os.listdir(os.path.join(home_dir, 'tmp')),
                         ['mrjob'])
        self.assertEqual(runner._opts['hadoop_extra_args'],
                         ['-libjar', 'containsJars.jar'])

        # make sure mrjob.tar.gz was uploaded
        self.assertTrue(os.path.exists(runner._mrjob_tar_gz_path))
        self.assertIn(runner._mrjob_tar_gz_path,
                      runner._upload_mgr.path_to_uri())

        # make sure setup script exists, and mrjob.tar.gz is added
        # to PYTHONPATH in it
        self.assertTrue(os.path.exists(runner._setup_wrapper_script_path))
        self.assertIn(runner._setup_wrapper_script_path,
                      runner._upload_mgr.path_to_uri())
        mrjob_tar_gz_name = runner._working_dir_mgr.name(
            'archive', runner._mrjob_tar_gz_path)
        with open(runner._setup_wrapper_script_path) as wrapper:
            self.assertTrue(any(
                ('export PYTHONPATH' in line and mrjob_tar_gz_name in line)
                for line in wrapper))

    # counters come back one dict per step, in order
    self.assertEqual(runner.counters(),
                     [{'foo': {'bar': 23}},
                      {'baz': {'qux': 42}}])

    self.assertEqual(sorted(results),
                     [(1, 'qux'), (2, 'bar'), (2, 'foo'), (5, None)])

    # make sure we called hadoop the way we expected
    hadoop_cmd_args = get_mock_hadoop_cmd_args()

    jar_cmd_args = [cmd_args for cmd_args in hadoop_cmd_args
                    if cmd_args[:1] == ['jar']]
    self.assertEqual(len(jar_cmd_args), 2)
    step_0_args, step_1_args = jar_cmd_args

    # check input/output format (only step 0 reads raw input; only
    # step 1 writes final output)
    self.assertIn('-inputformat', step_0_args)
    self.assertNotIn('-outputformat', step_0_args)
    self.assertNotIn('-inputformat', step_1_args)
    self.assertIn('-outputformat', step_1_args)

    # make sure -libjar extra arg comes before -mapper
    for args in (step_0_args, step_1_args):
        self.assertIn('-libjar', args)
        self.assertIn('-mapper', args)
        self.assertLess(args.index('-libjar'), args.index('-mapper'))

    # make sure -jobconf made it through
    self.assertIn('-D', step_0_args)
    self.assertIn('x=y', step_0_args)
    self.assertIn('-D', step_1_args)
    # job overrides jobconf in step 1
    self.assertIn('x=z', step_1_args)

    # make sure cleanup happens (triggered by leaving the with block)
    assert not os.path.exists(local_tmp_dir)
    assert not any(runner.fs.ls(runner.get_output_dir()))
def _test_end_to_end(self, args=()):
    """Run MRTwoStepJob against the mock Hadoop binary and check the
    whole lifecycle: input upload, step commands, counters, output,
    and cleanup.

    :param args: extra command-line switches to splice into the job's
                 argument list (used by wrapper tests to vary options).
    """
    # read from STDIN, a local file, and a remote file
    stdin = BytesIO(b'foo\nbar\n')

    local_input_path = os.path.join(self.tmp_dir, 'input')
    with open(local_input_path, 'w') as local_input_file:
        local_input_file.write('bar\nqux\n')

    # stage a file into mock HDFS via `hadoop fs -put`
    input_to_upload = os.path.join(self.tmp_dir, 'remote_input')
    with open(input_to_upload, 'w') as input_to_upload_file:
        input_to_upload_file.write('foo\n')
    remote_input_path = 'hdfs:///data/foo'
    check_call([self.hadoop_bin,
                'fs', '-put', input_to_upload, remote_input_path])

    # add counters (one dict per step)
    add_mock_hadoop_counters({'foo': {'bar': 23}})
    add_mock_hadoop_counters({'baz': {'qux': 42}})

    # doesn't matter what the intermediate output is; just has to exist.
    add_mock_hadoop_output([b''])
    add_mock_hadoop_output([b'1\t"qux"\n2\t"bar"\n',
                            b'2\t"foo"\n5\tnull\n'])

    mr_job = MRTwoStepJob(['-r', 'hadoop', '-v',
                           '--no-conf', '--hadoop-arg', '-libjar',
                           '--hadoop-arg', 'containsJars.jar'] + list(args)
                          + ['-', local_input_path, remote_input_path]
                          + ['--jobconf', 'x=y'])
    mr_job.sandbox(stdin=stdin)

    local_tmp_dir = None
    results = []

    with mr_job.make_runner() as runner:
        assert isinstance(runner, HadoopJobRunner)
        runner.run()

        for line in runner.stream_output():
            key, value = mr_job.parse_output_line(line)
            results.append((key, value))

        local_tmp_dir = runner._get_local_tmp_dir()
        # make sure cleanup hasn't happened yet
        assert os.path.exists(local_tmp_dir)
        assert any(runner.fs.ls(runner.get_output_dir()))

        # make sure we're writing to the correct path in HDFS
        hdfs_root = get_mock_hdfs_root()
        self.assertEqual(sorted(os.listdir(hdfs_root)), ['data', 'user'])
        home_dir = os.path.join(hdfs_root, 'user', getpass.getuser())
        self.assertEqual(os.listdir(home_dir), ['tmp'])
        self.assertEqual(os.listdir(os.path.join(home_dir, 'tmp')),
                         ['mrjob'])
        self.assertEqual(runner._opts['hadoop_extra_args'],
                         ['-libjar', 'containsJars.jar'])

        # make sure mrjob.tar.gz was uploaded
        self.assertTrue(os.path.exists(runner._mrjob_tar_gz_path))
        self.assertIn(runner._mrjob_tar_gz_path,
                      runner._upload_mgr.path_to_uri())

        # make sure setup script exists, and mrjob.tar.gz is added
        # to PYTHONPATH in it
        self.assertTrue(os.path.exists(runner._setup_wrapper_script_path))
        self.assertIn(runner._setup_wrapper_script_path,
                      runner._upload_mgr.path_to_uri())
        mrjob_tar_gz_name = runner._working_dir_mgr.name(
            'archive', runner._mrjob_tar_gz_path)
        with open(runner._setup_wrapper_script_path) as wrapper:
            self.assertTrue(any(
                ('export PYTHONPATH' in line and mrjob_tar_gz_name in line)
                for line in wrapper))

    # counters come back one dict per step, in order
    self.assertEqual(runner.counters(),
                     [{'foo': {'bar': 23}},
                      {'baz': {'qux': 42}}])

    self.assertEqual(sorted(results),
                     [(1, 'qux'), (2, 'bar'), (2, 'foo'), (5, None)])

    # make sure we called hadoop the way we expected
    hadoop_cmd_args = get_mock_hadoop_cmd_args()

    jar_cmd_args = [cmd_args for cmd_args in hadoop_cmd_args
                    if cmd_args[:1] == ['jar']]
    self.assertEqual(len(jar_cmd_args), 2)
    step_0_args, step_1_args = jar_cmd_args

    # check input/output format (only step 0 reads raw input; only
    # step 1 writes final output)
    self.assertIn('-inputformat', step_0_args)
    self.assertNotIn('-outputformat', step_0_args)
    self.assertNotIn('-inputformat', step_1_args)
    self.assertIn('-outputformat', step_1_args)

    # make sure -libjar extra arg comes before -mapper
    for args in (step_0_args, step_1_args):
        self.assertIn('-libjar', args)
        self.assertIn('-mapper', args)
        self.assertLess(args.index('-libjar'), args.index('-mapper'))

    # make sure -D (jobconf) made it through
    self.assertIn('-D', step_0_args)
    self.assertIn('x=y', step_0_args)
    self.assertIn('-D', step_1_args)
    # job overrides jobconf in step 1
    self.assertIn('x=z', step_1_args)

    # make sure cleanup happens (triggered by leaving the with block)
    assert not os.path.exists(local_tmp_dir)
    assert not any(runner.fs.ls(runner.get_output_dir()))