def test_cleared_value(self):
    # A ClearedValue wrapping a real value replaces the earlier value.
    result = combine_jobconfs(
        {'foo': 'bar', 'bar': 'baz'},
        {'bar': ClearedValue('qux')},
    )
    self.assertEqual(result, {'foo': 'bar', 'bar': 'qux'})
def test_deleted_value(self):
    # ClearedValue(None) removes the key entirely from the result.
    result = combine_jobconfs(
        {'foo': 'bar', 'bar': 'baz'},
        {'bar': ClearedValue(None)},
    )
    self.assertEqual(result, {'foo': 'bar'})
def test_blank_out_None_values(self):
    # A plain None in a later dict blanks out the earlier value.
    result = combine_jobconfs(
        {'foo': 'bar', 'bar': 'baz'},
        {'bar': None},
    )
    self.assertEqual(result, {'foo': 'bar'})
def test_later_values_take_precedence(self):
    # Later dicts win on key collisions; untouched keys pass through.
    result = combine_jobconfs(
        {'foo': 'bar', 'bar': 'baz'},
        {'foo': 'baz'},
    )
    self.assertEqual(result, {'foo': 'baz', 'bar': 'baz'})
def test_convert_non_string_values(self):
    # Booleans and ints are stringified; None is blanked out entirely.
    result = combine_jobconfs(
        {'foo': True, 'bar': False, 'baz': None, 'qux': 1},
        {'qux': 2},
    )
    self.assertEqual(result, {'foo': 'true', 'bar': 'false', 'qux': '2'})
def test_skip_None(self):
    # None arguments are ignored, wherever they appear in the sequence.
    result = combine_jobconfs(
        None,
        {'foo': 'bar', 'bar': 'baz'},
        None,
        {'foo': 'baz'},
        None,
    )
    self.assertEqual(result, {'foo': 'baz', 'bar': 'baz'})
def _jobconf_for_step(self, step_num):
    """Build the combined jobconf dict for the given step.

    Merges sort-value jobconf, the runner's ``jobconf`` opt, and any
    step-specific jobconf, then translates keys to the current Hadoop
    version when one is known.
    """
    step = self._get_step(step_num)

    # _sort_values_jobconf() isn't relevant to Spark,
    # but it doesn't do any harm either
    combined = combine_jobconfs(
        self._sort_values_jobconf(),
        self._opts['jobconf'],
        step.get('jobconf'),
    )

    # if user is using the wrong jobconfs, add in the correct ones
    # and log a warning
    hadoop_version = self.get_hadoop_version()
    if not hadoop_version:
        return combined

    return translate_jobconf_dict(combined, hadoop_version)
def test_empty(self):
    # With no arguments at all, the result is an empty dict.
    result = combine_jobconfs()
    self.assertEqual(result, {})