def test_merge_configs(self):
    """_merge_configs should deep-merge two nested dicts: keys unique to
    either side are kept, and inner dicts for a shared key ('bond') are
    combined rather than overwritten."""
    provider = ru.HadoopAutoConfigsProvider({}, None, None, False)
    base = {
        'cat': {'talk': 'meow'},
        'bond': {'name': 'james'},
    }
    extra = {
        'dog': {'talk': 'woof'},
        'bond': {'extra_name': 'james bond'},
    }
    want = {
        'cat': {'talk': 'meow'},
        'dog': {'talk': 'woof'},
        'bond': {'name': 'james', 'extra_name': 'james bond'},
    }
    self.assertEqual(want, provider._merge_configs(base, extra))
def test_get_recommended_node_configs_small_flavor(self, fake_flavor):
    """For a 2048 MB / 1 vCPU flavor the provider must emit this exact
    YARN/MapReduce memory tuning for the node group."""
    node_group = FakeObject(flavor_id="fake_flavor",
                            node_configs=Configs({}))
    cluster = FakeObject(cluster_configs=Configs({}))
    fake_flavor.return_value = FakeObject(ram=2048, vcpus=1)
    provider = ru.HadoopAutoConfigsProvider(
        {'node_configs': {}, 'cluster_configs': {}}, [], cluster, False)
    # Same expected mapping as before, grouped YARN-first for readability;
    # dict equality is order-independent.
    expected = {
        'yarn.nodemanager.resource.memory-mb': 2048,
        'yarn.scheduler.minimum-allocation-mb': 256,
        'yarn.scheduler.maximum-allocation-mb': 2048,
        'yarn.nodemanager.vmem-check-enabled': 'false',
        'yarn.app.mapreduce.am.resource.mb': 256,
        'yarn.app.mapreduce.am.command-opts': '-Xmx204m',
        'mapreduce.map.memory.mb': 256,
        'mapreduce.reduce.memory.mb': 512,
        'mapreduce.map.java.opts': '-Xmx204m',
        'mapreduce.reduce.java.opts': '-Xmx409m',
        'mapreduce.task.io.sort.mb': 102,
    }
    self.assertEqual(
        expected, provider._get_recommended_node_configs(node_group))
def recommend_configs(cluster, plugin_configs, scaling):
    """Apply Hadoop auto-tuning recommendations to *cluster*.

    Builds the set of node-level YARN/MapReduce options plus the
    cluster-level HDFS replication option, then hands the whole mapping
    to HadoopAutoConfigsProvider to compute and apply values.
    """
    yarn_keys = [
        'yarn.nodemanager.resource.memory-mb',
        'yarn.scheduler.minimum-allocation-mb',
        'yarn.scheduler.maximum-allocation-mb',
        'yarn.nodemanager.vmem-check-enabled',
    ]
    mapred_keys = [
        'yarn.app.mapreduce.am.resource.mb',
        'yarn.app.mapreduce.am.command-opts',
        'mapreduce.map.memory.mb',
        'mapreduce.reduce.memory.mb',
        'mapreduce.map.java.opts',
        'mapreduce.reduce.java.opts',
        'mapreduce.task.io.sort.mb',
    ]
    # MapReduce entries first, then YARN — same insertion order as the
    # original explicit loops produced.
    node_configs = {key: ('MapReduce', key) for key in mapred_keys}
    node_configs.update({key: ('YARN', key) for key in yarn_keys})
    configs_to_configure = {
        'cluster_configs': {
            'dfs.replication': ('HDFS', 'dfs.replication'),
        },
        'node_configs': node_configs,
    }
    provider = ru.HadoopAutoConfigsProvider(
        configs_to_configure, plugin_configs, cluster, scaling)
    provider.apply_recommended_configs()
def recommend_configs(self, cluster, scaling=False):
    """Run the auto-configuration provider against *cluster*, tuning only
    the HDFS replication factor at the cluster level."""
    want_to_configure = {
        'cluster_configs': {
            'dfs.replication': ('HDFS', 'dfs.replication'),
        },
    }
    plugin_configs = self.get_configs(cluster.hadoop_version)
    provider = ru.HadoopAutoConfigsProvider(
        want_to_configure, plugin_configs, cluster, scaling)
    provider.apply_recommended_configs()
def test_apply_recommended_configs_no_updates(
        self, cond_cluster, cond_node_group, fake_flavor):
    """When every tunable config is already user-provided, no node-group
    update must be issued; the cluster is only stamped with the
    'auto-configured' marker in its extra field."""
    fake_flavor.return_value = FakeObject(ram=2048, vcpus=1)
    to_tune = {
        'cluster_configs': {
            'dfs.replication': ('dfs', 'replica'),
        },
        'node_configs': {
            'mapreduce.task.io.sort.mb': ('bond', 'extra_name'),
        },
    }
    plugin_configs = [
        FakeObject(applicable_target='dfs', name='replica',
                   default_value=3),
    ]
    node_group = FakeObject(
        use_autoconfig=True,
        count=2,
        node_processes=['dog_datanode'],
        flavor_id='fake_id',
        node_configs=Configs({'bond': {'extra_name': 'james bond'}}))
    cluster = FakeObject(
        cluster_configs=Configs({'dfs': {'replica': 1}}),
        node_groups=[node_group],
        use_autoconfig=True,
        extra=Configs({}))
    provider = ru.HadoopAutoConfigsProvider(
        to_tune, plugin_configs, cluster, False)
    provider.apply_recommended_configs()
    # All node-level targets were already set by the user, so no
    # node-group write should have occurred.
    self.assertEqual(0, cond_node_group.call_count)
    # The cluster itself is still marked as auto-configured exactly once.
    self.assertEqual(
        [mock.call(context.ctx(), cluster,
                   {'extra': {'auto-configured': True}})],
        cond_cluster.call_args_list)
def test_get_recommended_node_configs_medium_flavor(self, fake_flavor):
    """For a 4096 MB / 2 vCPU flavor the provider must emit this exact
    YARN/MapReduce memory tuning for the node group."""
    node_group = FakeObject(flavor_id="fake_flavor",
                            node_configs=Configs({}))
    cluster = FakeObject(cluster_configs=Configs({}))
    fake_flavor.return_value = FakeObject(ram=4096, vcpus=2)
    provider = ru.HadoopAutoConfigsProvider({}, [], cluster, False)
    # Same expected mapping as before, grouped YARN-first for readability;
    # dict equality is order-independent.
    expected = {
        'yarn.nodemanager.resource.memory-mb': 3072,
        'yarn.scheduler.minimum-allocation-mb': 384,
        'yarn.scheduler.maximum-allocation-mb': 3072,
        'yarn.nodemanager.vmem-check-enabled': 'false',
        'yarn.app.mapreduce.am.resource.mb': 384,
        'yarn.app.mapreduce.am.command-opts': '-Xmx307m',
        'mapreduce.map.memory.mb': 384,
        'mapreduce.reduce.memory.mb': 768,
        'mapreduce.map.java.opts': '-Xmx307m',
        'mapreduce.reduce.java.opts': '-Xmx614m',
        'mapreduce.task.io.sort.mb': 153,
    }
    self.assertEqual(
        expected, provider._get_recommended_node_configs(node_group))
def test_not_autonconfigured(self, cluster_update):
    """With scaling=True and an empty 'extra' (no auto-configured marker),
    apply_recommended_configs must not issue any cluster update.

    NOTE(review): the test name has a typo ('autonconfigured') but it is
    kept — renaming would change the discovered test identifier.
    """
    cluster = FakeObject(extra=Configs({}))
    provider = ru.HadoopAutoConfigsProvider({}, [], cluster, True)
    provider.apply_recommended_configs()
    self.assertEqual(0, cluster_update.call_count)