def test_defaults():
    """Verify DeployConfig values parsed from the example EC2 props and hosts files."""
    # FIXME(review): a second test_defaults() is defined later in this file and
    # shadows this one, so pytest never collects this version. One of the two
    # definitions should be renamed or removed.
    c = DeployConfig(
        "muchos",
        '../../conf/muchos.props.example',
        '../../conf/hosts/example/example_cluster',
        'mycluster')
    # [ec2] section defaults
    assert c.get('ec2', 'default_instance_type') == 'm3.large'
    assert c.get('ec2', 'worker_instance_type') == 'm3.large'
    # ephemeral-disk accounting
    assert c.num_ephemeral('worker1') == 1
    assert c.num_ephemeral('worker1') == c.worker_num_ephemeral()
    assert c.max_ephemeral() == 1
    assert c.mounts(2) == ['/media/ephemeral0', '/media/ephemeral1']
    assert c.devices(2) == ['/dev/xvdb', '/dev/xvdc']
    assert c.node_type_map() == {
        'default': {'mounts': ['/media/ephemeral0'],
                    'devices': ['/dev/xvdb']},
        'worker': {'mounts': ['/media/ephemeral0'],
                   'devices': ['/dev/xvdb']}}
    assert c.node_type('worker1') == 'worker'
    assert c.node_type('leader1') == 'default'
    assert c.get('ec2', 'region') == 'us-east-1'
    # was "== False" — use idiomatic truthiness instead (PEP 8 / E712)
    assert not c.has_option('ec2', 'vpc_id')
    assert not c.has_option('ec2', 'subnet_id')
    assert c.get('ec2', 'key_name') == 'my_aws_key'
    assert c.instance_tags() == {}
    # cluster topology from the example hosts file
    assert len(c.nodes()) == 7
    assert c.get_node('leader1') == ['namenode', 'zookeeper', 'fluo']
    assert c.get_node('worker1') == ['worker']
    assert c.get_node('worker2') == ['worker']
    assert c.get_node('worker3') == ['worker']
    assert c.has_service('fluo')
    assert c.get_service_hostnames('worker') == [
        'worker1', 'worker2', 'worker3']
    assert c.get_service_hostnames('zookeeper') == [
        'leader1', 'leader2', 'leader3']
    assert c.get_hosts() == {
        'leader1': ('10.0.0.0', '23.0.0.0'),
        'leader2': ('10.0.0.1', None),
        'leader3': ('10.0.0.2', None),
        'worker1': ('10.0.0.3', None),
        'worker2': ('10.0.0.4', None),
        'worker3': ('10.0.0.5', None)}
    assert c.get_public_ip('leader1') == '23.0.0.0'
    assert c.get_private_ip('leader1') == '10.0.0.0'
    assert c.cluster_name == 'mycluster'
    # only version prefixes are pinned -- exact versions change over time
    assert c.version("accumulo").startswith('1.')
    assert c.version("fluo").startswith('1.')
    assert c.version("hadoop").startswith('2.')
    assert c.version("zookeeper").startswith('3.')
    assert c.get_service_private_ips("worker") == [
        '10.0.0.3', '10.0.0.4', '10.0.0.5']
    # proxy / [general] settings
    assert c.get('general', 'proxy_hostname') == "leader1"
    assert c.proxy_public_ip() == "23.0.0.0"
    assert c.proxy_private_ip() == "10.0.0.0"
    assert c.get('general', 'cluster_basedir') == "/home/centos"
    assert c.get('general', 'cluster_user') == "centos"
    assert c.get_non_proxy() == [
        ('10.0.0.1', 'leader2'), ('10.0.0.2', 'leader3'),
        ('10.0.0.3', 'worker1'), ('10.0.0.4', 'worker2'),
        ('10.0.0.5', 'worker3')]
    assert c.get_host_services() == [
        ('leader1', 'namenode zookeeper fluo'),
        ('leader2', 'resourcemanager zookeeper'),
        ('leader3', 'accumulomaster zookeeper'),
        ('metrics', 'metrics'),
        ('worker1', 'worker'),
        ('worker2', 'worker'),
        ('worker3', 'worker')]
    assert c.get_image_id('m3.large') == 'ami-6d1c2007'
    assert c.get('ec2', 'aws_access_key') == 'access_key'
    assert c.get('ec2', 'aws_secret_key') == 'secret_key'
def test_azure_cluster():
    """Exercise DeployConfig with the cluster type stubbed to 'azure'."""
    cfg = DeployConfig(
        "muchos",
        '../conf/muchos.props.example',
        '../conf/hosts/example/example_cluster',
        '../conf/checksums',
        '../conf/templates',
        'mycluster')
    # The shared muchos.props.example defaults to ec2, so force azure here.
    cfg.cluster_type = 'azure'
    assert cfg.checksum_ver('accumulo', '1.9.0') == \
        'f68a6145029a9ea843b0305c90a7f5f0334d8a8ceeea94734267ec36421fe7fe'
    assert cfg.checksum('accumulo') == \
        'df172111698c7a73aa031de09bd5589263a6b824482fbb9b4f0440a16602ed47'
    assert cfg.get('azure', 'vm_sku') == 'Standard_D8s_v3'
    assert cfg.get('azure', 'managed_disk_type') == 'Standard_LRS'
    assert cfg.user_home() == '/home/centos'
    assert cfg.mount_root() == '/var/data'
    assert cfg.force_format() == 'no'
    assert cfg.worker_data_dirs() == ['/var/data1', '/var/data2', '/var/data3']
    assert cfg.default_data_dirs() == ['/var/data1', '/var/data2', '/var/data3']
    assert cfg.metrics_drive_ids() == ['var-data1', 'var-data2', 'var-data3']
    assert cfg.shutdown_delay_minutes() == '0'
    assert cfg.mounts(2) == ['/var/data0', '/var/data1']
    assert cfg.node_type('worker1') == 'worker'
    assert cfg.node_type('leader1') == 'default'
    # every one of these azure options must exist in the props file
    for option in ('resource_group', 'vnet', 'vnet_cidr', 'subnet',
                   'subnet_cidr', 'numnodes', 'location'):
        assert cfg.has_option('azure', option)
    assert cfg.instance_tags() == {}
    assert len(cfg.nodes()) == 6
    assert cfg.get_node('leader1') == [
        'namenode', 'resourcemanager', 'accumulomaster', 'zookeeper']
    assert cfg.get_node('leader2') == ['metrics']
    assert cfg.get_node('worker1') == ['worker', 'swarmmanager']
    assert cfg.get_node('worker2') == ['worker']
    assert cfg.get_node('worker3') == ['worker']
    assert cfg.has_service('accumulomaster')
    assert not cfg.has_service('fluo')
    assert cfg.get_service_hostnames('worker') == [
        'worker1', 'worker2', 'worker3', 'worker4']
    assert cfg.get_service_hostnames('zookeeper') == ['leader1']
    assert cfg.get_hosts() == {
        'leader1': ('10.0.0.0', '23.0.0.0'),
        'leader2': ('10.0.0.1', None),
        'worker1': ('10.0.0.2', None),
        'worker2': ('10.0.0.3', None),
        'worker3': ('10.0.0.4', None),
        'worker4': ('10.0.0.5', None)}
    assert cfg.get_public_ip('leader1') == '23.0.0.0'
    assert cfg.get_private_ip('leader1') == '10.0.0.0'
    assert cfg.cluster_name == 'mycluster'
    assert cfg.get_cluster_type() == 'azure'
    # version prefixes only
    assert cfg.version("accumulo").startswith('2.')
    assert cfg.version("fluo").startswith('1.')
    assert cfg.version("hadoop").startswith('3.')
    assert cfg.version("zookeeper").startswith('3.')
    assert cfg.get_service_private_ips("worker") == [
        '10.0.0.2', '10.0.0.3', '10.0.0.4', '10.0.0.5']
    # proxy / [general] settings
    assert cfg.get('general', 'proxy_hostname') == "leader1"
    assert cfg.proxy_public_ip() == "23.0.0.0"
    assert cfg.proxy_private_ip() == "10.0.0.0"
    assert cfg.get('general', 'cluster_user') == "centos"
    assert cfg.get('general', 'cluster_group') == "centos"
    assert cfg.get_non_proxy() == [
        ('10.0.0.1', 'leader2'), ('10.0.0.2', 'worker1'),
        ('10.0.0.3', 'worker2'), ('10.0.0.4', 'worker3'),
        ('10.0.0.5', 'worker4')]
    assert cfg.get_host_services() == [
        ('leader1', 'namenode resourcemanager accumulomaster zookeeper'),
        ('leader2', 'metrics'),
        ('worker1', 'worker swarmmanager'),
        ('worker2', 'worker'),
        ('worker3', 'worker'),
        ('worker4', 'worker')]
def test_ec2_cluster():
    """Exercise DeployConfig with the default (ec2) cluster type."""
    conf = DeployConfig(
        "muchos",
        '../conf/muchos.props.example',
        '../conf/hosts/example/example_cluster',
        '../conf/checksums',
        '../conf/templates',
        'mycluster')
    assert conf.checksum_ver('accumulo', '1.9.0') == \
        'f68a6145029a9ea843b0305c90a7f5f0334d8a8ceeea94734267ec36421fe7fe'
    assert conf.checksum('accumulo') == \
        'df172111698c7a73aa031de09bd5589263a6b824482fbb9b4f0440a16602ed47'
    # [ec2] section defaults
    assert conf.get('ec2', 'default_instance_type') == 'm5d.large'
    assert conf.get('ec2', 'worker_instance_type') == 'm5d.large'
    assert conf.get('ec2', 'aws_ami') == 'ami-9887c6e7'
    assert conf.user_home() == '/home/centos'
    # ephemeral-disk layout
    assert conf.max_ephemeral() == 1
    assert conf.mount_root() == '/media/ephemeral'
    assert conf.fstype() == 'ext3'
    assert conf.force_format() == 'no'
    assert conf.worker_data_dirs() == ['/media/ephemeral0']
    assert conf.default_data_dirs() == ['/media/ephemeral0']
    assert conf.metrics_drive_ids() == ['media-ephemeral0']
    assert conf.shutdown_delay_minutes() == '0'
    assert conf.mounts(2) == ['/media/ephemeral0', '/media/ephemeral1']
    assert conf.node_type_map() == {
        'default': {'mounts': ['/media/ephemeral0'],
                    'devices': ['/dev/nvme1n1']},
        'worker': {'mounts': ['/media/ephemeral0'],
                   'devices': ['/dev/nvme1n1']}}
    assert conf.node_type('worker1') == 'worker'
    assert conf.node_type('leader1') == 'default'
    assert not conf.has_option('ec2', 'vpc_id')
    assert not conf.has_option('ec2', 'subnet_id')
    assert conf.get('ec2', 'key_name') == 'my_aws_key'
    assert conf.instance_tags() == {}
    # cluster topology from the example hosts file
    assert len(conf.nodes()) == 6
    assert conf.get_node('leader1') == [
        'namenode', 'resourcemanager', 'accumulomaster', 'zookeeper']
    assert conf.get_node('leader2') == ['metrics']
    assert conf.get_node('worker1') == ['worker', 'swarmmanager']
    assert conf.get_node('worker2') == ['worker']
    assert conf.get_node('worker3') == ['worker']
    assert conf.has_service('accumulomaster')
    assert not conf.has_service('fluo')
    assert conf.get_service_hostnames('worker') == [
        'worker1', 'worker2', 'worker3', 'worker4']
    assert conf.get_service_hostnames('zookeeper') == ['leader1']
    assert conf.get_hosts() == {
        'leader1': ('10.0.0.0', '23.0.0.0'),
        'leader2': ('10.0.0.1', None),
        'worker1': ('10.0.0.2', None),
        'worker2': ('10.0.0.3', None),
        'worker3': ('10.0.0.4', None),
        'worker4': ('10.0.0.5', None)}
    assert conf.get_public_ip('leader1') == '23.0.0.0'
    assert conf.get_private_ip('leader1') == '10.0.0.0'
    assert conf.cluster_name == 'mycluster'
    assert conf.get_cluster_type() == 'ec2'
    # version prefixes only
    assert conf.version("accumulo").startswith('2.')
    assert conf.version("fluo").startswith('1.')
    assert conf.version("hadoop").startswith('3.')
    assert conf.version("zookeeper").startswith('3.')
    assert conf.get_service_private_ips("worker") == [
        '10.0.0.2', '10.0.0.3', '10.0.0.4', '10.0.0.5']
    # proxy / [general] settings
    assert conf.get('general', 'proxy_hostname') == "leader1"
    assert conf.proxy_public_ip() == "23.0.0.0"
    assert conf.proxy_private_ip() == "10.0.0.0"
    assert conf.get('general', 'cluster_user') == "centos"
    assert conf.get('general', 'cluster_group') == "centos"
    assert conf.get_non_proxy() == [
        ('10.0.0.1', 'leader2'), ('10.0.0.2', 'worker1'),
        ('10.0.0.3', 'worker2'), ('10.0.0.4', 'worker3'),
        ('10.0.0.5', 'worker4')]
    assert conf.get_host_services() == [
        ('leader1', 'namenode resourcemanager accumulomaster zookeeper'),
        ('leader2', 'metrics'),
        ('worker1', 'worker swarmmanager'),
        ('worker2', 'worker'),
        ('worker3', 'worker'),
        ('worker4', 'worker')]
def test_defaults():
    """Verify DeployConfig defaults parsed from the example props/hosts/checksums files."""
    # NOTE(review): this redefines test_defaults from earlier in the file, so it
    # shadows that version under pytest -- confirm this duplication is intended.
    # NOTE(review): the constructor here takes 5 arguments while the sibling
    # tests pass 6 (including a templates path) -- verify against DeployConfig.
    dc = DeployConfig(
        "muchos",
        '../conf/muchos.props.example',
        '../conf/hosts/example/example_cluster',
        '../conf/checksums',
        'mycluster')
    assert dc.checksum_ver('accumulo', '1.9.0') == \
        'f68a6145029a9ea843b0305c90a7f5f0334d8a8ceeea94734267ec36421fe7fe'
    assert dc.checksum('accumulo') == \
        'baa5e0929248ff0d96355bc7fb42a5b75d183a83364519296e07b0adbb089180'
    # [ec2] section defaults
    assert dc.get('ec2', 'default_instance_type') == 'm5d.large'
    assert dc.get('ec2', 'worker_instance_type') == 'm5d.large'
    assert dc.get('ec2', 'aws_ami') == 'ami-9887c6e7'
    # ephemeral-disk layout
    assert dc.max_ephemeral() == 1
    assert dc.mounts(2) == ['/media/ephemeral0', '/media/ephemeral1']
    assert dc.node_type_map() == {
        'default': {'mounts': ['/media/ephemeral0'],
                    'devices': ['/dev/nvme1n1']},
        'worker': {'mounts': ['/media/ephemeral0'],
                   'devices': ['/dev/nvme1n1']}}
    assert dc.node_type('worker1') == 'worker'
    assert dc.node_type('leader1') == 'default'
    assert not dc.has_option('ec2', 'vpc_id')
    assert not dc.has_option('ec2', 'subnet_id')
    assert dc.get('ec2', 'key_name') == 'my_aws_key'
    assert dc.instance_tags() == {}
    # cluster topology from the example hosts file
    assert len(dc.nodes()) == 6
    assert dc.get_node('leader1') == [
        'namenode', 'resourcemanager', 'accumulomaster', 'zookeeper']
    assert dc.get_node('worker1') == ['worker', 'swarmmanager']
    assert dc.get_node('worker2') == ['worker']
    assert dc.get_node('worker3') == ['worker']
    assert dc.has_service('accumulomaster')
    assert not dc.has_service('fluo')
    assert dc.get_service_hostnames('worker') == [
        'worker1', 'worker2', 'worker3', 'worker4']
    assert dc.get_service_hostnames('zookeeper') == ['leader1']
    assert dc.get_hosts() == {
        'leader1': ('10.0.0.0', '23.0.0.0'),
        'leader2': ('10.0.0.1', None),
        'worker1': ('10.0.0.2', None),
        'worker2': ('10.0.0.3', None),
        'worker3': ('10.0.0.4', None),
        'worker4': ('10.0.0.5', None)}
    assert dc.get_public_ip('leader1') == '23.0.0.0'
    assert dc.get_private_ip('leader1') == '10.0.0.0'
    assert dc.cluster_name == 'mycluster'
    # version prefixes only
    assert dc.version("accumulo").startswith('2.')
    assert dc.version("fluo").startswith('1.')
    assert dc.version("hadoop").startswith('3.')
    assert dc.version("zookeeper").startswith('3.')
    assert dc.get_service_private_ips("worker") == [
        '10.0.0.2', '10.0.0.3', '10.0.0.4', '10.0.0.5']
    # proxy / [general] settings
    assert dc.get('general', 'proxy_hostname') == "leader1"
    assert dc.proxy_public_ip() == "23.0.0.0"
    assert dc.proxy_private_ip() == "10.0.0.0"
    assert dc.get('general', 'cluster_basedir') == "/home/centos"
    assert dc.get('general', 'cluster_user') == "centos"
    assert dc.get_non_proxy() == [
        ('10.0.0.1', 'leader2'), ('10.0.0.2', 'worker1'),
        ('10.0.0.3', 'worker2'), ('10.0.0.4', 'worker3'),
        ('10.0.0.5', 'worker4')]
    assert dc.get_host_services() == [
        ('leader1', 'namenode resourcemanager accumulomaster zookeeper'),
        ('leader2', 'metrics'),
        ('worker1', 'worker swarmmanager'),
        ('worker2', 'worker'),
        ('worker3', 'worker'),
        ('worker4', 'worker')]