# These tests exercise DeployConfig; they assume it is imported from the
# muchos config module (e.g. "from muchos.config import DeployConfig").
def test_case_sensitive():
    c = DeployConfig("muchos", '../../conf/muchos.props.example',
                     '../../conf/hosts/example/example_cluster', 'mycluster')
    assert c.has_option('ec2', 'aws_secret_key')
    assert not c.has_option('ec2', 'Aws_secret_key')
    c.set('nodes', 'CamelCaseWorker', 'worker,fluo')
    c.init_nodes()
    assert c.get_node('CamelCaseWorker') == ['worker', 'fluo']
def test_case_sensitive():
    c = DeployConfig("muchos", '../conf/muchos.props.example',
                     '../conf/hosts/example/example_cluster',
                     '../conf/checksums', '../conf/templates', 'mycluster')
    assert c.has_option('ec2', 'default_instance_type')
    assert not c.has_option('ec2', 'Default_instance_type')
    c.set('nodes', 'CamelCaseWorker', 'worker,fluo')
    c.init_nodes()
    assert c.get_node('CamelCaseWorker') == ['worker', 'fluo']
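# The two tests above depend on option names staying case-sensitive. Python's
# configparser lowercases option names by default (via its optionxform hook),
# so a config class has to override that hook to preserve case. A minimal
# sketch of the technique, assuming (not confirmed) that DeployConfig does
# something similar:
from configparser import ConfigParser


class CaseSensitiveConfig(ConfigParser):
    def optionxform(self, optionstr):
        # Return the option name unchanged instead of lowercasing it.
        return optionstr


# With this override, 'default_instance_type' and 'Default_instance_type'
# are distinct options, matching the has_option() assertions above.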
def test_defaults():
    c = DeployConfig("muchos", '../../conf/muchos.props.example',
                     '../../conf/hosts/example/example_cluster', 'mycluster')
    assert c.get('ec2', 'default_instance_type') == 'm3.large'
    assert c.get('ec2', 'worker_instance_type') == 'm3.large'
    assert c.num_ephemeral('worker1') == 1
    assert c.num_ephemeral('worker1') == c.worker_num_ephemeral()
    assert c.max_ephemeral() == 1
    assert c.mounts(2) == ['/media/ephemeral0', '/media/ephemeral1']
    assert c.devices(2) == ['/dev/xvdb', '/dev/xvdc']
    assert c.node_type_map() == {
        'default': {'mounts': ['/media/ephemeral0'], 'devices': ['/dev/xvdb']},
        'worker': {'mounts': ['/media/ephemeral0'], 'devices': ['/dev/xvdb']}}
    assert c.node_type('worker1') == 'worker'
    assert c.node_type('leader1') == 'default'
    assert c.get('ec2', 'region') == 'us-east-1'
    assert not c.has_option('ec2', 'vpc_id')
    assert not c.has_option('ec2', 'subnet_id')
    assert c.get('ec2', 'key_name') == 'my_aws_key'
    assert c.instance_tags() == {}
    assert len(c.nodes()) == 7
    assert c.get_node('leader1') == ['namenode', 'zookeeper', 'fluo']
    assert c.get_node('worker1') == ['worker']
    assert c.get_node('worker2') == ['worker']
    assert c.get_node('worker3') == ['worker']
    assert c.has_service('fluo')
    assert c.get_service_hostnames('worker') == ['worker1', 'worker2',
                                                 'worker3']
    assert c.get_service_hostnames('zookeeper') == ['leader1', 'leader2',
                                                    'leader3']
    assert c.get_hosts() == {
        'leader1': ('10.0.0.0', '23.0.0.0'), 'leader2': ('10.0.0.1', None),
        'leader3': ('10.0.0.2', None), 'worker1': ('10.0.0.3', None),
        'worker2': ('10.0.0.4', None), 'worker3': ('10.0.0.5', None)}
    assert c.get_public_ip('leader1') == '23.0.0.0'
    assert c.get_private_ip('leader1') == '10.0.0.0'
    assert c.cluster_name == 'mycluster'
    assert c.version("accumulo").startswith('1.')
    assert c.version("fluo").startswith('1.')
    assert c.version("hadoop").startswith('2.')
    assert c.version("zookeeper").startswith('3.')
    assert c.get_service_private_ips("worker") == [
        '10.0.0.3', '10.0.0.4', '10.0.0.5']
    assert c.get('general', 'proxy_hostname') == "leader1"
    assert c.proxy_public_ip() == "23.0.0.0"
    assert c.proxy_private_ip() == "10.0.0.0"
    assert c.get('general', 'cluster_basedir') == "/home/centos"
    assert c.get('general', 'cluster_user') == "centos"
    assert c.get_non_proxy() == [
        ('10.0.0.1', 'leader2'), ('10.0.0.2', 'leader3'),
        ('10.0.0.3', 'worker1'), ('10.0.0.4', 'worker2'),
        ('10.0.0.5', 'worker3')]
    assert c.get_host_services() == [
        ('leader1', 'namenode zookeeper fluo'),
        ('leader2', 'resourcemanager zookeeper'),
        ('leader3', 'accumulomaster zookeeper'),
        ('metrics', 'metrics'),
        ('worker1', 'worker'), ('worker2', 'worker'), ('worker3', 'worker')]
    assert c.get_image_id('m3.large') == 'ami-6d1c2007'
    assert c.get('ec2', 'aws_access_key') == 'access_key'
    assert c.get('ec2', 'aws_secret_key') == 'secret_key'
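# get_hosts() above returns {hostname: (private_ip, public_ip_or_None)}, so
# the example hosts file evidently lists one node per line with an optional
# public IP. A hedged parser sketch for that layout; the format is inferred
# from the expected test values, not taken from the actual muchos code:
def parse_hosts(hosts_path):
    hosts = {}
    with open(hosts_path) as f:
        for line in f:
            fields = line.split()
            if not fields or fields[0].startswith('#'):
                continue  # skip blank lines and comments
            public_ip = fields[2] if len(fields) > 2 else None
            hosts[fields[0]] = (fields[1], public_ip)
    return hosts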
import os
import sys
from os.path import isfile, join


def main():
    deploy_path = os.environ.get('MUCHOS_HOME')
    if not deploy_path:
        exit('ERROR - MUCHOS_HOME env variable must be set!')
    if not os.path.isdir(deploy_path):
        exit('ERROR - Directory set by MUCHOS_HOME does not exist: '
             + deploy_path)

    config_path = join(deploy_path, "conf/muchos.props")
    if not isfile(config_path):
        exit('ERROR - A config file does not exist at ' + config_path)
    checksums_path = join(deploy_path, "conf/checksums")
    if not isfile(checksums_path):
        exit('ERROR - A checksums file does not exist at ' + checksums_path)

    hosts_dir = join(deploy_path, "conf/hosts/")

    # parse command line args
    retval = parse_args(hosts_dir)
    if not retval:
        print("Invalid command line arguments. For help, use 'muchos -h'")
        sys.exit(1)
    (opts, action, args) = retval

    hosts_path = join(hosts_dir, opts.cluster)
    templates_path = join(deploy_path, "conf/templates/")

    config = DeployConfig(deploy_path, config_path, hosts_path,
                          checksums_path, templates_path, opts.cluster)
    config.verify_config(action)

    if action == 'config':
        if opts.property == 'all':
            config.print_all()
        else:
            config.print_property(opts.property)
    else:
        cluster_type = config.get('general', 'cluster_type')
        if cluster_type == 'existing':
            from muchos.existing import ExistingCluster
            cluster = ExistingCluster(config)
            cluster.perform(action)
        elif cluster_type == 'ec2':
            from muchos.ec2 import Ec2Cluster, Ec2ClusterTemplate
            if config.has_option('ec2', 'cluster_template'):
                cluster = Ec2ClusterTemplate(config)
            else:
                cluster = Ec2Cluster(config)
            cluster.perform(action)
        elif cluster_type == 'azure':
            from muchos.azure import VmssCluster
            cluster = VmssCluster(config)
            cluster.perform(action)
        else:
            exit('Unknown cluster_type: ' + cluster_type)
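# main() resolves every path relative to MUCHOS_HOME, so the CLI can run from
# any working directory once that variable is set. A minimal entry point plus
# usage sketch (the shell lines are illustrative, not the project's docs):
#
#   export MUCHOS_HOME=/path/to/fluo-muchos
#   muchos -h
#
if __name__ == '__main__':
    main()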
def test_azure_cluster():
    c = DeployConfig("muchos", '../conf/muchos.props.example',
                     '../conf/hosts/example/example_cluster',
                     '../conf/checksums', '../conf/templates', 'mycluster')
    # Since we are sharing a single muchos.props.example file, stub the
    # cluster type to azure (the file itself defaults to ec2).
    c.cluster_type = 'azure'
    assert c.checksum_ver('accumulo', '1.9.0') == \
        'f68a6145029a9ea843b0305c90a7f5f0334d8a8ceeea94734267ec36421fe7fe'
    assert c.checksum('accumulo') == \
        'df172111698c7a73aa031de09bd5589263a6b824482fbb9b4f0440a16602ed47'
    assert c.get('azure', 'vm_sku') == 'Standard_D8s_v3'
    assert c.get('azure', 'managed_disk_type') == 'Standard_LRS'
    assert c.user_home() == '/home/centos'
    assert c.mount_root() == '/var/data'
    assert c.force_format() == 'no'
    assert c.worker_data_dirs() == ['/var/data1', '/var/data2', '/var/data3']
    assert c.default_data_dirs() == ['/var/data1', '/var/data2', '/var/data3']
    assert c.metrics_drive_ids() == ['var-data1', 'var-data2', 'var-data3']
    assert c.shutdown_delay_minutes() == '0'
    assert c.mounts(2) == ['/var/data0', '/var/data1']
    assert c.node_type('worker1') == 'worker'
    assert c.node_type('leader1') == 'default'
    assert c.has_option('azure', 'resource_group')
    assert c.has_option('azure', 'vnet')
    assert c.has_option('azure', 'vnet_cidr')
    assert c.has_option('azure', 'subnet')
    assert c.has_option('azure', 'subnet_cidr')
    assert c.has_option('azure', 'numnodes')
    assert c.has_option('azure', 'location')
    assert c.instance_tags() == {}
    assert len(c.nodes()) == 6
    assert c.get_node('leader1') == ['namenode', 'resourcemanager',
                                     'accumulomaster', 'zookeeper']
    assert c.get_node('leader2') == ['metrics']
    assert c.get_node('worker1') == ['worker', 'swarmmanager']
    assert c.get_node('worker2') == ['worker']
    assert c.get_node('worker3') == ['worker']
    assert c.has_service('accumulomaster')
    assert not c.has_service('fluo')
    assert c.get_service_hostnames('worker') == ['worker1', 'worker2',
                                                 'worker3', 'worker4']
    assert c.get_service_hostnames('zookeeper') == ['leader1']
    assert c.get_hosts() == {
        'leader1': ('10.0.0.0', '23.0.0.0'), 'leader2': ('10.0.0.1', None),
        'worker1': ('10.0.0.2', None), 'worker2': ('10.0.0.3', None),
        'worker3': ('10.0.0.4', None), 'worker4': ('10.0.0.5', None)}
    assert c.get_public_ip('leader1') == '23.0.0.0'
    assert c.get_private_ip('leader1') == '10.0.0.0'
    assert c.cluster_name == 'mycluster'
    assert c.get_cluster_type() == 'azure'
    assert c.version("accumulo").startswith('2.')
    assert c.version("fluo").startswith('1.')
    assert c.version("hadoop").startswith('3.')
    assert c.version("zookeeper").startswith('3.')
    assert c.get_service_private_ips("worker") == [
        '10.0.0.2', '10.0.0.3', '10.0.0.4', '10.0.0.5']
    assert c.get('general', 'proxy_hostname') == "leader1"
    assert c.proxy_public_ip() == "23.0.0.0"
    assert c.proxy_private_ip() == "10.0.0.0"
    assert c.get('general', 'cluster_user') == "centos"
    assert c.get('general', 'cluster_group') == "centos"
    assert c.get_non_proxy() == [
        ('10.0.0.1', 'leader2'), ('10.0.0.2', 'worker1'),
        ('10.0.0.3', 'worker2'), ('10.0.0.4', 'worker3'),
        ('10.0.0.5', 'worker4')]
    assert c.get_host_services() == [
        ('leader1', 'namenode resourcemanager accumulomaster zookeeper'),
        ('leader2', 'metrics'),
        ('worker1', 'worker swarmmanager'),
        ('worker2', 'worker'), ('worker3', 'worker'), ('worker4', 'worker')]
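# metrics_drive_ids() above maps each data dir to an id by dropping the
# leading slash and replacing '/' with '-' ('/var/data1' -> 'var-data1').
# A one-line sketch of that mapping, inferred from the expected values
# rather than from the real implementation:
def drive_ids(data_dirs):
    return [d.lstrip('/').replace('/', '-') for d in data_dirs]


# drive_ids(['/var/data1', '/var/data2']) == ['var-data1', 'var-data2']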
def test_ec2_cluster():
    c = DeployConfig("muchos", '../conf/muchos.props.example',
                     '../conf/hosts/example/example_cluster',
                     '../conf/checksums', '../conf/templates', 'mycluster')
    assert c.checksum_ver('accumulo', '1.9.0') == \
        'f68a6145029a9ea843b0305c90a7f5f0334d8a8ceeea94734267ec36421fe7fe'
    assert c.checksum('accumulo') == \
        'df172111698c7a73aa031de09bd5589263a6b824482fbb9b4f0440a16602ed47'
    assert c.get('ec2', 'default_instance_type') == 'm5d.large'
    assert c.get('ec2', 'worker_instance_type') == 'm5d.large'
    assert c.get('ec2', 'aws_ami') == 'ami-9887c6e7'
    assert c.user_home() == '/home/centos'
    assert c.max_ephemeral() == 1
    assert c.mount_root() == '/media/ephemeral'
    assert c.fstype() == 'ext3'
    assert c.force_format() == 'no'
    assert c.worker_data_dirs() == ['/media/ephemeral0']
    assert c.default_data_dirs() == ['/media/ephemeral0']
    assert c.metrics_drive_ids() == ['media-ephemeral0']
    assert c.shutdown_delay_minutes() == '0'
    assert c.mounts(2) == ['/media/ephemeral0', '/media/ephemeral1']
    assert c.node_type_map() == {
        'default': {'mounts': ['/media/ephemeral0'],
                    'devices': ['/dev/nvme1n1']},
        'worker': {'mounts': ['/media/ephemeral0'],
                   'devices': ['/dev/nvme1n1']}}
    assert c.node_type('worker1') == 'worker'
    assert c.node_type('leader1') == 'default'
    assert not c.has_option('ec2', 'vpc_id')
    assert not c.has_option('ec2', 'subnet_id')
    assert c.get('ec2', 'key_name') == 'my_aws_key'
    assert c.instance_tags() == {}
    assert len(c.nodes()) == 6
    assert c.get_node('leader1') == ['namenode', 'resourcemanager',
                                     'accumulomaster', 'zookeeper']
    assert c.get_node('leader2') == ['metrics']
    assert c.get_node('worker1') == ['worker', 'swarmmanager']
    assert c.get_node('worker2') == ['worker']
    assert c.get_node('worker3') == ['worker']
    assert c.has_service('accumulomaster')
    assert not c.has_service('fluo')
    assert c.get_service_hostnames('worker') == ['worker1', 'worker2',
                                                 'worker3', 'worker4']
    assert c.get_service_hostnames('zookeeper') == ['leader1']
    assert c.get_hosts() == {
        'leader1': ('10.0.0.0', '23.0.0.0'), 'leader2': ('10.0.0.1', None),
        'worker1': ('10.0.0.2', None), 'worker2': ('10.0.0.3', None),
        'worker3': ('10.0.0.4', None), 'worker4': ('10.0.0.5', None)}
    assert c.get_public_ip('leader1') == '23.0.0.0'
    assert c.get_private_ip('leader1') == '10.0.0.0'
    assert c.cluster_name == 'mycluster'
    assert c.get_cluster_type() == 'ec2'
    assert c.version("accumulo").startswith('2.')
    assert c.version("fluo").startswith('1.')
    assert c.version("hadoop").startswith('3.')
    assert c.version("zookeeper").startswith('3.')
    assert c.get_service_private_ips("worker") == [
        '10.0.0.2', '10.0.0.3', '10.0.0.4', '10.0.0.5']
    assert c.get('general', 'proxy_hostname') == "leader1"
    assert c.proxy_public_ip() == "23.0.0.0"
    assert c.proxy_private_ip() == "10.0.0.0"
    assert c.get('general', 'cluster_user') == "centos"
    assert c.get('general', 'cluster_group') == "centos"
    assert c.get_non_proxy() == [
        ('10.0.0.1', 'leader2'), ('10.0.0.2', 'worker1'),
        ('10.0.0.3', 'worker2'), ('10.0.0.4', 'worker3'),
        ('10.0.0.5', 'worker4')]
    assert c.get_host_services() == [
        ('leader1', 'namenode resourcemanager accumulomaster zookeeper'),
        ('leader2', 'metrics'),
        ('worker1', 'worker swarmmanager'),
        ('worker2', 'worker'), ('worker3', 'worker'), ('worker4', 'worker')]
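# checksum() and checksum_ver() above read conf/checksums, which pairs each
# product and version with the SHA-256 of its release tarball so downloads
# can be verified. A hedged sketch of such a lookup, assuming a
# "product:version:hash" line format (an illustration, not the actual
# DeployConfig code):
def load_checksums(checksums_path):
    sums = {}
    with open(checksums_path) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            product, version, sha256 = line.split(':', 2)
            sums[(product, version)] = sha256
    return sums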
def test_defaults():
    c = DeployConfig("muchos", '../conf/muchos.props.example',
                     '../conf/hosts/example/example_cluster',
                     '../conf/checksums', 'mycluster')
    assert c.checksum_ver('accumulo', '1.9.0') == \
        'f68a6145029a9ea843b0305c90a7f5f0334d8a8ceeea94734267ec36421fe7fe'
    assert c.checksum('accumulo') == \
        'baa5e0929248ff0d96355bc7fb42a5b75d183a83364519296e07b0adbb089180'
    assert c.get('ec2', 'default_instance_type') == 'm5d.large'
    assert c.get('ec2', 'worker_instance_type') == 'm5d.large'
    assert c.get('ec2', 'aws_ami') == 'ami-9887c6e7'
    assert c.max_ephemeral() == 1
    assert c.mounts(2) == ['/media/ephemeral0', '/media/ephemeral1']
    assert c.node_type_map() == {
        'default': {'mounts': ['/media/ephemeral0'],
                    'devices': ['/dev/nvme1n1']},
        'worker': {'mounts': ['/media/ephemeral0'],
                   'devices': ['/dev/nvme1n1']}}
    assert c.node_type('worker1') == 'worker'
    assert c.node_type('leader1') == 'default'
    assert not c.has_option('ec2', 'vpc_id')
    assert not c.has_option('ec2', 'subnet_id')
    assert c.get('ec2', 'key_name') == 'my_aws_key'
    assert c.instance_tags() == {}
    assert len(c.nodes()) == 6
    assert c.get_node('leader1') == ['namenode', 'resourcemanager',
                                     'accumulomaster', 'zookeeper']
    assert c.get_node('worker1') == ['worker', 'swarmmanager']
    assert c.get_node('worker2') == ['worker']
    assert c.get_node('worker3') == ['worker']
    assert c.has_service('accumulomaster')
    assert not c.has_service('fluo')
    assert c.get_service_hostnames('worker') == ['worker1', 'worker2',
                                                 'worker3', 'worker4']
    assert c.get_service_hostnames('zookeeper') == ['leader1']
    assert c.get_hosts() == {
        'leader1': ('10.0.0.0', '23.0.0.0'), 'leader2': ('10.0.0.1', None),
        'worker1': ('10.0.0.2', None), 'worker2': ('10.0.0.3', None),
        'worker3': ('10.0.0.4', None), 'worker4': ('10.0.0.5', None)}
    assert c.get_public_ip('leader1') == '23.0.0.0'
    assert c.get_private_ip('leader1') == '10.0.0.0'
    assert c.cluster_name == 'mycluster'
    assert c.version("accumulo").startswith('2.')
    assert c.version("fluo").startswith('1.')
    assert c.version("hadoop").startswith('3.')
    assert c.version("zookeeper").startswith('3.')
    assert c.get_service_private_ips("worker") == [
        '10.0.0.2', '10.0.0.3', '10.0.0.4', '10.0.0.5']
    assert c.get('general', 'proxy_hostname') == "leader1"
    assert c.proxy_public_ip() == "23.0.0.0"
    assert c.proxy_private_ip() == "10.0.0.0"
    assert c.get('general', 'cluster_basedir') == "/home/centos"
    assert c.get('general', 'cluster_user') == "centos"
    assert c.get_non_proxy() == [
        ('10.0.0.1', 'leader2'), ('10.0.0.2', 'worker1'),
        ('10.0.0.3', 'worker2'), ('10.0.0.4', 'worker3'),
        ('10.0.0.5', 'worker4')]
    assert c.get_host_services() == [
        ('leader1', 'namenode resourcemanager accumulomaster zookeeper'),
        ('leader2', 'metrics'),
        ('worker1', 'worker swarmmanager'),
        ('worker2', 'worker'), ('worker3', 'worker'), ('worker4', 'worker')]
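# mounts(n) above evidently appends an index to the mount root (the ec2 test
# shows mount_root() == '/media/ephemeral'). A minimal sketch of that pattern,
# inferred from the expected values rather than from the real implementation:
def make_mounts(mount_root, num):
    return [mount_root + str(i) for i in range(num)]


# make_mounts('/media/ephemeral', 2) == ['/media/ephemeral0',
#                                        '/media/ephemeral1']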
from os import environ, path


def main():
    deploy_path = environ.get("MUCHOS_HOME")
    if not deploy_path:
        exit("ERROR - MUCHOS_HOME env variable must be set!")
    if not path.isdir(deploy_path):
        exit("ERROR - Directory set by MUCHOS_HOME does not exist: "
             + deploy_path)

    config_path = path.join(deploy_path, "conf/muchos.props")
    if not path.isfile(config_path):
        exit("ERROR - A config file does not exist at " + config_path)
    checksums_path = path.join(deploy_path, "conf/checksums")
    if not path.isfile(checksums_path):
        exit("ERROR - A checksums file does not exist at " + checksums_path)

    hosts_dir = path.join(deploy_path, "conf/hosts/")

    # parse command line args
    retval = parse_args(hosts_dir)
    if not retval:
        print("Invalid command line arguments. For help, use 'muchos -h'")
        exit(1)
    (opts, action, args) = retval

    hosts_path = path.join(hosts_dir, opts.cluster)
    templates_path = path.join(deploy_path, "conf/templates/")

    config = DeployConfig(
        deploy_path,
        config_path,
        hosts_path,
        checksums_path,
        templates_path,
        opts.cluster,
    )
    config.verify_config(action)

    if action == "config":
        if opts.property == "all":
            config.print_all()
        else:
            config.print_property(opts.property)
    else:
        cluster_type = config.get("general", "cluster_type")
        if cluster_type == "existing":
            from muchos.existing import ExistingCluster

            cluster = ExistingCluster(config)
            cluster.perform(action)
        elif cluster_type == "ec2":
            from muchos.ec2 import Ec2Cluster, Ec2ClusterTemplate

            if config.has_option("ec2", "cluster_template"):
                cluster = Ec2ClusterTemplate(config)
            else:
                cluster = Ec2Cluster(config)
            cluster.perform(action)
        elif cluster_type == "azure":
            from muchos.azure import VmssCluster

            cluster = VmssCluster(config)
            cluster.perform(action)
        else:
            exit("Unknown cluster_type: " + cluster_type)
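# main() only relies on parse_args returning None for bad input, or a tuple
# (opts, action, args) where opts carries .cluster and .property. A
# hypothetical minimal implementation compatible with that contract; the
# flags and defaults are illustrative assumptions, not the real muchos CLI:
from optparse import OptionParser


def parse_args_sketch(hosts_dir, argv=None):
    parser = OptionParser(usage="muchos [options] <action>")
    parser.add_option("-c", "--cluster", dest="cluster")
    parser.add_option("-p", "--property", dest="property", default="all")
    (opts, args) = parser.parse_args(argv)
    if not args or not opts.cluster:
        return None  # caller treats a falsy result as invalid arguments
    return opts, args[0], args[1:]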