def build_node_repr(name):
    """Return the 'host:peer_port:leader_election_port' string for *name*."""
    parts = [
        get_specific_host(get_service_name(), name),
        get_specific_port(get_service_name(), name, 'peer'),
        get_specific_port(get_service_name(), name, 'leader_election'),
    ]
    return '{}:{}:{}'.format(*parts)
def build_node_repr(name):
    """Return 'host:peer_port:leader_election_port[:observer]' for *name*.

    Reads the <SERVICE>_<NAME>_PEER_TYPE environment variable (dashes
    replaced by underscores, upper-cased); when its value is 'observer',
    sets conf['peerType'] = 'observer' as a side effect and appends
    ':observer' to the returned representation.
    """
    global conf
    service = get_service_name()
    # Environment variable key: '<service>_<name>_peer_type', normalized.
    peer_type_env = ('%s_%s_peer_type' % (service, name)).replace('-', '_').upper()
    # Build the base representation once instead of duplicating the format
    # call in both branches (the original repeated it verbatim).
    base = '{}:{}:{}'.format(
        get_specific_host(service, name),
        get_specific_port(service, name, 'peer'),
        get_specific_port(service, name, 'leader_election'))
    if os.environ.get(peer_type_env) == 'observer':
        # Side effect: flips the shared ZooKeeper config into observer mode.
        conf['peerType'] = 'observer'
        return base + ':observer'
    return base
def getKafkaZookeeperNode(name):
    """Return the 'host:client_port' address of the named zookeeper node."""
    host = get_specific_host('zookeeper', name)
    port = get_specific_port('zookeeper', name, 'client')
    return '{}:{}'.format(host, port)
# Smoke-test script (Python 2): prints the results of the same guest-utility
# calls on two objects, 'orig' and 'emap', side by side so their outputs can
# be compared by eye.
# NOTE(review): 'orig' and 'emap' are defined outside this chunk — presumably
# two implementations of the maestro entity map; confirm against the full file.
print 'get_container_internal_address()'
print orig.get_container_internal_address()
print emap.get_container_internal_address()
print 'get_port(name, default = )'
print orig.get_port('smtp')
print emap.get_port('smtp')
print 'get_node_list(service, ports=None)'
print orig.get_node_list('cassandra')
print emap.get_node_list('cassandra')
print orig.get_node_list('cassandra', ports = ['rpc'])
print emap.get_node_list('cassandra', ports = ['rpc'])
print 'get_specific_host(service, container)'
print orig.get_specific_host('cassandra', 'cassandra1')
print emap.get_specific_host('cassandra', 'cassandra1')
print 'get_specific_port(service, container, port,'
print orig.get_specific_port('cassandra', 'cassandra1', 'storage')
print emap.get_specific_port('cassandra', 'cassandra1', 'storage')
print 'get_specific_exposed_port'
# 'orig' may have no exposed port mapping for cassandra; the bare except
# treats any failure as "not present".
try:
    print orig.get_specific_exposed_port('cassandra', 'cassandra1', 'storage')
except:
    print 'there wasnt one for cassandra'
print emap.get_specific_exposed_port('cassandra', 'cassandra1', 'storage')
try:
    print orig.get_specific_exposed_port('rails', 'rails1', 'smtp')
# NOTE(review): this chunk ends inside the try above — its matching except
# clause lies beyond the visible source.
# Guest-side setup script (Python 2 / maestro): generates and runs shells.sh,
# which appends every hadoop-slave to spark's conf/slaves and copies the
# spark distribution to each slave host over ssh/scp.
import os
import sys
import subprocess
import shutil
from jinja2 import Environment
from maestro.guestutils import get_container_name, \
    get_container_host_address, \
    get_environment_name, \
    get_node_list, \
    get_port, \
    get_service_name, \
    get_specific_host, \
    get_specific_port, \
    _get_service_instance_names

# Map each hadoop-slave host address -> instance name.
# (Renamed from 'dict', which shadowed the builtin.)
servers = _get_service_instance_names('hadoop-slave')
slave_hosts = {}
for server in servers:
    slave_hosts[get_specific_host('hadoop-slave', server)] = server

# Shell-script template: the first loop writes one slave name per line into
# spark's conf/slaves; the second creates the target directory on each slave
# and scp's the spark tree across.
# SECURITY(review): the root password '123' is hard-coded into the generated
# script — it should come from configuration/secrets, not the image.
# NOTE(review): line breaks inside the template were reconstructed from a
# collapsed source line — confirm against the original file.
slaves = """{% for key, value in _dict.iteritems() %}echo "{{ value }}" >> /usr/local/spark/spark-1.5.1-bin-hadoop2.6/conf/slaves
{% endfor %}{% for key, value in _dict.iteritems() %}sshpass -p '123' ssh {{ key }} -l root "mkdir -p /usr/local/spark"
scp -r /usr/local/spark/spark-1.5.1-bin-hadoop2.6 root@{{ key }}:/usr/local/spark
{% endfor %}"""

with open("shells.sh", "wb") as f:
    f.write(Environment().from_string(slaves).render(_dict=slave_hosts))
subprocess.call(["bash", "shells.sh"])
# Guest-side setup script (Python 2 / maestro): renders the Hadoop 'slaves'
# file content (one slave hostname per line) from the hadoop-slave service
# instances, then starts preparing an scp command to distribute the hadoop
# tree.
from jinja2 import Environment
from maestro.guestutils import get_container_name, \
    get_container_host_address, \
    get_environment_name, \
    get_node_list, \
    get_port, \
    get_service_name, \
    get_specific_host, \
    get_specific_port, \
    _get_service_instance_names

# Map each hadoop-slave host address -> instance name.
# NOTE(review): the local name 'dict' shadows the builtin.
servers = _get_service_instance_names('hadoop-slave')
dict={}
for server in servers:
    host = get_specific_host('hadoop-slave', server)
    dict[host] = server

# Template emitting one 'echo <name> >> .../slaves' shell line per host.
slaves = """{% for key, value in _dict.iteritems() %}echo {{ value }} >> /usr/local/hadoop/etc/hadoop/slaves
{% endfor %}"""
slaves_file= Environment().from_string(slaves).render(_dict=dict)
#f = open("/usr/local/hadoop/etc/hadoop/slaves", "wb")
#f.write(slaves_file)
#f.close()
#f = open("/usr/local/hadoop/etc/hadoop/masters", "wb")
#f.write("Master\n")
#f.close()

# Also register the spark master host under the instance name 'Master'.
host = get_specific_host('spark-master', 'Master')
dict[host] = 'Master'
# NOTE(review): the triple-quoted string below is NOT terminated within this
# chunk — it continues past the visible source.
shell_single = """scp -r /usr/local/hadoop root@{{ _ip }}:/usr/local
# Guest-side setup script (Python 2 / maestro): distributes this host's SSH
# public key to every hadoop-slave so subsequent ssh/scp can run without a
# password prompt. Generates sendKey.sh from a template and executes it.
import sys
import time
import subprocess
from jinja2 import Environment
from maestro.guestutils import get_container_name, \
    get_container_host_address, \
    get_environment_name, \
    get_node_list, \
    get_port, \
    get_service_name, \
    get_specific_host, \
    get_specific_port, \
    _get_service_instance_names

# Resolve every hadoop-slave instance name to its host address.
servers = _get_service_instance_names('hadoop-slave')
servers_ip = map(lambda name: get_specific_host('hadoop-slave', name), servers)
# Fixed wait, presumably for the slaves' sshd to come up — TODO confirm timing.
time.sleep(30)

# Template: per slave, an expect block scp's the public key while answering
# the password prompt, then sshpass appends it to authorized_keys.
# NOTE(review): the password is masked as '******' in source; the shell/expect
# quoting is fragile — do not reflow. Line breaks inside the template were
# reconstructed from a collapsed source line; confirm against the original.
sendkeybash = """#!/bin/bash
{% for slave_ip in slave_ips %}expect -c "
spawn "scp\\ /root/.ssh/id_rsa.pub\\ root@{{ slave_ip }}:/root"
expect "?assword:"
send \\\"{{ password }}\\r\\\"
expect eof"
sshpass -p '{{ password }}' ssh {{ slave_ip }} -l root "cat ~/id_rsa.pub >> ~/.ssh/authorized_keys"
{% endfor %}"""
fileContent = Environment().from_string(sendkeybash).render(password="******", slave_ips=servers_ip)
with open("sendKey.sh", "wb") as f:
    f.write(fileContent)
subprocess.call(["bash", "sendKey.sh"])