    get_port, \
    get_service_name

# Setup logging for Kazoo.
logging.basicConfig(stream=sys.stdout, level=logging.INFO)

os.chdir(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))

KAFKA_CONFIG_FILE = os.path.join('config', 'server.properties')
KAFKA_LOGGING_CONFIG = os.path.join('config', 'log4j.properties')

KAFKA_ZOOKEEPER_BASE = os.environ.get(
    'ZOOKEEPER_BASE', '/{}/kafka'.format(get_environment_name()))

LOG_PATTERN = "%d{yyyy'-'MM'-'dd'T'HH:mm:ss.SSSXXX} %-5p [%-35.35t] [%-36.36c]: %m%n"

ZOOKEEPER_NODE_LIST = ','.join(get_node_list('zookeeper', ports=['client']))

KAFKA_CONFIG_TEMPLATE = """# Kafka configuration for %(node_name)s
broker.id=%(broker_id)d
advertised.host.name=%(host_address)s
port=%(broker_port)d

num.network.threads=%(num_threads)d
num.io.threads=%(num_threads)d

socket.send.buffer.bytes=1048576
socket.receive.buffer.bytes=1048576
socket.request.max.bytes=104857600

log.dirs=%(log_dirs)s
num.partitions=%(num_partitions)d
# Add the ZooKeeper node list with peer and leader election ports and figure
# out our own ID. ZOOKEEPER_SERVER_IDS contains a comma-separated list of
# node:id tuples describing the server ID of each node in the cluster, by its
# container name. If not specified, we assume single-node mode.
if os.environ.get('ZOOKEEPER_SERVER_IDS'):
    servers = os.environ['ZOOKEEPER_SERVER_IDS'].split(',')
    for server in servers:
        node, id = server.split(':')
        conf['server.{}'.format(id)] = build_node_repr(node)
        if node == get_container_name():
            ZOOKEEPER_NODE_ID = id

# Verify that the number of declared nodes matches the size of the cluster.
ZOOKEEPER_NODE_COUNT = len(get_node_list(get_service_name()))
ZOOKEEPER_CLUSTER_SIZE = len(
    [i for i in conf.keys() if i.startswith('server.')])

# If no ZOOKEEPER_SERVER_IDS is defined, we expect to be in single-node mode
# so no more than one node can be declared in the cluster.
if ZOOKEEPER_CLUSTER_SIZE == 0 and ZOOKEEPER_NODE_COUNT != 1:
    sys.stderr.write(
        ('Missing ZOOKEEPER_SERVER_IDS declaration for ' +
         '{}-node ZooKeeper cluster!\n').format(ZOOKEEPER_NODE_COUNT))
    sys.exit(1)

# If we got nodes from ZOOKEEPER_SERVER_IDS, we expect exactly the same
# number of nodes declared in the cluster.
if ZOOKEEPER_CLUSTER_SIZE > 0 and \
        ZOOKEEPER_CLUSTER_SIZE != ZOOKEEPER_NODE_COUNT:
    sys.stderr.write(
        ('Mismatched number of nodes between ZOOKEEPER_SERVER_IDS ({}) ' +
         'and the declared cluster ({})!\n').format(
            ZOOKEEPER_CLUSTER_SIZE, ZOOKEEPER_NODE_COUNT))
    sys.exit(1)
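# For illustration (hypothetical container names): a three-node cluster could
# be declared as
#
#   ZOOKEEPER_SERVER_IDS=zookeeper1:1,zookeeper2:2,zookeeper3:3
#
# which adds server.1, server.2 and server.3 entries to conf, and sets
# ZOOKEEPER_NODE_ID on the node whose container name matches its tuple.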
print emap.get_container_name()

print 'get_container_host_address()'
print orig.get_container_host_address()
print emap.get_container_host_address()

print 'get_container_internal_address()'
print orig.get_container_internal_address()
print emap.get_container_internal_address()

print 'get_port(name, default=)'
print orig.get_port('smtp')
print emap.get_port('smtp')

print 'get_node_list(service, ports=None)'
print orig.get_node_list('cassandra')
print emap.get_node_list('cassandra')
print orig.get_node_list('cassandra', ports=['rpc'])
print emap.get_node_list('cassandra', ports=['rpc'])

print 'get_specific_host(service, container)'
print orig.get_specific_host('cassandra', 'cassandra1')
print emap.get_specific_host('cassandra', 'cassandra1')

print 'get_specific_port(service, container, port)'
print orig.get_specific_port('cassandra', 'cassandra1', 'storage')
print emap.get_specific_port('cassandra', 'cassandra1', 'storage')

print 'get_specific_exposed_port'
try:
    print orig.get_specific_exposed_port('cassandra', 'cassandra1', 'storage')
except Exception as e:
    # Assumed completion: get_specific_exposed_port can raise when the port
    # is not exposed in this environment; print the error and keep going.
    print e
}

with open(KAFKA_LOGGING_CONFIG, 'w+') as f:
    f.write(KAFKA_LOGGING_TEMPLATE % logging_model)

KAFKA_ZOOKEEPER_BASE = os.environ.get('ZOOKEEPER_DATADIR', '/tmp/zookeeper')
# ZOOKEEPER_NODE_LIST = os.environ.get('ZOOKEEPER_CONNECT', 'localhost:2181')


def ensure_kafka_zk_path(retries=1):
    while retries >= 0:
        # Connect to the ZooKeeper nodes. Use a pretty large timeout in case
        # they were just started; we should wait for them for a little while.
        # Note that Kazoo's timeout parameter is in seconds, not milliseconds.
        zk = KazooClient(hosts=ZOOKEEPER_NODE_LIST, timeout=30)
        try:
            zk.start()
            zk.ensure_path(KAFKA_ZOOKEEPER_BASE)
            return True
        except Exception:
            retries -= 1
        finally:
            zk.stop()
    return False


kafka_nodes = get_node_list('kafka')
print kafka_nodes

if not ensure_kafka_zk_path(retries=len(kafka_nodes)):
    sys.stderr.write('Could not create the base ZooKeeper path for Kafka!\n')
    sys.exit(1)

# Start the Kafka broker.
os.execl('bin/kafka-server-start.sh', 'kafka', 'config/server.properties')
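# Sketch, not part of the original script: Kafka's zookeeper.connect setting
# takes the chroot suffix once, after the last host:port pair, so the values
# above combine as, e.g. (hypothetical hosts):
#
#   zookeeper.connect=zk1:2181,zk2:2181/tmp/zookeeper
#
# where 'zk1:2181,zk2:2181' is ZOOKEEPER_NODE_LIST and '/tmp/zookeeper' is
# KAFKA_ZOOKEEPER_BASE, the znode that ensure_kafka_zk_path() creates above.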
import os
import sys
from subprocess import call

from maestro.guestutils import get_node_list

# Append the Kafka chroot to every ZooKeeper host:port before joining the
# list; the result is passed to the Spark application as its second argument.
ZOOKEEPER_NODE_LIST = ','.join(
    s + '/zk-kafka/kafka'
    for s in get_node_list('zookeeper', ports=['client']))

KAFKA_TOPIC = os.getenv('KAFKA_TOPIC')
if not KAFKA_TOPIC:
    sys.stderr.write('KAFKA_TOPIC is not set!\n')
    sys.exit(1)

call(['mvn', 'package'])
call(['/opt/spark/bin/spark-submit',
      '--class', 'com.tisensor.App',
      'target/tisensor_spark-jar-with-dependencies.jar',
      KAFKA_TOPIC, ZOOKEEPER_NODE_LIST])
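# For illustration (hypothetical hosts): with two ZooKeeper containers
# exposing their client port on 2181, ZOOKEEPER_NODE_LIST above evaluates to
#
#   'zk1:2181/zk-kafka/kafka,zk2:2181/zk-kafka/kafka'
#
# i.e. the '/zk-kafka/kafka' chroot is appended to every host:port pair, not
# once at the end as in Kafka's own zookeeper.connect convention.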
import os
import random

from maestro.guestutils import get_node_list, get_port

LOAD_GENERATION = os.getenv('LOAD_GENERATION')

# Any non-empty LOAD_GENERATION value enables the load generator.
if LOAD_GENERATION:
    # Start load generation against a randomly chosen REST API node.
    REST_URL_LIST = get_node_list('restapi', ports=['rest'])
    REST_URL = 'http://' + random.choice(REST_URL_LIST) + '/datapoint'
    # Keep the default as a string so it concatenates cleanly below.
    SIMULATED_TISENSOR_COUNT = os.getenv('SIMULATED_TISENSOR_COUNT', '5')
    SIMULATED_TISENSOR_ID_HANDLE = os.getenv('SIMULATED_TISENSOR_ID_HANDLE',
                                             'TISENSOR_DEFAULT_ID_')
    # os.execl's second argument becomes argv[0]; pass 'mvn' there so the
    # real flags are not swallowed as the program name.
    os.execl('/usr/bin/mvn', 'mvn', '-e', '-X', 'compile', 'exec:java',
             '-Dexec.mainClass=com.load.SimulatedTiSensor',
             '-Dexec.args=' + REST_URL + ' ' + SIMULATED_TISENSOR_COUNT +
             ' ' + SIMULATED_TISENSOR_ID_HANDLE)
else:
    # Start the REST API.
    KAFKA_BROKER_LIST = ','.join(get_node_list('kafka', ports=['broker']))
    os.environ['KAFKA_BROKER_LIST'] = KAFKA_BROKER_LIST
    os.execl('/usr/bin/mvn', 'mvn', '-X', 'tomcat7:run',
             '-Dmaven.tomcat.port=' + str(get_port('rest')))
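# For illustration (hypothetical values): with a chosen REST node
# 'restapi1:8080' and the defaults above, the load-generation branch execs:
#
#   mvn -e -X compile exec:java \
#       -Dexec.mainClass=com.load.SimulatedTiSensor \
#       '-Dexec.args=http://restapi1:8080/datapoint 5 TISENSOR_DEFAULT_ID_'
#
# exec:java splits exec.args on whitespace into the application's argv.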