log4j.appender.R.layout=org.apache.log4j.PatternLayout log4j.appender.R.layout.ConversionPattern=%(log_pattern)s """ replication = min(int(os.environ.get("REPLICATION", 2)), len(get_node_list('kafka'))) # Generate the Kafka configuration from the defined environment variables. config_model = { 'node_name': get_container_name(), 'broker_id': int(os.environ.get('BROKER_ID', 0)), 'host_address': get_container_host_address(), 'broker_port': get_port('broker', 9092), # Default log directory is /var/lib/kafka/logs. 'log_dirs': os.environ.get('LOG_DIRS', '/var/lib/kafka/logs'), 'num_partitions': int(os.environ.get('NUM_PARTITIONS', 8)), # Default retention is 7 days (168 hours). 'retention_hours': int(os.environ.get('RETENTION_HOURS', 168)), # Default retention is only based on time. 'retention_bytes': int(os.environ.get('RETENTION_BYTES', -1)), # Segment size (default is 0.5GB) 'log_segment_bytes': int(os.environ.get('LOG_SEGMENT_BYTES', 536870912)), # Minimum interval between rolling new log segments (default 1 week)
LOG_PATTERN = ( "%d{yyyy'-'MM'-'dd'T'HH:mm:ss.SSSXXX} %-5p [%-35.35t] [%-36.36c]: %m%n") # Build the ZooKeeper node configuration. conf = { 'tickTime': 2000, 'initLimit': 10, 'syncLimit': 5, 'dataDir': ZOOKEEPER_DATA_DIR, 'clientPort': get_port('client', 2181), 'quorumListenOnAllIPs': True, 'autopurge.snapRetainCount': int(os.environ.get('MAX_SNAPSHOT_RETAIN_COUNT', 10)), 'autopurge.purgeInterval': int(os.environ.get('PURGE_INTERVAL', 24)), } def build_node_repr(name): """Build the representation of a node with peer and leader-election ports.""" return '{}:{}:{}'.format( get_specific_host(get_service_name(), name), get_specific_port(get_service_name(), name, 'peer'),
# Smoke test comparing two maestro.guestutils implementations, `orig`
# (reference) and `emap`, by printing the result of each API call from both
# so the outputs can be diffed by eye.
#
# FIX: the original used Python-2-only `print x` statements. The
# single-argument parenthesized form below prints identically under
# Python 2 and is valid Python 3.
print(emap.get_service_name())

print('get_container_name()')
print(orig.get_container_name())
print(emap.get_container_name())

print('get_container_host_address()')
print(orig.get_container_host_address())
print(emap.get_container_host_address())

print('get_container_internal_address()')
print(orig.get_container_internal_address())
print(emap.get_container_internal_address())

print('get_port(name, default = )')
print(orig.get_port('smtp'))
print(emap.get_port('smtp'))

print('get_node_list(service, ports=None)')
print(orig.get_node_list('cassandra'))
print(emap.get_node_list('cassandra'))
print(orig.get_node_list('cassandra', ports=['rpc']))
print(emap.get_node_list('cassandra', ports=['rpc']))

print('get_specific_host(service, container)')
print(orig.get_specific_host('cassandra', 'cassandra1'))
print(emap.get_specific_host('cassandra', 'cassandra1'))

print('get_specific_port(service, container, port,')
print(orig.get_specific_port('cassandra', 'cassandra1', 'storage'))
print(emap.get_specific_port('cassandra', 'cassandra1', 'storage'))
KAFKA_LOGGING_TEMPLATE = """# Log4j configuration, logs to rotating file log4j.rootLogger=INFO,R log4j.appender.R=org.apache.log4j.RollingFileAppender log4j.appender.R.File=/var/docker-share/kafka/%(container_name)s.log log4j.appender.R.MaxFileSize=100MB log4j.appender.R.MaxBackupIndex=10 log4j.appender.R.layout=org.apache.log4j.PatternLayout log4j.appender.R.layout.ConversionPattern=%(log_pattern)s """ # Generate the Kafka configuration from the defined environment variables. config_model = { 'node_name': get_container_name(), 'broker_id': int(os.environ.get('BROKER_ID', 0)), 'broker_port': get_port('broker'), # Default log directory is /var/lib/kafka/logs. 'log_dirs': os.environ.get('LOG_DIRS', '/var/lib/kafka/logs'), 'zookeeper_connect': ZOOKEEPER_NODE_LIST, 'num_partitions': int(os.environ.get('NUM_PARTITIONS',1)), 'host_ip': HOST_IP } with open(KAFKA_CONFIG_FILE, 'w+') as conf: conf.write(KAFKA_CONFIG_TEMPLATE % config_model) # Setup the logging configuration. logging_model = { 'container_name': get_container_name(), 'log_pattern': LOG_PATTERN
log4j.appender.R=org.apache.log4j.RollingFileAppender log4j.appender.R.File=/var/log/%(service_name)s/%(container_name)s.log log4j.appender.R.MaxFileSize=100MB log4j.appender.R.MaxBackupIndex=10 log4j.appender.R.layout=org.apache.log4j.PatternLayout log4j.appender.R.layout.ConversionPattern=%(log_pattern)s """ replication = min(int(os.environ.get("REPLICATION", 2)), len(get_node_list('kafka'))) # Generate the Kafka configuration from the defined environment variables. config_model = { 'node_name': get_container_name(), 'broker_id': int(os.environ.get('BROKER_ID', 0)), 'host_address': get_container_host_address(), 'broker_port': get_port('broker', 9092), # Default log directory is /var/lib/kafka/logs. 'log_dirs': os.environ.get('LOG_DIRS', '/var/lib/kafka/logs'), 'num_partitions': int(os.environ.get('NUM_PARTITIONS', 8)), # Default retention is 7 days (168 hours). 'retention_hours': int(os.environ.get('RETENTION_HOURS', 168)), # Default retention is only based on time. 'retention_bytes': int(os.environ.get('RETENTION_BYTES', -1)), 'zookeeper_nodes': ZOOKEEPER_NODE_LIST, 'zookeeper_base': KAFKA_ZOOKEEPER_BASE, 'flush_interval_ms': int(os.environ.get('FLUSH_INTERVAL_MS', 10000)), 'flush_interval_msgs': int(os.environ.get('FLUSH_INTERVAL_MSGS', 10000)), 'num_threads': int(os.environ.get('NUM_THREADS', 8)), 'replication_factor': replication, 'num_replica_fetchers': int(os.environ.get('NUM_REPLICA_FETCHERS', 4)), 'replica_socket_timeout_ms': int(os.environ.get('REPLICA_SOCKET_TIMEOUT_MS', 2500)),
# Paths and identity defaults for this ZooKeeper node.
ZOOKEEPER_CONFIG_FILE = os.path.join('conf', 'zoo.cfg')
ZOOKEEPER_LOG_CONFIG_FILE = os.path.join('conf', 'log4j.properties')
ZOOKEEPER_DATA_DIR = '/var/lib/zookeeper'
ZOOKEEPER_NODE_ID = None

# log4j conversion pattern: ISO-8601-style timestamp, level, thread, category.
LOG_PATTERN = (
    "%d{yyyy'-'MM'-'dd'T'HH:mm:ss.SSSXXX} %-5p [%-35.35t] [%-36.36c]: %m%n")

# Core server settings for zoo.cfg; the snapshot auto-purge knobs are
# overridable through the environment and fall back to sensible defaults.
conf = {
    'tickTime': 2000,
    'initLimit': 10,
    'syncLimit': 5,
    'dataDir': ZOOKEEPER_DATA_DIR,
    'clientPort': get_port('client', 2181),
    'quorumListenOnAllIPs': True,
    'autopurge.snapRetainCount':
        int(os.environ.get('MAX_SNAPSHOT_RETAIN_COUNT', 10)),
    'autopurge.purgeInterval': int(os.environ.get('PURGE_INTERVAL', 24)),
}


def build_node_repr(name):
    """Return the ``host:peerPort:leaderElectionPort`` string for *name*."""
    host = get_specific_host(get_service_name(), name)
    peer_port = get_specific_port(get_service_name(), name, 'peer')
    election_port = get_specific_port(get_service_name(), name,
                                      'leader_election')
    return '{}:{}:{}'.format(host, peer_port, election_port)
sys.stderr.write( 'Starting {}, node id#{} of a {}-node ZooKeeper cluster...\n'.format( get_container_name(), ZOOKEEPER_NODE_ID, ZOOKEEPER_CLUSTER_SIZE)) else: sys.stderr.write( 'Starting {} as a single-node ZooKeeper cluster...\n'.format( get_container_name())) jvmflags = [ '-server', '-showversion', '-Dvisualvm.display.name="{}/{}"'.format(get_environment_name(), get_container_name()), ] jmx_port = get_port('jmx', -1) if jmx_port != -1: jvmflags += [ '-Djava.rmi.server.hostname={}'.format(get_container_host_address()), '-Dcom.sun.management.jmxremote.port={}'.format(jmx_port), '-Dcom.sun.management.jmxremote.rmi.port={}'.format(jmx_port), '-Dcom.sun.management.jmxremote.authenticate=false', '-Dcom.sun.management.jmxremote.local.only=false', '-Dcom.sun.management.jmxremote.ssl=false', ] os.environ['JVMFLAGS'] = ' '.join(jvmflags) + ' ' + os.environ.get( 'JVM_OPTS', '') # Start ZooKeeper os.execl('bin/zkServer.sh', 'zookeeper', 'start-foreground')
log4j.appender.R=org.apache.log4j.RollingFileAppender log4j.appender.R.File=/var/log/%(service_name)s/%(container_name)s.log log4j.appender.R.MaxFileSize=100MB log4j.appender.R.MaxBackupIndex=10 log4j.appender.R.layout=org.apache.log4j.PatternLayout log4j.appender.R.layout.ConversionPattern=%(log_pattern)s """ replication = min(int(os.environ.get("REPLICATION", 2)), len(get_node_list("kafka"))) # Generate the Kafka configuration from the defined environment variables. config_model = { "node_name": get_container_name(), "broker_id": int(os.environ.get("BROKER_ID", 0)), "host_address": get_container_host_address(), "broker_port": get_port("broker", 9092), # Default log directory is /var/lib/kafka/logs. "log_dirs": os.environ.get("LOG_DIRS", "/var/lib/kafka/logs"), "num_partitions": int(os.environ.get("NUM_PARTITIONS", 8)), # Default retention is 7 days (168 hours). "retention_hours": int(os.environ.get("RETENTION_HOURS", 168)), # Default retention is only based on time. "retention_bytes": int(os.environ.get("RETENTION_BYTES", -1)), # Segment size (default is 0.5GB) "log_segment_bytes": int(os.environ.get("LOG_SEGMENT_BYTES", 536870912)), # Minimum interval between rolling new log segments (default 1 week) "log_roll_hours": int(os.environ.get("LOG_ROLL_HOURS", 24 * 7)), "zookeeper_nodes": ZOOKEEPER_NODE_LIST, "zookeeper_base": KAFKA_ZOOKEEPER_BASE, "flush_interval_ms": int(os.environ.get("FLUSH_INTERVAL_MS", 10000)), "flush_interval_msgs": int(os.environ.get("FLUSH_INTERVAL_MSGS", 10000)),
sys.stderr.write( 'Starting {}, node id#{} of a {}-node ZooKeeper cluster...\n'.format( get_container_name(), ZOOKEEPER_NODE_ID, ZOOKEEPER_CLUSTER_SIZE)) else: sys.stderr.write( 'Starting {} as a single-node ZooKeeper cluster...\n'.format( get_container_name())) jvmflags = [ '-server', '-showversion', '-Dvisualvm.display.name="{}/{}"'.format(get_environment_name(), get_container_name()), ] jmx_port = get_port('jmx', -1) if jmx_port != -1: jvmflags += [ '-Djava.rmi.server.hostname={}'.format(get_container_host_address()), '-Dcom.sun.management.jmxremote.port={}'.format(jmx_port), '-Dcom.sun.management.jmxremote.authenticate=false', '-Dcom.sun.management.jmxremote.local.only=false', '-Dcom.sun.management.jmxremote.ssl=false', ] if RMI_ENABLED.lower() == 'true': rmi_port = get_port('rmi', jmx_port) if RMI_LOCAL_HOST.lower() == 'true': rmi_server = 'localhost' else: rmi_server = get_container_host_address() if rmi_port != -1:
import os
import random

from maestro.guestutils import get_container_name, \
    get_container_host_address, \
    get_environment_name, \
    get_node_list, \
    get_port, \
    get_service_name

# LOAD_GENERATION selects between the load generator and the REST API.
# BUG FIX: the old test `LOAD_GENERATION and bool(LOAD_GENERATION) is True`
# was true for ANY non-empty string — even "false" or "0" enabled load
# generation. Parse the value explicitly instead.
LOAD_GENERATION = os.getenv('LOAD_GENERATION', '')

if LOAD_GENERATION.strip().lower() in ('1', 'true', 'yes'):
    # Start load generation against one randomly chosen REST endpoint.
    REST_URL_LIST = get_node_list('restapi', ports=['rest'])
    REST_URL = 'http://' + random.choice(REST_URL_LIST) + '/datapoint'
    # BUG FIX: the default must be a string — the value is concatenated into
    # the mvn argument below, and the old int default `5` raised a TypeError
    # whenever SIMULATED_TISENSOR_COUNT was not set in the environment.
    SIMULATED_TISENSOR_COUNT = os.getenv('SIMULATED_TISENSOR_COUNT', '5')
    SIMULATED_TISENSOR_ID_HANDLE = os.getenv('SIMULATED_TISENSOR_ID_HANDLE',
                                             'TISENSOR_DEFAULT_ID_')
    # Replace this process with the maven-driven load generator.
    os.execl('/usr/bin/mvn', '-e', '-X', 'compile', 'exec:java',
             '-Dexec.mainClass=com.load.SimulatedTiSensor',
             '-Dexec.args=' + REST_URL + ' ' + SIMULATED_TISENSOR_COUNT +
             ' ' + SIMULATED_TISENSOR_ID_HANDLE)
else:
    # Start the REST API, telling it where the Kafka brokers are.
    KAFKA_BROKER_LIST = ','.join(get_node_list('kafka', ports=['broker']))
    os.environ['KAFKA_BROKER_LIST'] = KAFKA_BROKER_LIST
    os.execl('/usr/bin/mvn', '-X', 'tomcat7:run',
             '-Dmaven.tomcat.port=' + str(get_port('rest')))
sys.stderr.write( 'Starting {}, node id#{} of a {}-node ZooKeeper cluster...\n' .format(get_container_name(), ZOOKEEPER_NODE_ID, ZOOKEEPER_CLUSTER_SIZE)) else: sys.stderr.write('Starting {} as a single-node ZooKeeper cluster...\n' .format(get_container_name())) jvmflags = [ '-server', '-showversion', '-Dvisualvm.display.name="{}/{}"'.format( get_environment_name(), get_container_name()), ] jmx_port = get_port('jmx', -1) if jmx_port != -1: jvmflags += [ '-Djava.rmi.server.hostname={}'.format(get_container_host_address()), '-Dcom.sun.management.jmxremote.port={}'.format(jmx_port), '-Dcom.sun.management.jmxremote.rmi.port={}'.format(jmx_port), '-Dcom.sun.management.jmxremote.authenticate=false', '-Dcom.sun.management.jmxremote.local.only=false', '-Dcom.sun.management.jmxremote.ssl=false', ] os.environ['JVMFLAGS'] = ' '.join(jvmflags) + ' ' + os.environ.get('JVM_OPTS', '') # Start ZooKeeper os.execl('bin/zkServer.sh', 'zookeeper', 'start-foreground')