Example #1
import os
import sys

from maestro.guestutils import (get_container_name, get_node_list,
                                get_service_name, get_specific_host,
                                get_specific_port)


def build_node_repr(name):
    """Build the representation of a node with its peer and leader-election
    ports."""
    return '{}:{}:{}'.format(
        get_specific_host(get_service_name(), name),
        get_specific_port(get_service_name(), name, 'peer'),
        get_specific_port(get_service_name(), name, 'leader_election'))


# Add the ZooKeeper node list with peer and leader election ports and figure
# out our own ID. ZOOKEEPER_SERVER_IDS contains a comma-separated list of
# node:id tuples describing the server ID of each node in the cluster, by its
# container name. If not specified, we assume single-node mode.
if os.environ.get('ZOOKEEPER_SERVER_IDS'):
    servers = os.environ['ZOOKEEPER_SERVER_IDS'].split(',')
    for server in servers:
        node, id = server.split(':')
        conf['server.{}'.format(id)] = build_node_repr(node)
        if node == get_container_name():
            ZOOKEEPER_NODE_ID = id

# Verify that the number of declared nodes matches the size of the cluster.
ZOOKEEPER_NODE_COUNT = len(get_node_list(get_service_name()))
ZOOKEEPER_CLUSTER_SIZE = len(
    [i for i in conf.keys() if i.startswith('server.')])

# If no ZOOKEEPER_SERVER_IDS is defined, we expect to be in single-node mode,
# so no more than one node can be declared in the cluster.
if ZOOKEEPER_CLUSTER_SIZE == 0 and ZOOKEEPER_NODE_COUNT != 1:
    sys.stderr.write(
        ('Missing ZOOKEEPER_SERVER_IDS declaration for ' +
         '{}-node ZooKeeper cluster!\n').format(ZOOKEEPER_NODE_COUNT))
    sys.exit(1)


def fix_collectd_file():
    update_in_file('/etc/collectd/collectd.conf', '%%%HOSTNAME%%%',
                   get_service_name() + '.' + get_container_name())
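
The ZOOKEEPER_SERVER_IDS parsing above is easy to exercise outside a container. Below is a minimal, self-contained sketch with a made-up three-node value and a stubbed build_node_repr; the hostnames and the conventional 2888/3888 peer and leader-election ports are illustrative assumptions, not values Maestro guarantees.

import os

# Hypothetical declaration: three containers named zookeeper-1..3, IDs 1..3.
os.environ['ZOOKEEPER_SERVER_IDS'] = 'zookeeper-1:1,zookeeper-2:2,zookeeper-3:3'

def build_node_repr(name):
    # Stub: the real function resolves host and ports via maestro.guestutils.
    # 2888/3888 are ZooKeeper's conventional peer/leader-election ports.
    return '{}:2888:3888'.format(name)

conf = {}
for server in os.environ['ZOOKEEPER_SERVER_IDS'].split(','):
    node, id = server.split(':')
    conf['server.{}'.format(id)] = build_node_repr(node)

print(conf['server.2'])  # -> zookeeper-2:2888:3888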
Example #4
import maestro.guestutils as orig
import maestro_etcd_map1 as emap

print('testing maestro and etcdmap')

print(orig.get_environment_name())
print(emap.get_environment_name())

print('get_service_name()')
print(orig.get_service_name())
print(emap.get_service_name())

print('get_container_name()')
print(orig.get_container_name())
print(emap.get_container_name())

print('get_container_host_address()')
print(orig.get_container_host_address())
print(emap.get_container_host_address())

print('get_container_internal_address()')
print(orig.get_container_internal_address())
print(emap.get_container_internal_address())

print('get_port(name, default=None)')
print(orig.get_port('smtp'))
print(emap.get_port('smtp'))

print('get_node_list(service, ports=None)')
print(orig.get_node_list('cassandra'))
print(emap.get_node_list('cassandra'))
"""

KAFKA_LOGGING_TEMPLATE = """# Log4j configuration, logs to rotating file
log4j.rootLogger=INFO,R

log4j.appender.R=org.apache.log4j.RollingFileAppender
log4j.appender.R.File=/var/docker-share/kafka/%(container_name)s.log
log4j.appender.R.MaxFileSize=100MB
log4j.appender.R.MaxBackupIndex=10
log4j.appender.R.layout=org.apache.log4j.PatternLayout
log4j.appender.R.layout.ConversionPattern=%(log_pattern)s
"""

# Generate the Kafka configuration from the defined environment variables.
config_model = {
    'node_name': get_container_name(),
    'broker_id': int(os.environ.get('BROKER_ID', 0)),
    'broker_port': get_port('broker'),
    # Default log directory is /var/lib/kafka/logs.
    'log_dirs': os.environ.get('LOG_DIRS', '/var/lib/kafka/logs'),
    'zookeeper_connect': ZOOKEEPER_NODE_LIST,
    'num_partitions': int(os.environ.get('NUM_PARTITIONS', 1)),
    'host_ip': HOST_IP
}

with open(KAFKA_CONFIG_FILE, 'w+') as conf:
    conf.write(KAFKA_CONFIG_TEMPLATE % config_model)


# Setup the logging configuration.
logging_model = {
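
Both the broker configuration and the logging configuration in this example are rendered with classic %-style interpolation: every %(key)s placeholder in the template string is looked up in the model dict. A stand-alone sketch of that mechanism, with made-up model values:

template = """log4j.appender.R.File=/var/log/%(service_name)s/%(container_name)s.log
log4j.appender.R.layout.ConversionPattern=%(log_pattern)s
"""

model = {
    'service_name': 'kafka',                    # illustrative values only
    'container_name': 'kafka-1',
    'log_pattern': '%d{ISO8601} %-5p %c: %m%n',
}

print(template % model)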
Example #7
KAFKA_LOGGING_TEMPLATE = """# Log4j configuration, logs to rotating file
log4j.rootLogger=INFO,R

log4j.appender.R=org.apache.log4j.RollingFileAppender
log4j.appender.R.File=/var/log/%(service_name)s/%(container_name)s.log
log4j.appender.R.MaxFileSize=100MB
log4j.appender.R.MaxBackupIndex=10
log4j.appender.R.layout=org.apache.log4j.PatternLayout
log4j.appender.R.layout.ConversionPattern=%(log_pattern)s
"""

replication = min(int(os.environ.get('REPLICATION', 2)),
                  len(get_node_list('kafka')))
# Generate the Kafka configuration from the defined environment variables.
config_model = {
    'node_name': get_container_name(),
    'broker_id': int(os.environ.get('BROKER_ID', 0)),
    'host_address': get_container_host_address(),
    'broker_port': get_port('broker', 9092),
    # Default log directory is /var/lib/kafka/logs.
    'log_dirs': os.environ.get('LOG_DIRS', '/var/lib/kafka/logs'),
    'num_partitions': int(os.environ.get('NUM_PARTITIONS', 8)),
    # Default retention is 7 days (168 hours).
    'retention_hours': int(os.environ.get('RETENTION_HOURS', 168)),
    # Default retention is only based on time.
    'retention_bytes': int(os.environ.get('RETENTION_BYTES', -1)),
    'zookeeper_nodes': ZOOKEEPER_NODE_LIST,
    'zookeeper_base': KAFKA_ZOOKEEPER_BASE,
    'flush_interval_ms': int(os.environ.get('FLUSH_INTERVAL_MS', 10000)),
    'flush_interval_msgs': int(os.environ.get('FLUSH_INTERVAL_MSGS', 10000)),
    'num_threads': int(os.environ.get('NUM_THREADS', 8)),
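
The replication line at the top of this example caps the requested replication factor at the number of declared Kafka nodes, because Kafka will not create topics with a replication factor larger than the broker count. A quick sketch of that arithmetic with a stubbed node list:

import os

def get_node_list(service):
    # Stub: pretend exactly two kafka containers were declared.
    return ['kafka-1:9092', 'kafka-2:9092']

replication = min(int(os.environ.get('REPLICATION', 2)),
                  len(get_node_list('kafka')))
print(replication)  # 2 here; 1 if only one broker were declared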
Example #8
KAFKA_LOGGING_TEMPLATE = """# Log4j configuration, logs to rotating file
log4j.rootLogger=INFO,R

log4j.appender.R=org.apache.log4j.RollingFileAppender
log4j.appender.R.File=/var/log/%(service_name)s/%(container_name)s.log
log4j.appender.R.MaxFileSize=100MB
log4j.appender.R.MaxBackupIndex=10
log4j.appender.R.layout=org.apache.log4j.PatternLayout
log4j.appender.R.layout.ConversionPattern=%(log_pattern)s
"""

replication = min(int(os.environ.get("REPLICATION", 2)),
                  len(get_node_list("kafka")))
# Generate the Kafka configuration from the defined environment variables.
config_model = {
    "node_name": get_container_name(),
    "broker_id": int(os.environ.get("BROKER_ID", 0)),
    "host_address": get_container_host_address(),
    "broker_port": get_port("broker", 9092),
    # Default log directory is /var/lib/kafka/logs.
    "log_dirs": os.environ.get("LOG_DIRS", "/var/lib/kafka/logs"),
    "num_partitions": int(os.environ.get("NUM_PARTITIONS", 8)),
    # Default retention is 7 days (168 hours).
    "retention_hours": int(os.environ.get("RETENTION_HOURS", 168)),
    # Default retention is only based on time.
    "retention_bytes": int(os.environ.get("RETENTION_BYTES", -1)),
    # Segment size (default is 0.5GB)
    "log_segment_bytes": int(os.environ.get("LOG_SEGMENT_BYTES", 536870912)),
    # Minimum interval between rolling new log segments (default 1 week)
    "log_roll_hours": int(os.environ.get("LOG_ROLL_HOURS", 24 * 7)),
    "zookeeper_nodes": ZOOKEEPER_NODE_LIST,
    ports."""
    return '{}:{}:{}'.format(
        get_specific_host(get_service_name(), name),
        get_specific_port(get_service_name(), name, 'peer'),
        get_specific_port(get_service_name(), name, 'leader_election'))

if os.environ.get('ZOOKEEPER_SERVER_IDS'):
    servers = os.environ['ZOOKEEPER_SERVER_IDS'].split(',')
    for server in servers:
        node, id = server.split(':')
        # Append a server.<id>=<host:peer:election> line to the config file.
        with open(ZOOKEEPER_CONFIG_FILE, 'a') as myfile:
            myfile.write('server.{}={}\n'.format(id, build_node_repr(node)))
        if node == get_container_name():
            ZOOKEEPER_NODE_ID = id
# Write this node's server ID to the myid file in the data directory.
myid_path = os.path.join(os.environ.get('ZOOKEEPER_DATADIR'), 'myid')
if os.path.isfile(myid_path):
    os.remove(myid_path)
with open(myid_path, 'w+') as f:
    f.write('%s\n' % ZOOKEEPER_NODE_ID)

# Dump the generated configuration to stderr for debugging.
with open(ZOOKEEPER_CONFIG_FILE, 'r') as myfile:
    data = myfile.read()
    sys.stderr.write(data)

with open(ZOOKEEPER_LOG_CONFIG_FILE, 'w+') as f:
    f.write("""# Log4j configuration, logs to rotating file
log4j.rootLogger=INFO,R
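
Earlier in this example the script writes the node's ID to the myid file; ZooKeeper identifies each ensemble member by that bare numeric ID. A minimal round-trip sketch, using a temporary directory in place of ZOOKEEPER_DATADIR:

import os
import tempfile

datadir = tempfile.mkdtemp()             # stands in for ZOOKEEPER_DATADIR
myid_path = os.path.join(datadir, 'myid')

with open(myid_path, 'w+') as f:
    f.write('%s\n' % 2)                  # this node's (made-up) server ID

with open(myid_path) as f:
    print(f.read().strip())              # -> 2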