Example #1
0
class DockerEventManager(Thread):
    """Daemon thread that relays Docker daemon events onto a manager.

    Each payload from the event stream is decoded, mapped through
    DOCKER_EVENTS by its status, and fired on the "docker" channel.
    """

    def __init__(self, manager, url=None):
        super(DockerEventManager, self).__init__()

        self.manager = manager
        self.url = url

        # Daemon thread: must not block interpreter shutdown.
        self.daemon = True

        self.client = Client(self.url)

    def run(self):
        # Blocks on the Docker event stream until the client is closed.
        for raw in self.client.events():
            data = loads(raw)
            kind = data.pop("status")
            event_cls = DOCKER_EVENTS.get(kind)
            if event_cls is None:
                print(
                    "WARNING: Unknown Docker Event <{0:s}({1:s})>".format(
                        kind, repr(data)
                    ),
                    file=sys.stderr
                )
                continue
            self.manager.fire(event_cls(**data), "docker")

    def stop(self):
        # Closing the client unblocks the event loop in run().
        self.client.close()
class DockerUtils:
    """Thin convenience wrapper around a docker Client bound to one socket."""

    def __init__(self, socketPath):
        # version='auto' negotiates the API version with the daemon.
        self._cli = Client(base_url=socketPath, version='auto')

    def getStats(self, containerName):
        # Streaming, decoded stats generator for the container.
        return self._cli.stats(containerName, True, True)

    def getInfo(self, containerId):
        # Full inspect document for the container.
        return self._cli.inspect_container(container=containerId)

    def getEvents(self, filters):
        # Decoded docker event stream restricted by `filters`.
        return self._cli.events(filters=filters, decode=True)

    def getLogs(self, containerName, streamLog):
        # Container logs; `streamLog` selects a generator vs a blob.
        return self._cli.logs(container=containerName, stream=streamLog)

    def getRegexpFromLogs(self, log_string, regularExpression, group):
        """Return the requested capture group, or None when no match."""
        found = re.search(regularExpression, log_string, re.MULTILINE)
        return found.group(group) if found else None
Example #3
0
def main():
    """Watch container lifecycle events and keep the hosts file in sync."""
    # register the exit signals
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    args = parse_args()
    global hosts_path
    hosts_path = args.file

    docker = Client(base_url='unix://%s' % args.socket)
    event_stream = docker.events(decode=True)

    # Seed the table with containers that are already running.
    for summary in docker.containers(quiet=True, all=False):
        cid = summary["Id"]
        hosts[cid] = get_container_data(docker, cid)

    update_hosts_file()

    # React to lifecycle events to keep the hosts file updated.
    for evt in event_stream:
        state = evt["status"]
        if state == "start":
            cid = evt["id"]
            hosts[cid] = get_container_data(docker, cid)
            update_hosts_file()

        if state in ("stop", "die", "destroy"):
            cid = evt["id"]
            if cid in hosts:
                hosts.pop(cid)
                update_hosts_file()
def main():
    """Track tellendil/{static,dynamic} containers and keep the
    configuration in sync with their bridge-network IP addresses."""
    cli = Client(base_url="unix://var/run/docker.sock")
    hosts = {"static": OrderedDict(), "dynamic": OrderedDict()}

    # Seed with containers that are already running.
    for container in cli.containers():
        data = container["Image"].split("/")

        if len(data) != 2:
            continue

        host, name = data

        if host == "tellendil":
            if name in ["static", "dynamic"]:
                hosts[name][container["Id"]] = container["NetworkSettings"]["Networks"]["bridge"]["IPAddress"]

    update_conf(safe_get(hosts, "static"), safe_get(hosts, "dynamic"))

    for event in cli.events(decode=True):
        data = event.get("from", "").split("/")

        if len(data) != 2:
            continue

        host, name = data

        if event["Action"] == "die" and host == "tellendil":
            if name in ["static", "dynamic"]:
                # A "die" can arrive for a container we never saw start
                # (e.g. started before this process); a plain pop() would
                # raise KeyError and kill the event loop.
                hosts[name].pop(event["id"], None)
                update_conf(safe_get(hosts, "static"), safe_get(hosts, "dynamic"))

        elif event["Action"] == "start" and host == "tellendil":
            if name in ["static", "dynamic"]:
                hosts[name][event["id"]] = cli.inspect_container(event["id"])["NetworkSettings"]["Networks"]["bridge"]["IPAddress"]
                update_conf(safe_get(hosts, "static"), safe_get(hosts, "dynamic"))
Example #5
0
class DockerUtils:
    """Small facade over a docker Client created from a socket path."""

    def __init__(self, socketPath):
        # Let the client negotiate the API version automatically.
        self._cli = Client(base_url=socketPath, version='auto')

    def getStats(self, containerName):
        """One-shot (non-streaming, undecoded) stats for the container."""
        stats_obj = self._cli.stats(containerName, False, False)
        return stats_obj

    def getInfo(self, containerId):
        """Inspect document for the given container id."""
        return self._cli.inspect_container(container=containerId)

    def getEvents(self, filters):
        """Decoded event stream filtered by `filters`."""
        return self._cli.events(filters=filters, decode=True)

    def getLogs(self, containerName, streamLog):
        """Container logs, streamed when `streamLog` is truthy."""
        return self._cli.logs(container=containerName, stream=streamLog)

    def getRegexpFromLogs(self, log_string, regularExpression, group):
        """Search the log text; return capture `group` or None."""
        hit = re.search(regularExpression, log_string, re.MULTILINE)
        if hit is None:
            return None
        return hit.group(group)
Example #6
0
class DockerEventManager(Thread):
    """Background thread translating raw docker events into manager events."""

    def __init__(self, manager, url=None):
        super(DockerEventManager, self).__init__()

        self.manager = manager
        self.url = url

        # Do not keep the interpreter alive on exit.
        self.daemon = True

        self.client = Client(self.url)

    def run(self):
        # Consume the event stream until stop() closes the client.
        for payload in self.client.events():
            event = loads(payload)
            status = event.pop("status")
            handler = DOCKER_EVENTS.get(status)
            if handler is not None:
                self.manager.fire(handler(**event), "docker")
                continue
            # Unknown status: report it rather than drop it silently.
            print("WARNING: Unknown Docker Event <{0:s}({1:s})>".format(
                status, repr(event)),
                  file=sys.stderr)

    def stop(self):
        # Terminates the blocking iteration in run().
        self.client.close()
Example #7
0
def sync(docker_client: docker.Client, zk: KazooClient, node_path: str,
         advertise_name: str):
    """Mirror container lifecycle events into ZooKeeper under node_path."""
    for raw in docker_client.events():
        # `end` is a module-level shutdown flag set elsewhere.
        if end:
            break
        event = json.loads(raw.decode('utf-8'))

        # Only container events that carry an id are interesting.
        if event.get('Type') != 'container' or 'id' not in event:
            continue

        container_id = event['id']
        container_zk_path = "{}/{}".format(node_path, container_id)

        if 'Action' not in event:
            continue

        if event['Action'] == 'destroy':
            # Destroyed containers cannot be inspected: publish empty data.
            update_data(zk, container_zk_path, {}, advertise_name)
        else:
            try:
                details = docker_client.inspect_container(container_id)
                update_data(zk, container_zk_path, details, advertise_name)
            except docker.errors.APIError:
                pass
            except requests.exceptions.HTTPError:
                pass
Example #8
0
def main():
    """Extend the PCR with the image hash of each created/dying container."""
    cli = Client(base_url='unix://var/run/docker.sock')
    for evt in cli.events(decode=True):
        status = get_status(evt)
        if status not in ('create', 'die'):
            continue
        digest = getNewContImageHash(cli, evt)
        print(digest)
        extendPCR(digest)
Example #9
0
 def loop(self):
     """Watch docker events and reconfigure on watched statuses."""
     if not Client: raise ImportError('could not import docker client')
     docker = Client(base_url=self._docker_url)
     logger.info("waiting for events ...")
     # event payloads look like:
     # {"status":"die","id":"123","from":"foobar/eggs:latest","time":1434047926}
     for event in docker.events(decode=True):
         if event['status'] not in self._watch_set:
             continue
         inspects = [docker.inspect_container(c['Id'])
                     for c in docker.containers()]
         self.reconfig(inspects)
Example #10
0
def main():
    """Daemonize, then attest each create/die event via the PCR."""
    cli = Client(base_url='unix://var/run/docker.sock')
    # All event handling happens inside the detached daemon context.
    with daemon.DaemonContext():
        for evt in cli.events(decode=True):
            status = get_status(evt)
            if status not in ('create', 'die'):
                continue
            digest = getNewContImageHash(cli, evt)
            extendPCR(digest)
            performAction(status)
Example #11
0
def ddns():
    """Push a DNS update with a container's IP whenever it starts."""
    client = Client(base_url='unix://var/run/docker.sock')
    for raw in client.events():
        evt = json.loads(raw)
        if evt['status'] != 'start':
            continue
        details = client.inspect_container(evt['id'])
        # Docker reports names as "/name"; strip the leading slash.
        update(details['Name'][1:], details['NetworkSettings']['IPAddress'])
Example #12
0
 def loop(self):
     """Block on docker events; on a watched status, re-inspect
     every running container and hand the results to reconfig()."""
     if not Client: raise ImportError('could not import docker client')
     docker = Client(base_url=self._docker_url)
     logger.info("waiting for events ...")
     for event in docker.events(decode=True):
         # sample payload:
         # {"status":"die","id":"123","from":"foobar/eggs:latest","time":1434047926}
         if event['status'] not in self._watch_set:
             continue
         snapshots = []
         for cn in docker.containers():
             snapshots.append(docker.inspect_container(cn['Id']))
         self.reconfig(snapshots)
Example #13
0
class DockerManager:
    """Consumes the docker event stream and dispatches each message to
    self.handler, surviving (and logging) handler failures.

    Note: self.handler is expected to be provided by a subclass or
    assigned externally; it is not defined here.
    """

    def __init__(self):
        self.cli = Client(base_url='unix://var/run/docker.sock')
        self.events = self.cli.events()

    def run(self):
        # Loop forever; a failing handler must not kill the manager.
        while True:
            # next() builtin instead of .next(): the generator method
            # was removed in Python 3.
            message = next(self.events)
            try:
                self.handler(json.loads(message))
            except Exception:
                # `except Exception, e` is a SyntaxError on Python 3.
                traceback.print_exc()
def event_stream(images=['redis1', ]):
    """
    Read events from docker's event stream and call the container monitor
    """
    # NOTE: `images` is an unused mutable default argument, kept for
    # interface compatibility.
    client = Client(base_url='unix://var/run/docker.sock')

    logging.info('Listening for docker events...')

    try:
        for raw in client.events():
            container_monitor(json.loads(raw))
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop this loop.
        sys.exit(0)
Example #15
0
def listener():
    '''
    Listen to docker events, and invoke event_handler if a
    container was started.
    '''
    log.info('Listening for Docker events')
    cli = Client(version='auto')
    for raw in cli.events():
        evt = json.loads(raw.decode('utf-8'))
        if evt.get('status') != 'start':
            continue
        log.info('Event - Container starting: %s', evt)
        try:
            event_handler(evt)
        except Exception as event_exception:
            # Report the failure but keep listening for further events.
            notifier(False, str(event_exception))
Example #16
0
def event_stream(images=['redis1', ]):
    """
    Read events from docker's event stream and call the container monitor
    """
    # `images` is currently unused; the default is kept for compatibility.
    client = Client(base_url='unix://var/run/docker.sock')

    logging.info('Listening for docker events...')

    try:
        for payload in client.events():
            parsed = json.loads(payload)
            container_monitor(parsed)
    except KeyboardInterrupt:
        sys.exit(0)
Example #17
0
class MonitorThread(Thread):
    """Thread that refreshes the app's proxy whenever a watched docker
    event status arrives on the daemon's event stream."""

    def __init__(self, app, sock, dockerEvents):
        super(MonitorThread, self).__init__()

        self.app = app
        self.sock = sock
        self.dockerEvents = dockerEvents
        self.cli = Client(base_url=self.sock)

    def run(self):
        # Listen for Docker events
        for raw in self.cli.events():
            payload = json.loads(raw.decode('utf-8'))
            if payload.get("status") in self.dockerEvents:
                self.app.updateProxy()

    def stop(self):
        # Closing the client unblocks the event stream in run().
        self.cli.close()
Example #18
0
def listen():
    """Dispatch every docker event to each registered handler,
    logging (but surviving) individual handler failures."""
    logging.info("Eveli is alive!")

    cli = Client(base_url='unix:///var/run/docker.sock')

    # Just in case, run the networks-fixer at startup as well
    ensure_networks(cli)

    # Could use decorators to register the event handlers when we get more of them
    event_handlers = [handler_nginx_reload, handler_compose_networks]

    for event in cli.events():
        event = json.loads(event.decode('utf-8'))
        for handler in event_handlers:
            try:
                handler(cli, event)
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt /
                # SystemExit can still stop the listener.
                logging.exception("Event handler %s failed", handler.__name__)
def event_producer(docker_url, queue, monitor, events):
    """Stream docker events onto `queue` as ("ev", event) tuples.

    Runs until the event stream ends or errors; always sets `monitor`
    on exit so the consumer can detect producer death.
    """
    try:
        client = Client(base_url=docker_url, version="auto")

        # Restrict the stream to the requested event types, if any.
        if events:
            filters = dict(event=events)
        else:
            filters = None

        for raw_event in client.events(decode=True, filters=filters):
            log.debug("Received event %s", raw_event)

            event = DotAccessDict(time=raw_event['time'],
                                  container_id=raw_event['id'],
                                  status=raw_event['status'],
                                  image=raw_event.get('from'),
                                  details={})

            # Destroyed containers can no longer be inspected.
            if raw_event['status'] != 'destroy':
                try:
                    raw = client.inspect_container(raw_event['id'])
                except Exception as e:
                    log.error("can't get container details for %s: %s",
                              raw_event['id'], e)
                    raw = {}

                event['details'] = raw

            if event['details']:
                # Docker names look like "/name"; drop the first slash.
                event['name'] = event['details']['Name'].replace('/', '', 1)
            else:
                event['name'] = event['container_id']

            event = add_dot_access(event)

            queue.put(("ev", event))

    except Exception as e:
        log.error("Error contacting docker daemon: %s", e, exc_info=True)

    finally:
        # Signal the consumer that this producer has stopped.
        monitor.set()
def event_producer(docker_url, queue, monitor, events):
    """Stream docker events into `queue`; set `monitor` when done."""
    try:
        client = Client(base_url=docker_url, version="auto")

        filters = dict(event=events) if events else None

        for raw_event in client.events(decode=True, filters=filters):
            log.debug("Received event %s", raw_event)

            event = DotAccessDict(
                time=raw_event['time'],
                container_id=raw_event['id'],
                status=raw_event['status'],
                image=raw_event.get('from'),
                details={})

            # A destroyed container cannot be inspected any more.
            if raw_event['status'] != 'destroy':
                try:
                    details = client.inspect_container(raw_event['id'])
                except Exception as e:
                    log.error("can't get container details for %s: %s", raw_event['id'], e)
                    details = {}
                event['details'] = details

            if event['details']:
                # Strip the leading slash docker puts in container names.
                event['name'] = event['details']['Name'].replace('/', '', 1)
            else:
                event['name'] = event['container_id']

            queue.put(("ev", add_dot_access(event)))

    except Exception as e:
        log.error("Error contacting docker daemon: %s", e, exc_info=True)

    finally:
        # Always tell the consumer the producer has exited.
        monitor.set()
Example #21
0
def main():
    """
    Main method.

    This method holds what you want to execute when
    the script is run on command line.
    """
    args = get_arguments()
    setup_logging(args)

    docker_server = Client(**(kwargs_from_env()))
    LOGGER.debug(
        ('Setting up a bind server with '
         'ip {} for domain {} and zone {}').format(args.server, args.domain,
                                                   args.zone))
    bind = BindServer(args.server, args.domain, args.key, args.zone)

    if args.register_running:
        LOGGER.info('Registering existing containers')
        for configuration in docker_server.containers():
            container = Container(configuration)
            LOGGER.debug(
                ('Registering container with '
                 'hostname {} and id {}').format(container.hostname,
                                                 container.id))
            bind.register(container)

    # Keep DNS in sync with container lifecycle events.
    for event in docker_server.events():
        data = json.loads(event.decode('utf-8'))
        status = data.get('status', False)
        if status not in ('start', 'destroy', 'die'):
            continue
        LOGGER.debug('Got status {}'.format(status))
        container = Container(docker_server.inspect_container(data.get('id')))
        if status == 'start':
            LOGGER.info('Trying to add {}'.format(container.hostname))
            bind.register(container)
        else:
            # status is 'destroy' or 'die' here.
            LOGGER.info('Trying to delete {}'.format(container.hostname))
            bind.delete(container)
def main():
    """Serve files over HTTP while managing one stats Monitor per container.

    Starts a background HTTP server, then consumes the docker event
    stream: a Monitor thread is spawned on container start and stopped
    on die.
    """
    PORT = int(os.environ['HTTP_PORT'])
    Handler = http.server.SimpleHTTPRequestHandler
    httpd = socketserver.TCPServer(("", PORT), Handler)
    print("serving at port", PORT)
    server = Thread(target=httpd.serve_forever)
    server.start()

    monitors = {}  # cid -> (Monitor, stop Event)
    cli = Client(base_url='unix://var/run/docker.sock')
    for e in cli.events(since=0, decode=True):
        if 'id' not in e:
            print('WARNING: id not in e', e)  # typo fixed (was WARNIG)
            continue
        cid = e['id']
        # Not every event carries a status; .get avoids a KeyError
        # killing the whole loop.
        status = e.get('status')
        if status == 'start':
            print('Starting Monitor on', cid)
            if cid in monitors:
                print('WARNING: Unexpected cid')
                continue
            stop = Event()
            mon = Monitor(cid, stop)
            monitors[cid] = (mon, stop)
            mon.start()
        elif status == 'die':
            print('Stopping Monitor on', cid)  # typo fixed (was Stoping)
            if cid not in monitors:
                print('WARNING: Unexpected cid')
                continue
            mon, stop = monitors.pop(cid)
            stop.set()
            mon.join(1)
            print('Stopped')
        elif status in ('create', 'destroy', 'kill', 'stop', 'commit', 'delete'):
            # Known but uninteresting lifecycle events.
            print(status)
        else:
            print('WARNING: Unexpected status:', e)
Example #23
0
def main():
    """Track tellendil/{static,dynamic} containers and regenerate the
    configuration whenever one starts or dies."""
    cli = Client(base_url="unix://var/run/docker.sock")
    hosts = {"static": OrderedDict(), "dynamic": OrderedDict()}

    # Seed with containers already running at startup.
    for container in cli.containers():
        data = container["Image"].split("/")

        if len(data) != 2:
            continue

        host, name = data

        if host == "tellendil":
            if name in ["static", "dynamic"]:
                hosts[name][container["Id"]] = container["NetworkSettings"][
                    "Networks"]["bridge"]["IPAddress"]

    update_conf(safe_get(hosts, "static"), safe_get(hosts, "dynamic"))

    for event in cli.events(decode=True):
        data = event.get("from", "").split("/")

        if len(data) != 2:
            continue

        host, name = data

        if event["Action"] == "die" and host == "tellendil":
            if name in ["static", "dynamic"]:
                # Tolerate "die" for containers we never tracked: a
                # plain pop() would raise KeyError and end the loop.
                hosts[name].pop(event["id"], None)
                update_conf(safe_get(hosts, "static"),
                            safe_get(hosts, "dynamic"))

        elif event["Action"] == "start" and host == "tellendil":
            if name in ["static", "dynamic"]:
                hosts[name][event["id"]] = cli.inspect_container(
                    event["id"]
                )["NetworkSettings"]["Networks"]["bridge"]["IPAddress"]
                update_conf(safe_get(hosts, "static"),
                            safe_get(hosts, "dynamic"))
Example #24
0
def dockerClient():
    """(Re)create the module-global docker client and event stream.

    Returns `binds`, a module-level value defined elsewhere in this
    file — presumably the current bind configuration; verify against
    the caller.  # NOTE(review)
    """
    global client, events

    client = Client(base_url='unix://var/run/docker.sock', version='auto')
    events = client.events(decode=True)
    return binds


# --- Run loop

# Connect to docker via docker.sock or TCP
if os.environ.get('DOCKER_HOST') is not None:
    kwargs = kwargs_from_env()
    kwargs['tls'].assert_hostname = False
    cli = Client(**kwargs)
else:
    cli = Client(base_url="unix://var/run/docker.sock")

# Blocking event loop
try:
    for event in cli.events(decode=True):

        # On tag events check for running containers running this image
        if event['Action'] == "tag":
            imageId = event['Actor']['ID']
            for container in cli.containers():
                name = event['Actor']['Attributes']['name'].replace(":latest", "")
                nameShort = event['Actor']['Attributes']['name']
                container_data = cli.inspect_container(container["Id"])

                # On container found remove the container and reload it preserving env, ports, and mappings
                if container_data["Config"]["Image"] == nameShort or container_data["Config"]["Image"] == name:
                    print("reloading %s\n" % event['Actor']['Attributes']['name'])

                    cli.remove_container(container["Id"], force=True)
                    result = cli.create_container(
Example #26
0
import os
import json
import sys
import logging

import etcd
from docker import Client

# Docker client bound to the local daemon socket.
cli = Client(base_url='unix:///var/run/docker.sock')

# Decoded (dict) docker event stream, consumed later in this file.
events = cli.events(decode=True)

logging.basicConfig(level=logging.DEBUG)

# etcd endpoint is resolved by hostname (e.g. a linked container).
etcd_hostname = 'etcd'
etcd_client = etcd.Client(host=etcd_hostname)

def get_container(message):
    """Return the event's Actor attributes, with the actor's ID folded
    in under the 'ID' key (mutates the Actor dict in place)."""
    actor = message.get('Actor')
    actor['Attributes']['ID'] = actor['ID']
    return actor['Attributes']

def get_envvar(container, to_find):
    """Return the value of env var `to_find` from the container's
    config, or None when it is not set.

    Note: values containing '=' are truncated at the first '=', same
    as the original split behaviour.
    """
    details = cli.inspect_container(container['ID'])
    prefix = to_find + '='
    for entry in details['Config']['Env']:
        if entry.startswith(prefix):
            return entry.split('=')[1]
    return None

def get_container_hostname(container):
Example #27
0
    vhosts = dict()
    for container in c.containers():
        inspect = c.inspect_container(container['Id'])
        for env in inspect['Config']['Env']:
            if '=' in env:
                item, value = env.split('=', 2)
                if item == 'VIRTUAL_HOST':
                    if ':' in value:
                        vhost, port = value.split(':', 2)
                    else:
                        vhost = value
                        if 'Ports' in container and len(container) > 0 and 'PublicPort' in container['Ports'][0]:
                            port = container['Ports'][0]['PublicPort']
                        else:
                            port = None
                    if port:
                        if vhost not in vhosts: vhosts[vhost] = []
                        vhosts[vhost].append(port)
    return vhosts

if __name__ == "__main__":
    # Re-post the vhost map to the API whenever a container's state changes.
    c = Client(base_url=DOCKER_SOCKET)
    for e in c.events():
        event = json.loads(e)
        if event['status'] in ['start', 'stop', 'die']:
            data['vhosts'] = get_vhosts()
            try:
                requests.post(API_URL, json=data)
            except Exception as exc:
                # Python 3 compatible (was `except Exception, e` /
                # `print e.message`, both Python-2-only); best-effort
                # POST — keep listening on failure.
                print(exc)
Example #28
0
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    filename=(args.log_file if args.log_file != '-' else None))

if args.zone is None:
    # Default the DNS zone to the domain itself.
    args.zone = args.domain

logging.info("Starting with arguments %s", args)

# Docker client configured from DOCKER_HOST / TLS environment variables.
c = Client(**(kwargs_from_env()))

resolver = Resolver()
resolver.nameservers = [args.server]

if args.catchup:
    # Register containers that were already running before we started.
    logging.info("Registering existing containers")
    containers = c.containers()
    for container in containers:
        register_container(container["Id"])

events = c.events(decode=True)

# React to lifecycle events to keep DNS records in sync.
for event in events:
    logging.debug("Event message: %s", repr(event))
    if event['Action'] == "start":
        register_container(event['id'])
    elif event['Action'] == "die":
        remove_container(event['id'])

# 2014-11-28T15:32:04.000000000+01:00 a3d66b00acc9adbdbdbc91cc664d2d94b6a07cc4295c5cf54fcc595e2aa92a43: (from mongo:latest) restart
# 2015-03-05T08:36:14.000000000+01:00 eb75c1a5ad836d008b0fd66bf6b1ea353510175e8caa619e59d9851029b1ceca: (from ggtools/zabbix-server:latest) exec_start: ifconfig eth0
Example #29
0
def dock_events(api):
    """Return the raw docker event stream for the host in api['docker-ip'].

    Connects over TCP on 2375, the default unencrypted remote-API port.
    """
    endpoint = 'tcp://' + api['docker-ip'] + ':2375'
    return Client(base_url=endpoint).events()
Example #30
0
class Agent(object):
    """
    Collects stats from all containers on a single Docker host, appending
    container name and id fields and publishing to redis
    params:
     - docker_host(str): full base_url of a Docker host to connect to.
                  (e.g. 'tcp://127.0.0.1:4243')
     - redis_host(str): redis host to connect to. default 127.0.0.1
     - redis_port(int): port to connect to redis host on. default 6379
    """
    def __init__(self,docker_host,redis_host='127.0.0.1',redis_port=6379):
        self.docker     = Client(base_url=docker_host)
        # NOTE(review): two separate info() calls — each is a round trip
        # to the daemon; could be collapsed into one.
        self.source     = self.docker.info()['Name']
        self.ncpu       = self.docker.info()['NCPU']
        self.redis      = StrictRedis(host=redis_host,port=redis_port,db=0)
        self.children   = []  # collector Processes, keyed by .name == container id
        self.stopped    = False

        log.info('Connected to Docker API at url %s' % docker_host)
        output('starting collector on source %s' % self.source)
        # Constructor blocks: start() runs the event listener loop forever.
        self.start()

    def start(self):
        """Spawn collectors for running containers, then listen forever."""
        signal.signal(signal.SIGINT, self._sig_handler)
        #start a collector for all existing containers
        for cid in [ c['Id'] for c in self.docker.containers() ]:
            self._add_collector(cid)

        #start event listener
        self._event_listener()

    def _sig_handler(self, signal, frame):
        """SIGINT handler: flag collectors to stop and exit the process."""
        self.stopped = True
        sys.exit(0)

    def _event_listener(self):
        """
        Listen for docker events and dynamically add or remove
        stat collectors based on start and die events
        """
        output('started event listener')
        for event in self.docker.events():
            event = json.loads(event.decode('utf-8'))
            if event['status'] == 'start':
                self._add_collector(event['id'])
            if event['status'] == 'die':
                self._remove_collector(event['id'])

    def _collector(self,cid,cname):
        """
        Collector instance collects stats via Docker API streaming web socket,
        appending container name and source, and publishing to redis
        params:
         - cid(str): ID of container to collect stats from
         - cname(str): Name of container
        """
        sleep(5) # sleep to allow container to fully start
        output('started collector for container %s' % cid)
        stats = self.docker.stats(cid, decode=True)
        for stat in stats:
            #append additional information to the returned stat
            stat['container_name'] = cname
            stat['container_id'] = cid
            stat['source'] = self.source
            stat['ncpu'] = self.ncpu
            self.redis.publish('statsquid', msgpack.packb(stat))
            if self.stopped:
                break

    #####
    # collector methods
    #####

    def _add_collector(self,cid):
        """Launch a child Process running _collector for container `cid`."""
        log.debug('creating collector for container %s' % cid)
        # Container names come back as "/name"; strip the slash.
        cname = self.docker.inspect_container(cid)['Name'].strip('/')

        # Process name doubles as the lookup key in _get_collector().
        p = Process(target=self._collector,name=cid,args=(cid,cname))
        p.start()

        self.children.append(p)

    def _remove_collector(self,cid):
        """Terminate the collector Process for `cid` and forget it."""
        c = self._get_collector(cid)
        c.terminate()
        # Busy-wait until the process has actually exited.
        while c.is_alive():
            sleep(.2)
        output('collector stopped for container %s' % cid)
        self.children = [ c for c in self.children if c.name != cid ]

    def _get_collector(self,cid):
        # Raises IndexError if no collector exists for `cid`.
        return [ p for p in self.children if p.name == cid ][0]
Example #31
0
class ECSIDMapAgent():
    def __init__(self, server_endpoint, log_level):
        """Initialise agent state and connect to the local docker daemon.

        server_endpoint: base URL of the id-mapper server to report to.
        log_level: logging level name (e.g. 'INFO').
        """
        self.id_map = {}
        self.new_id_map = {}
        self.server_endpoint = server_endpoint
        # Logger must exist before the metadata calls below, which log.
        self.logger = self._setup_logger(log_level)
        self.backoff_time = 2
        self.current_backoff_time = self.backoff_time
        self.current_retry = 0
        self.max_retries = 2
        self.hostname = gethostname()
        self.instance_ip = self.get_instance_metadata('local-ipv4')  # set these at object constr. as they don't change
        self.instance_id = self.get_instance_metadata('instance-id')
        self.instance_type = self.get_instance_metadata('instance-type')
        self.instance_az = self.get_instance_metadata('placement/availability-zone')
        self.docker_client = Client(base_url='unix://var/run/docker.sock', version='1.21')

    @staticmethod
    def _setup_logger(log_level):
        """Return the agent logger with a stderr handler at `log_level`.

        `log_level` is a level name in any case ('info', 'INFO', ...).
        """
        logger = logging.getLogger('ecs_id_mapper_agent')
        logger.setLevel(log_level.upper())
        logger.propagate = False
        stderr_logs = logging.StreamHandler()
        # Bug fix: the original used getattr(logging, log_level) without
        # .upper(), which breaks for lowercase names (e.g. 'info' resolves
        # to the logging.info function, not a level constant).
        stderr_logs.setLevel(getattr(logging, log_level.upper()))
        stderr_logs.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
        logger.addHandler(stderr_logs)
        return logger

    def _retry(self):
        """Sleep with exponential backoff; return False once retries
        are exhausted (and reset state for the next failure sequence)."""
        self.logger.debug('backoff: {} retry: {} max_retry {}'.format(self.current_backoff_time,
                                                                      self.current_retry, self.max_retries))
        if self.current_retry >= self.max_retries:
            self.logger.info('Max _retry reached. Aborting')
            # Reset so the next failure sequence starts fresh.
            self.current_retry = 0
            self.current_backoff_time = self.backoff_time
            return False
        self.current_retry += 1
        self.logger.info('Sleeping for {} seconds'.format(str(self.current_backoff_time)))
        time.sleep(self.current_backoff_time)
        # Square the wait (2 -> 4 -> 16 ...) for aggressive backoff.
        self.current_backoff_time **= 2
        return True

    def _http_connect(self, url, timeout=1):
        """GET `url`, retrying with backoff on connection errors and
        timeouts; return the response, or None when retries run out."""
        self.logger.debug('Making connection to: {}'.format(url))
        while True:
            try:
                return requests.get(url, timeout=timeout)
            except requests.exceptions.ConnectionError:
                self.logger.error('Connection error accessing URL {}'.format(str(url)))
                if not self._retry():
                    return None
            except requests.exceptions.Timeout:
                self.logger.error(
                    'Connection timeout accessing URL {}. Current timeout value {}'.format(url, str(timeout)))
                if not self._retry():
                    return None

    def get_instance_metadata(self, path):
        """Return the EC2 instance metadata value at `path`, or "" when
        the metadata service is unreachable."""
        self.logger.info('Checking instance metadata for {}'.format(path))
        metadata = self._http_connect('http://169.254.169.254/latest/meta-data/{}'.format(path))
        return metadata.text if metadata else ""

    @staticmethod
    def get_container_ports(container_id):
        """Return (container_port, host_port) for `container_id` via the
        docker CLI, or ("0", "0") when the lookup fails."""
        try:
            cmd = ["/usr/bin/docker", "port", container_id[:12]]
            # universal_newlines=True so stdout/stderr are str on
            # Python 3 — otherwise they are bytes and the str splits
            # below raise an uncaught TypeError.
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                 universal_newlines=True)
            output, errors = p.communicate()
            if errors or len(output) < 1:
                return "0", "0"
            # Output looks like "8080/tcp -> 0.0.0.0:32768".
            cport, hport = output.split("-")
            cport = cport.split('/')[0]
            hport = hport.split(':')[1].strip()
            return cport, hport
        except (OSError, ValueError):
            return "0", "0"

    def get_ecs_agent_tasks(self):
        """Build self.new_id_map from the local ECS agent's task list.

        Queries the ECS agent introspection API on localhost and maps a
        digest of (docker id, task id, desired status) to the combined
        container/task/instance record. Returns False when the ECS agent
        is unreachable; otherwise updates self.new_id_map in place.
        """
        self.logger.info('Requesting data from ECS agent')
        ecs_agent_tasks_response = self._http_connect('http://127.0.0.1:51678/v1/tasks')
        ecs_agent_metadata_response = self._http_connect('http://127.0.0.1:51678/v1/metadata')

        if ecs_agent_tasks_response and ecs_agent_metadata_response:
            ecs_agent_tasks = ecs_agent_tasks_response.json()
            ecs_agent_metadata = ecs_agent_metadata_response.json()
        else:
            # Either endpoint unreachable: bail out without touching state.
            ecs_agent_tasks = None
            ecs_agent_metadata = None
            return False
        id_map = {}
        cluster_name = ecs_agent_metadata['Cluster']
        ecs_agent_version = ecs_agent_metadata['Version']
        for task in ecs_agent_tasks['Tasks']:
            # Task id is the tail of the ARN after "task/".
            task_id = str(task['Arn'].split(":")[-1][5:])
            desired_status = str(task['DesiredStatus'])
            known_status = str(task['KnownStatus'])
            task_name = str(task['Family'])
            task_version = str(task['Version'])
            for container in task['Containers']:
                docker_id = str(container['DockerId'])
                if desired_status == "RUNNING":
                    container_port, instance_port = self.get_container_ports(docker_id)
                else:
                    # Ports are meaningless for non-running tasks.
                    container_port, instance_port = "0", "0"
                container_name = str(container['Name'])
                # Map key is a digest of id + task + desired status, so a
                # status change produces a new entry.
                # NOTE(review): hashlib.update() requires bytes on
                # Python 3 — these str arguments suggest Python 2; confirm.
                pkey = hashlib.sha256()
                pkey.update(docker_id)
                pkey.update(task_id)
                pkey.update(desired_status)
                id_map[pkey.hexdigest()] = {'container_id': docker_id,
                                            'container_name': container_name,
                                            'container_port': container_port,
                                            'task_id': task_id,
                                            'task_name': task_name,
                                            'task_version': task_version,
                                            'instance_port': instance_port,
                                            'instance_ip': self.instance_ip,
                                            'instance_id': self.instance_id,
                                            'instance_type': self.instance_type,
                                            'instance_az': self.instance_az,
                                            'desired_status': desired_status,
                                            'known_status': known_status,
                                            'host_name': self.hostname,
                                            'cluster_name': cluster_name,
                                            'ecs_agent_version': ecs_agent_version,
                                            'sample_time': time.time()}
        # Update internal state
        self.new_id_map = copy.deepcopy(id_map)

    def report_event(self, event_id, action):
        """POST one 'report/event' notification per event id to the server,
        retrying with backoff while the endpoint is unreachable."""
        headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
        for event_key in event_id:
            self.logger.info('Reporting new container event. Action {}. Event id: {}'.format(action, event_key))
            while True:
                try:
                    response = requests.post(path.join(self.server_endpoint, 'report/event'),
                                             headers=headers,
                                             data=json.dumps({'event_id': event_key, 'event': action, 'timestamp': time.time()}))
                except requests.exceptions.ConnectionError:
                    self.logger.info('Unable to connect to server endpoint. Sleeping for {} seconds'.format(
                        str(self.current_backoff_time)))
                    # _retry() sleeps with backoff; give up when it says so.
                    if not self._retry():
                        break
                else:
                    self.logger.debug("HTTP response: " + str(response.status_code))
                    break

    def report_map(self):
        """POST the full current id map to the server, retrying with backoff
        while the endpoint is unreachable."""
        self.logger.info('Reporting current id map')
        headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
        endpoint = path.join(self.server_endpoint, 'report/map')
        while True:
            try:
                response = requests.post(endpoint, headers=headers,
                                         data=json.dumps(self.id_map))
            except requests.exceptions.ConnectionError:
                self.logger.info('Unable to connect to server endpoint. Sleeping for {} seconds'.format(
                    str(self.current_backoff_time)))
                # _retry() sleeps with backoff; give up when it says so.
                if not self._retry():
                    break
            else:
                self.logger.debug("HTTP response: " + str(response.status_code))
                break

    def compare_hash(self):
        """Diff the freshly collected snapshot against the known one and
        report any container additions/removals to the server."""
        self.logger.info('Comparing known state to current state')
        known = set(self.id_map.keys())
        current = set(self.new_id_map.keys())
        containers_added = current - known
        containers_removed = known - current
        if containers_added:
            self.logger.info('Containers added {}'.format(containers_added))
            self.report_event(containers_added, 'added')
        if containers_removed:
            self.logger.info('Containers removed {}'.format(containers_removed))
            self.report_event(containers_removed, 'removed')
        if containers_added or containers_removed:
            # Promote the snapshot to the known state and push the full map.
            self.id_map = copy.deepcopy(self.new_id_map)
            self.report_map()
        else:
            self.logger.info('No container actions to report')

    def run(self):
        """
        Blocking method to run agent: follow the docker event stream and
        resync/report on every container start or die.
        :return:
        """
        self.logger.info('Starting agent')
        for event in self.docker_client.events(decode=True):
            self.logger.debug(str(event))
            # .get() skips event types that carry no 'status' key instead
            # of raising KeyError.
            if event.get('status') in ('start', 'die'):
                # Fixed: previously called through the module-level `agent`
                # global instead of self.
                self.get_ecs_agent_tasks()
                self.compare_hash()
Example #32
0
class Watcher(object):
    """
    The watcher that will be observing the docker events and regenerating
    the /hosts file from the base template plus container entries.
    """
    def __init__(self):
        """
        the watcher will be created with a docker-py client
        and a set of filters for event watching
        :return:
        """
        self.client = Client()
        # Only container lifecycle events are relevant for hosts rewriting.
        self.filters = {
            'event': ['start', 'restart', 'die', 'stop'],
            'type': 'container'
        }
        self.host = Host('docker')

    def process_event(self, event):
        """
        the processing of the event, the event will be a json encoded raw string
        representation of the event; the whole host table is rebuilt from
        scratch on every event
        :param event:
        :return:
        """
        self.host.reset()
        # NOTE(review): host_entries() is not defined in this class here --
        # presumably provided elsewhere; confirm.
        self.host_entries()
        self.host.save()
        self.create_host_file()

    def create_host_file(self):
        """
        Write the base hosts content plus the generated container entries
        to /hosts.
        :return:
        """
        # Fixed: use a context manager so the handle is closed on error too.
        with open('/hosts', 'w+') as f:
            f.write(self.base + "\n" + str(self.host))

    def start(self):
        """
        starts the observer and the event stream for processing
        :return:
        """
        with open('/hosts.d/base', 'r+') as fl:
            self.base = fl.read()
        self.host_entries()
        self.host.save()
        self.create_host_file()
        for event in self.client.events(filters=self.filters):
            self.process_event(event)

    def process_container_host(self, container):
        """
        Register every name of *container* (its '/'-stripped names plus its
        configured hostname) for each of its network IP addresses.
        :param container:
        :return:
        """
        details = self.client.inspect_container(container['Id'])
        hostname = details['Config']['Hostname']
        if details['Config']['Domainname'] != "":
            hostname += '.' + details['Config']['Domainname']
        names = [name.split('/').pop() for name in container['Names']] + [hostname]
        # Fixed: the original used a Python-2-only tuple-parameter lambda and
        # dict.iteritems(), which is a SyntaxError under Python 3.
        for network in details['NetworkSettings']['Networks'].values():
            ip = network['IPAddress']
            for name in names:
                self.host.add(ip, name)
Example #33
0
def main(arguments):
	"""Parse options (config file then CLI), validate them, connect to the
	docker daemon and dispatch network connect/disconnect events.

	:param arguments: argv-style argument list (without the program name)
	:return: exit code -- 0 on clean shutdown, 2 on error
	"""
	logging.basicConfig(level=logging.INFO)
	# retrieve configuration file with default values
	# First pass: only extract -c/--config so the file can provide defaults
	# for the full parser built below.
	config_parser = argparse.ArgumentParser(add_help=False)
	config_parser.add_argument('-c', '--config', nargs='?')
	(config_arg, arguments) = config_parser.parse_known_args(arguments)
	defaults = dict(DEFAULTS)
	if config_arg.config is not None:
		if os.path.isfile(config_arg.config):
			try:
				configp = ConfigParser.SafeConfigParser()
				configp.read(config_arg.config)
				defaults.update(dict(configp.items('docker-listen')))
			except Exception:
				logging.exception('File %s can not be read', config_arg.config)
		else:
			logging.warn('File %s can not be read', config_arg.config)

	# fix boolean value
	# systemctl mode wins over sighup mode; the two are mutually exclusive.
	if defaults['systemctl_enabled'] in ('True', 'yes', '1'):
			defaults['systemctl_enabled'] = True
			defaults['sighup_enabled'] = False
	elif defaults['sighup_enabled'] in ('True', 'yes', '1'):
		defaults['systemctl_enabled'] = False
		defaults['sighup_enabled'] = True
	else:
		defaults['systemctl_enabled'] = False
		defaults['sighup_enabled'] = False
	# clean process name
	defaults['sighup_process_name'] = defaults['sighup_process_name'].replace('\'', '')
	defaults['systemctl_service_name'] = defaults['systemctl_service_name'].replace('\'', '')

	# retrieve configuration ; configuration file provides defaults values
	parser = argparse.ArgumentParser(description=__doc__,
									 formatter_class=argparse.RawDescriptionHelpFormatter)
	parser.add_argument('-c', '--config', nargs='?', metavar='FILE', help='load configuration from .ini config file (section docker-listen)')
	parser.add_argument('--docker-url', nargs='?', metavar='URL', help='docker socket path (unix://var/run/docker.sock) or docker url')
	# NOTE(review): type=bool maps both 'yes' and 'no' to True (any non-empty
	# string is truthy) and the converted value then fails the
	# choices=('yes', 'no') check -- supplying these two flags on the CLI
	# always errors out; they only work via the config-file defaults above.
	parser.add_argument('--systemctl-enabled', nargs='?', type=bool, choices=('yes', 'no'), help='systemctl is enable ?')
	parser.add_argument('--systemctl-service-name', metavar='NAME', nargs='?', help='name of the service to restart')
	parser.add_argument('--sighup-enabled', nargs='?', type=bool, choices=('yes', 'no'), help='sighup process on events ?')
	parser.add_argument('--sighup-process-name', metavar='NAME', nargs='?', help='name of the process to sighup (with killall)')
	parser.add_argument('--hosts-dir', nargs='?', metavar='DIR_PATH', help='directory where hosts files are stored ; all files in this directory will be deleted')
	parser.add_argument('--log-level', nargs='?', choices=('DEBUG', 'INFO', 'WARN', 'ERROR'))
	logging.debug('Using defaults %s', pprint.pformat(defaults))
	parser.set_defaults(**defaults)
	configuration = parser.parse_args(arguments)

	# set logging level and start working
	logging.getLogger('').setLevel(configuration.log_level)
	logging.info('Current configuration : %s', pprint.pformat(configuration))

	# check configuration
	if not os.path.isdir(configuration.hosts_dir):
		logging.error('hosts_dir \'%s\' is not a directory. Stopping.', configuration.hosts_dir)
		return 2
	try:
		client = Client(base_url=configuration.docker_url)
		client.ping()
	except Exception:
		logging.exception('Error communicating with docker socket %s. Stopping.', configuration.docker_url)
		return 2
	logging.info('Docker-listen started')
	events = client.events(decode=True)
	# clear the inherited umask so generated hosts files are world-accessible
	os.umask(0000)
	clean_all(configuration)
	init_all(configuration, client)
	try:
		for event in events:
			logging.debug(event)
			if event['Action'] == 'connect':
				handle_start(configuration, client, event)
			elif event['Action'] == 'disconnect':
				handle_stop(configuration, client, event)
	except Exception:
		logging.exception('Error processing docker events. Stopping.')
		return 2
	return 0
Example #34
0
if __name__ == '__main__':

    # Python 2 script: registers this container's own hostname first, then
    # keeps the hosts file (HOSTS_PATH) in sync with start/die events.
    docker = Client()
    hostname = getenv('HOSTNAME', None)
    if hostname:
        if '.' not in hostname:
            hostname = "%s%s" % (hostname, DOMAIN_SUFFIX)
        print "Adding %s" % hostname
        hosts = Hosts(HOSTS_PATH)
        # eth0's first IPv4 address, extracted with ip/awk/cut.
        my_ip = commands.getoutput("ip -4 -f inet -o addr show eth0 | awk '{print $4}' | cut -d/ -f1")
        hosts.set_one(hostname, my_ip)
        hosts.write(HOSTS_PATH)
        print "Go to http://%s/" % hostname

    # Raw event stream: payloads are JSON strings that need decoding.
    for event in docker.events():
        event = json.loads(event)
        if 'status' not in event:
            continue
        if event['status'] == 'start':
            hostname = get_hostname(event['id'])
            if hostname is None:
                continue
            print "Adding %s" % hostname
            hosts = Hosts(HOSTS_PATH)
            hosts.set_one(hostname, get_ip(event['id']))
            hosts.write(HOSTS_PATH)
        elif event['status'] == 'die':
            hostname = get_hostname(event['id'])
            print "Removing %s" % hostname
            # NOTE(review): this branch ends after constructing Hosts -- no
            # entry is removed or written back; the removal logic appears
            # to be truncated in this copy.
            hosts = Hosts(HOSTS_PATH)
Example #35
0
class DockerSpy:
    """Thin inspection helper around a docker-py client.

        # Usage:
        docker = DockerSpy(container_api_url='unix://var/run/docker.sock')
        print(docker.describe_containers)
        print(docker.env(container='chargeback-sync'))
        print(docker.virtual_port(container='chargeback-sync'))
        print(docker.ip_address(container='chargeback-sync'))
        docker.events()    # decoded event stream, like `docker events`
        docker.containers  # cached inspect data keyed by container name
    """
    def __init__(self,
                 container=None,
                 container_api_url='unix://var/run/docker.sock'):
        """Create the docker client; optionally pre-cache one container's
        inspect data."""
        self._client = Client(base_url=container_api_url)
        self.containers = {}
        if container:
            self.containers[container] = self._client.inspect_container(
                container=container)

        self.tagged_containers = {}

    def get_containers(self):
        """Return the names of all live containers, leading '/' stripped.

        :rtype : list
        """
        return [entry['Names'][0].replace('/', '')
                for entry in self._client.containers()]

    @staticmethod
    def _into_dict(kv_config):
        """Convert ['KEY=VALUE', ...] pairs into {'KEY': 'VALUE', ...}."""
        return dict(item.split('=', 1) for item in kv_config)

    @property
    def describe_containers(self):
        """Refresh the cache with inspect data for every live container and
        return it; Config.Env is normalized into a dict."""
        for name in self.get_containers():
            inspected = self._client.inspect_container(name)
            inspected['Config']['Env'] = self._into_dict(
                kv_config=inspected['Config']['Env'])
            self.containers[name] = inspected

        return self.containers

    def events(self):
        """Return the decoded docker event stream."""
        try:
            return self._client.events(decode=True)
        except KeyboardInterrupt:
            pass

    def env(self, container=None):
        """Return the Config.Env mapping of *container*, preferring the
        cache when it is populated.

        :rtype: dict
        """
        source = self.containers if self.containers else self.describe_containers
        return source[container]['Config']['Env']

    def virtual_port(self, container):
        """Return the VIRTUAL_PORT environment entry of *container*."""
        if self.containers:
            return self.env(container)['VIRTUAL_PORT']
        return self.describe_containers[container]['Config']['Env'][
            'VIRTUAL_PORT']

    def ip_address(self, container):
        """Return the bridge-network IPAddress of *container*."""
        source = self.containers if self.containers else self.describe_containers
        return source[container]['NetworkSettings']['IPAddress']

    def memberof(self, service_name):
        """Placeholder -- not implemented."""
        pass
def dockerClient():
    """Initialize the module-level docker client and its decoded event
    stream (globals ``client`` and ``events``)."""
    global client, events

    # version='auto' lets docker-py negotiate the API version with the daemon.
    client = Client(base_url='unix://var/run/docker.sock', version='auto')
    events = client.events(decode=True)
Example #37
0
class DockerSpy:
    """Inspection convenience wrapper over a docker-py client.

        # Usage:
        docker = DockerSpy(container_api_url='unix://var/run/docker.sock')
        print(docker.describe_containers)
        print(docker.env(container='chargeback-sync'))
        print(docker.virtual_port(container='chargeback-sync'))
        print(docker.ip_address(container='chargeback-sync'))
        docker.events()    # decoded event stream, like `docker events`
        docker.containers  # cached inspect data keyed by container name
    """
    def __init__(self, container=None, container_api_url='unix://var/run/docker.sock'):
        """Connect the client and optionally pre-load one container's data."""
        self._client = Client(base_url=container_api_url)
        self.containers = {}
        if container:
            self.containers[container] = self._client.inspect_container(container=container)

        self.tagged_containers = {}

    def get_containers(self):
        """Return the names of running containers without the leading '/'.

        :rtype : list
        """
        live = self._client.containers()
        return [entry['Names'][0].replace('/', '') for entry in live]

    @staticmethod
    def _into_dict(kv_config):
        """Turn a ['KEY=VALUE', ...] list into a {'KEY': 'VALUE'} dict."""
        result = {}
        for pair in kv_config:
            key, value = pair.split('=', 1)
            result[key] = value
        return result

    @property
    def describe_containers(self):
        """Refresh the cache with inspect data for every live container and
        return it; Config.Env is normalized into a dict."""
        for name in self.get_containers():
            data = self._client.inspect_container(name)
            data['Config']['Env'] = self._into_dict(kv_config=data['Config']['Env'])
            self.containers[name] = data

        return self.containers

    def events(self):
        """Expose the decoded docker event stream."""
        try:
            return self._client.events(decode=True)
        except KeyboardInterrupt:
            pass

    def env(self, container=None):
        """Return the Config.Env mapping of *container* (cache-first).

        :rtype: dict
        """
        if self.containers:
            cache = self.containers
        else:
            cache = self.describe_containers
        return cache[container]['Config']['Env']

    def virtual_port(self, container):
        """Return the VIRTUAL_PORT environment entry of *container*."""
        if self.containers:
            return self.env(container)['VIRTUAL_PORT']
        return self.describe_containers[container]['Config']['Env']['VIRTUAL_PORT']

    def ip_address(self, container):
        """Return the bridge-network IPAddress of *container*."""
        if self.containers:
            cache = self.containers
        else:
            cache = self.describe_containers
        return cache[container]['NetworkSettings']['IPAddress']

    def memberof(self, service_name):
        """Placeholder -- not implemented."""
        pass
Example #38
0
class Collector:
    """Samples cpu/mem usage of running containers via the docker stats API
    and ships the samples to InfluxDB in periodic batches."""

    def __init__(self):
        self.logger = logging.getLogger("Monitor")
        # InfluxDB connection parameters, all overridable via environment.
        self.INFLUXDB = {}
        self.INFLUXDB['HOST'] = os.getenv("INFLUXDB_HOST","localhost")
        self.INFLUXDB['PORT'] = os.getenv("INFLUXDB_PORT","8086")
        self.INFLUXDB['USERNAME'] = os.getenv("INFLUXDB_USERNAME","root")
        self.INFLUXDB['PASSWORD'] = os.getenv("INFLUXDB_PASSWORD","root")
        self.INFLUXDB['DBNAME'] = os.getenv("INFLUXDB_DBNAME","autoscaling")
        self.INFLUXDB['SERIES'] = os.getenv("INFLUXDB_SERIES","monitoring")
        # Seconds between samples of a single container.
        self.COLLECT_TIME_INTERVAL = int(os.getenv("COLLECT_TIME_INTERVAL","3"))
        # Seconds between batch flushes ("BATH" is presumably a typo for
        # "batch" -- kept as-is for env-var compatibility).
        self.BATH_TIME_INTERVAL = int(os.getenv("BATH_TIME_INTERVAL","10"))

        self.influxdb_client = InfluxDBClient(
            host=self.INFLUXDB['HOST'],
            port=self.INFLUXDB['PORT'],
            username=self.INFLUXDB['USERNAME'],
            password=self.INFLUXDB['PASSWORD'],
            database=self.INFLUXDB['DBNAME']
        )

        self.docker_client = Client(base_url='unix://var/run/docker.sock')
        # Pending samples awaiting the next flush by send_data().
        self.data_bath = []
        self.current_data = 0

    def collecting(self, container_id):
        """Sample one container in a loop until it disappears or errors out.

        Resolves the container's MESOS_TASK_ID / app name once, then appends
        [time, id, app, task, cpu%, mem%] rows to the shared batch.
        """
        mesos_task_id = ""
        app_name = ""
        container_envs = self.docker_client.inspect_container(container_id)['Config']['Env']
        for env in container_envs:
            if env.startswith('MESOS_TASK_ID'):
                mesos_task_id = env.split('=')[1]
                # App name = task id minus its trailing ".<suffix>" part.
                index = mesos_task_id.rfind('.')
                app_name = mesos_task_id[:index]
                break
        while True:
            try:
                # stream=False returns a single stats snapshot per call.
                stat = self.docker_client.stats(container_id, decode="utf8", stream=False)
                # CPU %: usage delta over the sampling window, normalized from
                # nanoseconds and divided by the number of cpus.
                cpu_usage = (stat["cpu_stats"]["cpu_usage"]["total_usage"] - stat["precpu_stats"]["cpu_usage"]["total_usage"])*100/math.pow(10,9)/len(stat["cpu_stats"]["cpu_usage"]["percpu_usage"])
                # NOTE(review): true division here and datetime.timestamp()
                # below imply Python 3; under Python 2 mem_usage would floor.
                mem_usage = stat["memory_stats"]["usage"] / stat["memory_stats"]["limit"] * 100
                current_time = datetime.now().timestamp()
                data = [current_time, container_id, app_name, mesos_task_id, cpu_usage, mem_usage]
                self.logger.debug("Append: "+str(data))
                self.data_bath.append(data)
                time.sleep(self.COLLECT_TIME_INTERVAL)
            except errors.NotFound as e:
                # Container removed: end this collector quietly.
                self.logger.info("Container {} has gone away".format(container_id))
                break
            except Exception as e:
                self.logger.error("Error "+str(e))
                break


    def send_data(self):
        """Flush the accumulated batch to InfluxDB every BATH_TIME_INTERVAL
        seconds; runs forever in its own executor thread."""
        while True:
            try:
                time.sleep(self.BATH_TIME_INTERVAL)
                if self.data_bath:
                    data = dict()
                    data['name'] = self.INFLUXDB["SERIES"]
                    data['columns'] = ['time', 'container_id', 'app_uuid', 'mesos_task_id', "cpu_usage", "mem_usage"]
                    data['points'] = self.data_bath
                    # Swap in a fresh list before the (slow) network write.
                    self.data_bath = []
                    self.logger.info("Send data ...")
                    self.logger.debug(str(data))
                    self.influxdb_client.write_points([data])
            except Exception as e:
                self.logger.error("Error "+str(e))



    def run(self):
        """Start the sender plus one collector per existing container, then
        follow docker events to add/cancel collectors as containers come
        and go. Blocks forever."""
        with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:
            executor.submit(self.send_data)
            containers = self.docker_client.containers()
            collectors = {}
            for container in containers:
                collectors[container["Id"]] = executor.submit(self.collecting, container["Id"])
            events = self.docker_client.events(decode=True)

            for event in events:
                if event["status"] == "start":
                    self.logger.info("Start collector for: "+event["id"])
                    collectors[event["id"]] = executor.submit(self.collecting, event["id"])
                elif event["status"] == "die":
                    try:
                        # Future.cancel() only succeeds while still queued; a
                        # running collector exits by itself via NotFound above.
                        self.logger.info("Cancel collector for: "+event["id"])
                        collectors[event["id"]].cancel()
                    except Exception as e:
                        self.logger.debug("Exception when Cancel collector: {} : {}".format(str(e), event["id"]))
Example #39
0
def dock_events(api):
    """Connect to the docker daemon described by ``api['docker-ip']`` on the
    default TCP port 2375 and return its raw event stream."""
    endpoint = 'tcp://' + api['docker-ip'] + ':2375'
    return Client(base_url=endpoint).events()
Example #40
0
class DockerSwarmInterface:
    """Watches labelled apps in a Docker Swarm and pushes their published
    backends to a NetScaler via the supplied ``netscaler`` object."""

    def __init__(self, swarm_url, swarm_tls_ca_cert, swarm_tls_cert,
                 swarm_tls_key, swarm_allow_insecure, app_info, netscaler):
        # Build a TLS config unless insecure access was explicitly allowed;
        # tcp:// urls are rewritten to https:// for TLS connections.
        tls_config = False
        if not swarm_allow_insecure:
            if swarm_url.startswith("tcp"):
                swarm_url = swarm_url.replace("tcp", "https")
                logger.info("Using swarm url %s" % swarm_url)
            tls_config = tls.TLSConfig(client_cert=(swarm_tls_cert,
                                                    swarm_tls_key),
                                       verify=swarm_tls_ca_cert,
                                       assert_hostname=False)
        self.client = Client(base_url=swarm_url, tls=tls_config)
        self.app_info = app_info
        self.netskaler = netscaler
        # Serializes NetScaler reconfiguration across watcher threads.
        self.lock = threading.Lock()

    def get_backends_for_app(self, app_label):
        """Return (host_ip, public_port) pairs for all running containers
        carrying *app_label*."""
        logger.info("Getting backends for app label %s" % app_label)
        containers = self.client.containers(filters={'status': 'running',
                                                     'label': [app_label]})
        portConfigs = [n['Ports'] for n in containers]
        """
        [[{u'Type': u'tcp', u'PrivatePort': 443},
          {u'IP': u'0.0.0.0', u'Type': u'tcp', u'PublicPort': 807, u'PrivatePort': 80}],
          [{u'IP': u'0.0.0.0', u'Type': u'tcp', u'PublicPort': 806, u'PrivatePort': 80},
          {u'Type': u'tcp', u'PrivatePort': 443}]]
        """
        result = []
        for ports in portConfigs:
            for port in ports:
                # Only published (host-mapped) ports can act as backends.
                if port.get('PublicPort'):
                    # TODO: handle the case where more than one port is exposed
                    result.append((port['IP'], port['PublicPort']))

        return result

    def configure_ns_for_app(self, app_key, appname):
        """Recompute *appname*'s backends and push them to the NetScaler,
        serialized under the shared lock."""
        self.lock.acquire()
        try:
            app_label = app_key + "=" + appname
            backends = self.get_backends_for_app(app_label)
            # backends = map(lambda y: ("192.168.99.100", y[1]), backends)
            # TODO: remove above for actual swarm. With plain docker machine,
            # host IP is "0.0.0.0" -- that cannot be load balanced. Docker
            # swarm supplies correct host IP.
            logger.debug("Backends are %s" % str(backends))
            self.netskaler.configure_app(appname,  backends)
        finally:
            self.lock.release()

    def configure_all(self):
        """Configure every app once, then watch for changes until all
        watcher threads exit."""
        app_key = self.app_info['appkey']
        appnames = map(lambda x: x['name'], self.app_info['apps'])
        logger.info("Configuring for app names: %s" % str(appnames))
        for appname in appnames:
            self.configure_ns_for_app(app_key, appname)
        self.watch_all_apps()
        self.wait_for_all()

    def watch_app(self, app_key, appname):
        """Blocking per-app event loop: reconfigure the NetScaler whenever a
        container carrying this app's label starts, dies or is killed."""
        app_label = app_key + "=" + appname
        events = self.client.events(
            filters={"event": ["start", "kill", "die"],
                     "label": [app_label]})
        for e in events:
            # events() without decode=True yields raw JSON strings.
            evj = json.loads(e)
            status = evj['status']
            c_id = evj['id']
            if status in ['start', 'die', 'kill']:
                # TODO: BUG in docker swarm events does not actually apply
                # filters. Use 'docker ps' to verify the app that is changing
                # belongs to this thread
                logger.debug("Event status: %s, id: %.12s" %
                             (evj['status'], evj['id']))
                containers = self.client.containers(
                    all=True,
                    filters={"label": [app_label]})
                container_ids = [c.get('Id') for c in containers
                                 if c.get('Id') == c_id]
                if container_ids:
                    logger.info("Configuring NS for app %s, "
                                "container id=%.12s" % (appname, c_id))
                    self.configure_ns_for_app(app_key, appname)

    def watch_all_apps(self):
        """Spawn one watcher thread per configured app."""
        app_key = self.app_info['appkey']
        appnames = map(lambda x: x['name'], self.app_info['apps'])
        for appname in appnames:
            logger.debug("Watching for events for app: %s" % str(appname))
            t = threading.Thread(target=self.watch_app,
                                 args=(app_key, appname,))
            t.start()

    def wait_for_all(self):
        """Join every thread except the main one (i.e. all app watchers)."""
        main_thread = threading.currentThread()
        for t in threading.enumerate():
            if t is main_thread:
                continue
            logging.debug('joining %s', t.getName())
            t.join()
Example #41
0
import os
import json
import sys
import logging

import etcd

logging.basicConfig(level=logging.DEBUG)

# etcd is resolved by hostname (e.g. a linked/composed 'etcd' container).
etcd_hostname = 'etcd'
etcd_client = etcd.Client(host=etcd_hostname)

from docker import Client

# Module-level docker client and decoded event stream, shared by the
# handler functions below.
client = Client(base_url='unix://var/run/docker.sock')
events = client.events(decode=True)

def get_container(message):
    """Build a container description from a docker event *message*: the
    event's Actor attributes augmented with the container's Env and Id.

    Uses the module-level docker ``client``; prints and returns the dict.
    """
    container_id = message['id']
    container = message['Actor']['Attributes']
    container['Env'] = client.inspect_container(container_id)['Config']['Env']
    container['Id'] = container_id
    print(container)
    return container

def get_envvar(container, to_find):
    """Return the value of environment variable *to_find* from a container
    dict carrying an 'Env' list of 'KEY=VALUE' strings, or None if absent.

    Fixes: the original body mixed a tab with spaces (a TabError under
    Python 3) and split on every '=', truncating values that themselves
    contain '=' and crashing on entries with no '=' at all.
    """
    for envvar in container['Env']:
        name, _, value = envvar.partition('=')
        if name == to_find:
            return value
    return None
def main(arguments):
    """Parse options (config file then CLI), validate them, connect to the
    docker daemon and dispatch network connect/disconnect events.

    :param arguments: argv-style argument list (without the program name)
    :return: exit code -- 0 on clean shutdown, 2 on error
    """
    logging.basicConfig(level=logging.INFO)
    # First pass: only extract -c/--config so the file can provide defaults
    # for the full parser built below.
    config_parser = argparse.ArgumentParser(add_help=False)
    config_parser.add_argument('-c', '--config', nargs='?')
    (config_arg, arguments) = config_parser.parse_known_args(arguments)
    defaults = dict(DEFAULTS)
    if config_arg.config is not None:
        if os.path.isfile(config_arg.config):
            try:
                configp = ConfigParser.SafeConfigParser()
                configp.read(config_arg.config)
                defaults.update(dict(configp.items('docker-listen')))
            except Exception:
                logging.exception('File %s can not be read', config_arg.config)
        else:
            # Fixed: logging.warn is a deprecated alias of logging.warning.
            logging.warning('File %s can not be read', config_arg.config)

    # Normalize the string booleans coming from the config file; systemctl
    # mode wins over sighup mode and the two are mutually exclusive.
    if defaults['systemctl_enabled'] in ('True', 'yes', '1'):
        defaults['systemctl_enabled'] = True
        defaults['sighup_enabled'] = False
    elif defaults['sighup_enabled'] in ('True', 'yes', '1'):
        defaults['systemctl_enabled'] = False
        defaults['sighup_enabled'] = True
    else:
        defaults['systemctl_enabled'] = False
        defaults['sighup_enabled'] = False
    # strip quotes that may surround names in the .ini file
    defaults['sighup_process_name'] = defaults['sighup_process_name'].replace('\'', '')
    defaults['systemctl_service_name'] = defaults['systemctl_service_name'].replace('\'', '')

    # Second pass: full parser; config-file values act as defaults.
    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-c', '--config', nargs='?', metavar='FILE', help='load configuration from .ini config file (section docker-listen)')
    parser.add_argument('--docker-url', nargs='?', metavar='URL', help='docker socket path (unix://var/run/docker.sock) or docker url')
    # Fixed: the original passed type=bool, which converts both 'yes' and
    # 'no' to True (any non-empty string is truthy) and then fails the
    # choices check, so these flags always errored out when supplied.
    parser.add_argument('--systemctl-enabled', nargs='?', choices=('yes', 'no'), help='systemctl is enable ?')
    parser.add_argument('--systemctl-service-name', metavar='NAME', nargs='?', help='name of the service to restart')
    parser.add_argument('--sighup-enabled', nargs='?', choices=('yes', 'no'), help='sighup process on events ?')
    parser.add_argument('--sighup-process-name', metavar='NAME', nargs='?', help='name of the process to sighup (with killall)')
    parser.add_argument('--hosts-dir', nargs='?', metavar='DIR_PATH', help='directory where hosts files are stored ; all files in this directory will be deleted')
    parser.add_argument('--log-level', nargs='?', choices=('DEBUG', 'INFO', 'WARN', 'ERROR'))
    logging.debug('Using defaults %s', pprint.pformat(defaults))
    parser.set_defaults(**defaults)
    configuration = parser.parse_args(arguments)
    # CLI values arrive as 'yes'/'no' strings while the defaults above are
    # already booleans -- normalize both to bool.
    for flag in ('systemctl_enabled', 'sighup_enabled'):
        value = getattr(configuration, flag)
        if not isinstance(value, bool):
            setattr(configuration, flag, value == 'yes')

    # set logging level and start working
    logging.getLogger('').setLevel(configuration.log_level)
    logging.info('Current configuration : %s', pprint.pformat(configuration))

    # check configuration
    if not os.path.isdir(configuration.hosts_dir):
        logging.error('hosts_dir \'%s\' is not a directory. Stopping.', configuration.hosts_dir)
        return 2
    try:
        client = Client(base_url=configuration.docker_url)
        client.ping()
    except Exception:
        logging.exception('Error communicating with docker socket %s. Stopping.', configuration.docker_url)
        return 2
    logging.info('Docker-listen started')
    events = client.events(decode=True)
    # clear the inherited umask so generated hosts files are world-accessible
    os.umask(0000)
    clean_all(configuration)
    init_all(configuration, client)
    try:
        for event in events:
            logging.debug(event)
            if event['Action'] == 'connect':
                handle_start(configuration, client, event)
            elif event['Action'] == 'disconnect':
                handle_stop(configuration, client, event)
    except Exception:
        logging.exception('Error processing docker events. Stopping.')
        return 2
    return 0
Example #43
0
class Dock2dns():
    """Publish container DNS records to a SkyDNS etcd backend.

    Listens to the Docker event stream and creates/removes SkyDNS entries
    under /v2/keys/skydns/<reversed-domain>/<name> as containers start
    and stop.
    """

    def __init__(self, socket = None, etcd = None, domain = None, ttl = 0, retry = 5, sleep = 2, debug = None):
        self.socket = socket    # Docker daemon socket/URL
        self.etcd = etcd        # etcd base URL, e.g. http://127.0.0.1:4001
        self.domain = domain    # DNS domain the records live under
        self.ttl = ttl          # TTL set on every etcd key
        self.retry = retry      # number of HTTP attempts per request
        self.sleep = sleep      # seconds to wait between attempts
        self.client = Client(base_url = self.socket)
        self.debug = debug

    def log(self, message):
        """Best-effort line logging to stdout; never raises."""
        try:
            sys.stdout.write(message)
            sys.stdout.write("\n")
            sys.stdout.flush()
        except Exception:
            # Logging must never take the listener down (e.g. broken pipe).
            pass

    def inspect(self, id):
        """Return `docker inspect` data for *id*, or None if unavailable."""
        try:
            return self.client.inspect_container(id)
        except Exception:
            # The container may already be gone when the event is handled.
            return None

    def req(self, method, uri, data = None):
        """Issue an etcd HTTP request, retrying up to self.retry times.

        Gives up silently after the last attempt (best-effort semantics).
        """
        payload = {}
        if data:
            payload["value"] = json.dumps(data)
        payload["ttl"] = self.ttl

        self.log("%s %s %s" % (method, uri, data))

        # BUG FIX: the attempt counter previously shadowed the response
        # object (both were named `r`); use a distinct name.
        for attempt in range(self.retry):
            try:
                requests.request(method, uri, data = payload)
                break
            except Exception:
                time.sleep(self.sleep)

    def update(self, event):
        """Translate a single Docker event into an etcd write or delete."""
        config = self.inspect(event["id"])
        if not config:
            return

        # Prefer the configured hostname; fall back to the container name.
        if config["Config"]["Hostname"] != "":
            dnsname = config["Config"]["Hostname"]
        else:
            dnsname = config["Name"].replace("/", "")

        # SkyDNS keys are stored with the domain components reversed.
        uri="%s/v2/keys/skydns/%s/%s" % (self.etcd, "/".join(self.domain.split(".")[::-1]), dnsname )
        method = None
        data = None

        if event["status"] == "start":
            data = { "Host" : config["NetworkSettings"]["IPAddress"] }
            method = "PUT"
        elif event["status"] in ("die", "stop"):
            data = {}
            method = "DELETE"

        if method:
            # Fire-and-forget so the event loop never blocks on etcd.
            treq = threading.Thread(target = self.req, args = (method, uri, data,))
            treq.start()

    def listen(self):
        """Consume the Docker event stream forever, updating DNS records."""
        while True:
            # Re-open the stream if it ever ends, then handle each event.
            stream = self.client.events()
            for raw in stream:
                self.update(json.loads(raw))
Example #44
0
    for rule in iptables_rules:
        # FYI: http://stackoverflow.com/questions/11269575/how-to-hide-output-of-subprocess-in-python-2-7
        ret = subprocess.call(shlex.split('{0} {1}'.format(iptables_check_command, rule)), stdout=DEVNULL, stderr=subprocess.STDOUT)
        if ret != 0:
            command = '{0} {1}'.format(iptables_insert_command, rule)
            syslog.syslog(syslog.LOG_INFO, 'network_container:{0}'.format(command))
            subprocess.call(shlex.split(command))

def on_stop(event):
    """Tear down the iptables rules for a container that stopped.

    For each configured rule, a delete is issued only when the check
    command reports the rule is currently installed (exit status 0).
    """
    for rule in iptables_rules:
        check = '{0} {1}'.format(iptables_check_command, rule)
        rule_present = subprocess.call(shlex.split(check), stdout=DEVNULL, stderr=subprocess.STDOUT) == 0
        if rule_present:
            command = '{0} {1}'.format(iptables_delete_command, rule)
            syslog.syslog(syslog.LOG_INFO, 'network_container:{0}'.format(command))
            subprocess.call(shlex.split(command))

# Map Docker event statuses to their handler callbacks.
event_handlers = {
    'start': on_start,
    'stop': on_stop,
}

# TODO: Use library to handle event loop
# TODO: Use fluentd for logging
gen = client.events(filters={'event': ['start', 'stop'], 'name': 'iptables'})
while True:
    # Block until the next matching Docker event arrives on the stream.
    event = json.loads(next(gen))
    syslog.syslog(syslog.LOG_INFO, 'network_container:{0}'.format(event))
    handler = event_handlers.get(event['status'])
    if handler is not None:
        handler(event)
Example #45
0
class Collector:
    """Collect per-container CPU/memory stats from Docker and ship them to InfluxDB.

    One worker thread per running container polls the Docker stats API; a
    single sender thread periodically flushes the accumulated points
    ("bath" is presumably a typo for "batch") to an InfluxDB series.
    """

    def __init__(self):
        # All connection settings come from the environment with defaults.
        self.logger = logging.getLogger("Monitor")
        self.INFLUXDB = {}
        self.INFLUXDB['HOST'] = os.getenv("INFLUXDB_HOST", "localhost")
        self.INFLUXDB['PORT'] = os.getenv("INFLUXDB_PORT", "8086")
        self.INFLUXDB['USERNAME'] = os.getenv("INFLUXDB_USERNAME", "root")
        self.INFLUXDB['PASSWORD'] = os.getenv("INFLUXDB_PASSWORD", "root")
        self.INFLUXDB['DBNAME'] = os.getenv("INFLUXDB_DBNAME", "autoscaling")
        self.INFLUXDB['SERIES'] = os.getenv("INFLUXDB_SERIES", "monitoring")
        # Seconds between successive stat samples per container.
        self.COLLECT_TIME_INTERVAL = int(
            os.getenv("COLLECT_TIME_INTERVAL", "3"))
        # Seconds between flushes of the staged data points.
        self.BATH_TIME_INTERVAL = int(os.getenv("BATH_TIME_INTERVAL", "10"))

        self.influxdb_client = InfluxDBClient(
            host=self.INFLUXDB['HOST'],
            port=self.INFLUXDB['PORT'],
            username=self.INFLUXDB['USERNAME'],
            password=self.INFLUXDB['PASSWORD'],
            database=self.INFLUXDB['DBNAME'])

        self.docker_client = Client(base_url='unix://var/run/docker.sock')
        # Points staged for the next flush; shared between the collector
        # threads (append) and the sender thread (swap to a new list).
        self.data_bath = []
        self.current_data = 0

    def collecting(self, container_id):
        """Sample one container's stats forever, staging points in data_bath.

        Exits when the container disappears (NotFound) or any other error
        occurs; errors other than NotFound are logged before exiting.
        """
        mesos_task_id = ""
        app_name = ""
        container_envs = self.docker_client.inspect_container(
            container_id)['Config']['Env']
        # Derive the Mesos task id and app name from the container's
        # environment; app name is the task id with its trailing ".<uuid>"
        # suffix stripped.
        for env in container_envs:
            if env.startswith('MESOS_TASK_ID'):
                mesos_task_id = env.split('=')[1]
                index = mesos_task_id.rfind('.')
                app_name = mesos_task_id[:index]
                break
        while True:
            try:
                stat = self.docker_client.stats(container_id,
                                                decode="utf8",
                                                stream=False)
                # CPU %: delta of total usage (nanoseconds) between the two
                # samples docker returns, normalized per CPU.
                # NOTE(review): this assumes the precpu sample is exactly one
                # second older than cpu_stats -- confirm against the docker
                # stats API before relying on absolute percentages.
                cpu_usage = (
                    stat["cpu_stats"]["cpu_usage"]["total_usage"] -
                    stat["precpu_stats"]["cpu_usage"]["total_usage"]
                ) * 100 / math.pow(10, 9) / len(
                    stat["cpu_stats"]["cpu_usage"]["percpu_usage"])
                mem_usage = stat["memory_stats"]["usage"] / stat[
                    "memory_stats"]["limit"] * 100
                current_time = datetime.now().timestamp()
                data = [
                    current_time, container_id, app_name, mesos_task_id,
                    cpu_usage, mem_usage
                ]
                self.logger.debug("Append: " + str(data))
                self.data_bath.append(data)
                time.sleep(self.COLLECT_TIME_INTERVAL)
            except errors.NotFound as e:
                # Container stopped between samples: end this collector.
                self.logger.info(
                    "Container {} has gone away".format(container_id))
                break
            except Exception as e:
                self.logger.error("Error " + str(e))
                break

    def send_data(self):
        """Flush staged points to InfluxDB every BATH_TIME_INTERVAL seconds."""
        while True:
            try:
                time.sleep(self.BATH_TIME_INTERVAL)
                if self.data_bath:
                    # Build one series payload and swap in a fresh list so
                    # collectors keep appending while we write.
                    data = dict()
                    data['name'] = self.INFLUXDB["SERIES"]
                    data['columns'] = [
                        'time', 'container_id', 'app_uuid', 'mesos_task_id',
                        "cpu_usage", "mem_usage"
                    ]
                    data['points'] = self.data_bath
                    self.data_bath = []
                    self.logger.info("Send data ...")
                    self.logger.debug(str(data))
                    self.influxdb_client.write_points([data])
            except Exception as e:
                self.logger.error("Error " + str(e))

    def run(self):
        """Start the sender and one collector per container; follow events."""
        with concurrent.futures.ThreadPoolExecutor(
                max_workers=100) as executor:
            executor.submit(self.send_data)
            # Seed collectors for containers already running at startup.
            containers = self.docker_client.containers()
            collectors = {}
            for container in containers:
                collectors[container["Id"]] = executor.submit(
                    self.collecting, container["Id"])
            events = self.docker_client.events(decode=True)

            for event in events:
                if event["status"] == "start":
                    self.logger.info("Start collector for: " + event["id"])
                    collectors[event["id"]] = executor.submit(
                        self.collecting, event["id"])
                elif event["status"] == "die":
                    try:
                        # cancel() only stops tasks that have not started;
                        # running collectors exit via their NotFound handler.
                        self.logger.info("Cancel collector for: " +
                                         event["id"])
                        collectors[event["id"]].cancel()
                    except Exception as e:
                        self.logger.debug(
                            "Exception when Cancel collector: {} : {}".format(
                                str(e), event["id"]))
Example #46
0
    if hostname:
        if '.' not in hostname:
            hostname = "{}{}".format(hostname, DOMAIN_SUFFIX)
        print("Adding {}".format(hostname))
        hosts = Hosts(HOSTS_PATH)
        my_ip = check_output([
            "/bin/sh", "-c",
            "ip -4 -f inet -o addr show eth0 | awk '{print $4}' | cut -d/ -f1"
        ])
        my_ip = my_ip.decode().strip()
        print("My IP: {}".format(my_ip))
        hosts.set_one(hostname, my_ip)
        hosts.write(HOSTS_PATH)
        print("Go to http://%s/" % hostname)

    for event in docker.events():
        event = json.loads(event)
        if 'status' not in event:
            continue
        if event['status'] == 'start':
            hostname = get_hostname(event['id'])
            if hostname is None:
                print(
                    "ERR: Event 'start' received but no hostname found for {}".
                    format(event['id']))
                continue
            container_ip = get_ip(event['id'])
            if not container_ip:
                print("ERR: Could not find IP address for {}".format(hostname))
                continue
            print("Adding {} as {}".format(hostname, container_ip))
def myjob(url):
    # Coroutine-style job: sleep a random interval, then (in the
    # commented-out steps) download *url* in a worker thread.
    def download(url):
        # Blocking fetch (Python 2 urllib2); intended to run via threaded_task.
        return urllib2.urlopen(url).read()
    # `elapsed` is the actual sleep duration reported by the scheduler;
    # currently only used by the commented-out diagnostics below.
    elapsed = yield sleep_task(random.uniform(0.0, 3.0))
    #~ sys.stderr.write("[slept_for:%0.2f]" % elapsed)
    #~ sys.stderr.write("[start_download:%s]" % url)
    #~ html = yield threaded_task(download, url)
    #~ sys.stderr.write("[done:%s:%d]" % (url, len(html)))      

def basso_continuo():
    """Heartbeat callback for the gobject main loop: emit a progress dot.

    Returns True so gobject.timeout_add keeps the callback scheduled.
    """
    sys.stderr.write(".")
    return True

# Only consume Docker events emitted after startup.
now = datetime.datetime.now()
since = now #- datetime.timedelta(seconds=10)
live_events = docker_client.events(since=since)
# NOTE(review): this loop blocks on the event stream forever, so the job
# and main-loop code below it is unreachable as written (Python 2 syntax).
while True:
    print '1'
    next(live_events)
    print '2'

urls = ["http://www.google.com", "http://python.com", "http://www.pygtk.org"]
jobs = [start_job(myjob(url)) for url in urls]

# See how easily we can raise an exception in the job coroutine:
# gobject.timeout_add(1000, lambda: jobs[0].throw(JobStopped))      

gobject.timeout_add(100, basso_continuo)
loop = gobject.MainLoop()
loop.run()
Example #48
0
class DockerSwarmInterface:
    """Keep a NetScaler load balancer in sync with app containers in a Swarm.

    Containers are identified by a label (<appkey>=<appname>); one watcher
    thread per app follows Docker events and refreshes the NetScaler
    backends whenever a labelled container starts, dies or is killed.
    """

    def __init__(self, swarm_url, swarm_tls_ca_cert, swarm_tls_cert,
                 swarm_tls_key, swarm_allow_insecure, app_info, netscaler):
        tls_config = False
        if not swarm_allow_insecure:
            # docker-py needs an https:// scheme for a TLS connection.
            if swarm_url.startswith("tcp"):
                swarm_url = swarm_url.replace("tcp", "https")
                logger.info("Using swarm url %s" % swarm_url)
            tls_config = tls.TLSConfig(client_cert=(swarm_tls_cert,
                                                    swarm_tls_key),
                                       verify=swarm_tls_ca_cert,
                                       assert_hostname=False)
        self.client = Client(base_url=swarm_url, tls=tls_config)
        self.app_info = app_info
        self.netskaler = netscaler
        # Serializes NetScaler reconfiguration across the watcher threads.
        self.lock = threading.Lock()

    def get_backends_for_app(self, app_label):
        """Return [(ip, public_port)] for running containers carrying *app_label*."""
        logger.info("Getting backends for app label %s" % app_label)
        containers = self.client.containers(filters={
            'status': 'running',
            'label': [app_label]
        })
        # A container's 'Ports' entry looks like:
        #   {u'IP': u'0.0.0.0', u'Type': u'tcp', u'PublicPort': 807, u'PrivatePort': 80}
        # and unpublished ports have no 'PublicPort'/'IP' keys.
        result = []
        for ports in (c['Ports'] for c in containers):
            for port in ports:
                if port.get('PublicPort'):
                    # TODO: handle the case where more than one port is exposed
                    result.append((port['IP'], port['PublicPort']))

        return result

    def configure_ns_for_app(self, app_key, appname):
        """Recompute *appname*'s backends and push them to the NetScaler."""
        self.lock.acquire()
        try:
            app_label = app_key + "=" + appname
            backends = self.get_backends_for_app(app_label)
            # backends = map(lambda y: ("192.168.99.100", y[1]), backends)
            # TODO: remove above for actual swarm. With plain docker machine,
            # host IP is "0.0.0.0" -- that cannot be load balanced. Docker
            # swarm supplies correct host IP.
            logger.debug("Backends are %s" % str(backends))
            self.netskaler.configure_app(appname, backends)
        finally:
            self.lock.release()

    def configure_all(self):
        """Configure every known app once, then watch for changes forever."""
        app_key = self.app_info['appkey']
        # BUG FIX: materialize the names -- in Python 3 `map` returns a lazy,
        # single-use iterator whose repr makes the log line below useless.
        appnames = [app['name'] for app in self.app_info['apps']]
        logger.info("Configuring for app names: %s" % str(appnames))
        for appname in appnames:
            self.configure_ns_for_app(app_key, appname)
        self.watch_all_apps()
        self.wait_for_all()

    def watch_app(self, app_key, appname):
        """Thread body: follow start/kill/die events for one app and reconfigure."""
        app_label = app_key + "=" + appname
        events = self.client.events(filters={
            "event": ["start", "kill", "die"],
            "label": [app_label]
        })
        for e in events:
            evj = json.loads(e)
            status = evj['status']
            c_id = evj['id']
            if status in ['start', 'die', 'kill']:
                # TODO: BUG in docker swarm events does not actually apply
                # filters. Use 'docker ps' to verify the app that is changing
                # belongs to this thread
                logger.debug("Event status: %s, id: %.12s" %
                             (evj['status'], evj['id']))
                containers = self.client.containers(
                    all=True, filters={"label": [app_label]})
                container_ids = [
                    c.get('Id') for c in containers if c.get('Id') == c_id
                ]
                if container_ids:
                    logger.info("Configuring NS for app %s, "
                                "container id=%.12s" % (appname, c_id))
                    self.configure_ns_for_app(app_key, appname)

    def watch_all_apps(self):
        """Spawn one watcher thread per configured app."""
        app_key = self.app_info['appkey']
        # Same map() materialization fix as in configure_all().
        appnames = [app['name'] for app in self.app_info['apps']]
        for appname in appnames:
            logger.debug("Watching for events for app: %s" % str(appname))
            t = threading.Thread(target=self.watch_app,
                                 args=(
                                     app_key,
                                     appname,
                                 ))
            t.start()

    def wait_for_all(self):
        """Join every non-main thread (blocks for as long as watchers run)."""
        # Consistency fix: use the module-level `logger` (was the root
        # logging module) and the modern thread APIs current_thread()/.name
        # instead of the deprecated currentThread()/getName() aliases.
        main_thread = threading.current_thread()
        for t in threading.enumerate():
            if t is main_thread:
                continue
            logger.debug('joining %s', t.name)
            t.join()
Example #49
0
    for e in valid_events:
        key, value = e.replace('ACT_', '').split('=')
        runner_file = '{}/{}'.format(runner_dir, key)
        if os.path.isfile(runner_file):
            inspect_file = '{}/{}_{}.json'.format(inspect_dir, docker_id,
                                                  status)
            with open(inspect_file, 'w') as fp:
                fp.write(json.dumps(inspect, indent=2))
            try:
                # We need to keep a referrence to the process so we can remove the reference
                # when they are done. See comment below.
                # We also need to spawn the process in the backround, therefor, Popen, not call.
                procs.append(
                    subprocess.Popen([runner_file, value, docker_id, status]))

                # Delete the references to completed processes so python will do garbage collection
                # to kill old processes. They will be zombies from the time they are done to when
                # this code is run. A zombie doesnt use any resources, but its nice to get rid of them.
                procs[:] = [proc for proc in procs if proc.poll() is None]
            except OSError:
                # Probably not executable..
                return


# Run the handler once for every container that is already up.
for running in c.containers(filters={'status': 'running'}):
    handler(docker_id=running['Id'], status='running')

# c.events() returns a blocking generator; dispatch each event as it arrives.
for event in c.events(decode=True):
    handler(docker_id=event['id'], status=event['status'])
Example #50
0
class Agent(object):
    def __init__(self, docker_url='unix:///var/run/docker.sock', handler_paths=None, script_paths=None, once=False, watch=False):
        """Connect to Docker, load event handlers, then optionally run.

        handler_paths/script_paths: override the default search locations;
        when None, the standard exec-relative directories are used.
        once: run the catch-all handlers a single time and return.
        watch: enter the blocking event loop.
        """
        self.docker = Client(base_url=docker_url)
        # BUG FIX: the caller-supplied handler_paths/script_paths were
        # previously ignored (immediately overwritten). Honor them and only
        # fall back to the default search paths when they are None.
        if script_paths is None:
            script_paths = relative_to_exec_dirs([
                '../libexec/docker-glue',
                '../../libexec/docker-glue',
                '/usr/libexec/docker-glue',
                './handler-scripts',
            ])
        self.script_paths = script_paths
        if handler_paths is None:
            handler_paths = relative_to_exec_dirs(['../etc', '../../etc', '/etc', '.'], suffix='/docker-glue.d')
        self.load_handlers(handler_paths)
        if once: self.once('none', '')
        elif watch: self.loop()

    def resolve_script(self, script):
        if os.path.isabs(script): return script
        for path in self.script_paths:
             fn = os.path.realpath(os.path.join(path, script))
             if os.path.exists(fn) and os.access(fn, os.X_OK): return fn
        return None

    def load_handlers(self, paths):
        """Parse every *.ini file under each path and index handlers by event.

        Duplicate paths are visited only once. Populates self.handlers and
        self.handlers_by_event.
        """
        handlers = []
        by_event = defaultdict(list)
        visited = set()
        for path in paths:
            if path in visited:
                continue
            visited.add(path)
            logger.info("looking for handlers in [%s]", path)
            for ini_file in glob.glob(os.path.join(path, '*.ini')):
                logger.info("parsing [%s]", ini_file)
                ini = ConfigParser.RawConfigParser()
                ini.read(ini_file)
                handler = factory(ini.get('handler', 'class'), self, ini_file, ini)
                handlers.append(handler)
                for event in handler.events:
                    by_event[event].append(handler)
        logger.debug("handlers by event: %r", by_event)
        self.handlers = handlers
        self.handlers_by_event = by_event

    def once(self, event, container_id):
        """Dispatch *event* for *container_id* to the registered handlers.

        Docker events include: create, destroy, die, exec_create, exec_start,
        export, kill, oom, pause, restart, start, stop, unpause. The synthetic
        'none' event runs only its own handlers (no 'all' handlers) and is
        re-triggered afterwards when any fired handler requests it.
        """
        triggers_none = False
        # BUG FIX: copy before extending. `handlers += ...` on the defaultdict
        # entry mutated the stored list in place, permanently appending the
        # 'all' handlers to this event's list on every dispatch.
        handlers = list(self.handlers_by_event[event])
        if event != 'none':
            handlers += self.handlers_by_event['all']
        logger.debug('got handlers %r', handlers)
        for handler in handlers:
            logger.debug("passing event=%r container_id=%r to handler=%r", event, container_id, handler)
            if not handler.enabled:
                continue
            handler.handle(event, container_id)
            triggers_none |= handler.triggers_none
        if event != 'none' and triggers_none:
            self.once('none', '')

    def loop(self):
        # Run the catch-all handlers once, then dispatch Docker events forever.
        logger.info("waiting for docker events")
        self.once('none', '')
        for event in self.docker.events(decode=True):
            # event looks like this {"status":"die","id":"123","from":"foobar/eggs:latest","time":1434047926}
            self.once(event['status'], event['id'])