def _remove_collector(self, cid):
    c = self._get_collector(cid)
    c.terminate()
    while c.is_alive():
        sleep(.2)
    output('collector stopped for container %s' % cid)
    self.children = [c for c in self.children if c.name != cid]
def __init__(self, docker_host, redis_host='127.0.0.1', redis_port=6379):
    self.docker = Client(base_url=docker_host)
    # a single info() call provides both the host name and CPU count
    info = self.docker.info()
    self.source = info['Name']
    self.ncpu = info['NCPU']
    self.redis = StrictRedis(host=redis_host, port=redis_port, db=0)
    self.children = []
    self.stopped = False
    log.info('Connected to Docker API at url %s' % docker_host)
    output('starting collector on source %s' % self.source)
    self.start()
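# Usage sketch (illustrative only; the enclosing class name `StatCollector`
# and the Process/Thread base class providing start() are assumptions, not
# confirmed by this file):
#
#   collector = StatCollector('unix://var/run/docker.sock',
#                             redis_host='127.0.0.1',
#                             redis_port=6379)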
def run_forever(self):
    stat_count = 0
    output('listener started')
    for msg in self.sub.listen():
        self._process_msg(msg['data'])
        stat_count += 1
        if self._is_maint_interval():
            output('processed %s stats in last %ss' %
                   (stat_count, self.maint_interval))
            stat_count = 0
            self._flush_all()
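# Illustrative shape for the maintenance check called above; the real
# _is_maint_interval is defined elsewhere in this class and may differ:
#
#   def _is_maint_interval(self):
#       # True once every self.maint_interval seconds
#       if time() - self.last_maint >= self.maint_interval:
#           self.last_maint = time()
#           return True
#       return False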
def _event_listener(self):
    """
    Listen for docker events and dynamically add or remove stat
    collectors based on start and die events
    """
    output('started event listener')
    for event in self.docker.events():
        event = json.loads(event.decode('utf-8'))
        if event['status'] == 'start':
            self._add_collector(event['id'])
        if event['status'] == 'die':
            self._remove_collector(event['id'])
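# A decoded Docker event as consumed above looks roughly like this
# (representative shape; values are illustrative):
#
#   {"status": "start",
#    "id": "4f0b2d86f4d5...",
#    "from": "redis:latest",
#    "time": 1423339459}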
def _collector(self, cid, cname):
    """
    Collect stats for a single container from the Docker stats API stream,
    append container metadata (name, id, source host, cpu count), and
    publish each stat to redis
    params:
     - cid(str): ID of container to collect stats from
     - cname(str): Name of container
    """
    sleep(5)  # sleep to allow container to fully start
    output('started collector for container %s' % cid)
    stats = self.docker.stats(cid, decode=True)
    for stat in stats:
        # append additional information to the returned stat
        stat['container_name'] = cname
        stat['container_id'] = cid
        stat['source'] = self.source
        stat['ncpu'] = self.ncpu
        self.redis.publish('statsquid', msgpack.packb(stat))
        if self.stopped:
            break
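# Minimal downstream consumer sketch (not part of this class; assumes a
# redis instance at the defaults used above and msgpack-packed stats on
# the 'statsquid' channel, as published by _collector):
#
#   sub = StrictRedis(host='127.0.0.1', port=6379, db=0).pubsub()
#   sub.subscribe('statsquid')
#   for msg in sub.listen():
#       if msg['type'] == 'message':
#           # note: keys may come back as bytes depending on msgpack version
#           stat = msgpack.unpackb(msg['data'])
#           print(stat['container_name'], stat['source'])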