def crawl(self, container_id=None, **kwargs):
    """Crawl a redis container and report its INFO metrics.

    Returns a single-element list ``[(feature_key, attributes,
    feature_type)]``, or ``None`` when the container does not expose a
    redis port or has no non-loopback IPv4 address.

    Raises ConnectionError when redis does not answer on the port.
    """
    try:
        import redis
    except ImportError:
        # Best-effort: install the client library at runtime when it is
        # missing from the crawler's environment.
        import pip
        pip.main(['install', 'redis'])
        import redis

    # only crawl redis container. Otherwise, quit.
    c = dockercontainer.DockerContainer(container_id)
    port = self.get_port(c)
    if not port:
        return

    state = c.inspect['State']
    pid = str(state['Pid'])
    ips = run_as_another_namespace(
        pid, ['net'], utils.misc.get_host_ip4_addresses)

    # Pick the first non-loopback address to connect to.
    ip = None
    for each_ip in ips:
        if each_ip != "127.0.0.1":
            ip = each_ip
            break
    if ip is None:
        # Bug fix: 'ip' used to be unbound (NameError) when only the
        # loopback address was present; treat that as "nothing to crawl".
        logger.info("no non-loopback address for container %s",
                    container_id)
        return

    client = redis.Redis(host=ip, port=port)
    try:
        metrics = client.info()
    except Exception:
        logger.info("redis does not listen on port:%d", port)
        # Bug fix: the message is now actually formatted; previously the
        # printf-style args were stored unformatted on the exception.
        raise ConnectionError("no listen at %d" % port)

    feature_attributes = feature.create_feature(metrics)
    return [(self.feature_key, feature_attributes, self.feature_type)]
def crawl(self, container_id, avoid_setns=False, **kwargs):
    """Yield per-interface counter rates for a container.

    Each yielded tuple is (feature_key, InterfaceFeature, "interface"),
    where the feature holds per-second deltas against the previously
    cached counter snapshot (zeros on the first measurement).
    """
    logger.debug("Crawling %s for container %s" % (self.get_feature(),
                                                   container_id))
    container = DockerContainer(container_id)
    if avoid_setns:
        raise NotImplementedError("avoidsetns mode not implemented")

    interfaces = run_as_another_namespace(
        container.pid, ["net"], self._crawl_interface_counters)

    for ifname, counters in interfaces:
        key = "{0}-{1}".format("interface", ifname)
        cache_key = "{0}-{1}-{2}".format(container.long_id,
                                         container.pid, key)
        prev_count, prev_time = self._cache_get_value(cache_key)
        self._cache_put_value(cache_key, counters)

        if prev_count and prev_time:
            elapsed = time.time() - prev_time
            rates = [(new - old) / elapsed
                     for new, old in zip(counters, prev_count)]
        else:
            # First measurement: no baseline to diff against yet.
            rates = [0] * 6

        yield (key, InterfaceFeature._make(rates), "interface")
def crawl(self, container_id=None, avoid_setns=False, root_dir='/',
          **kwargs):
    """Crawl installed packages for a container.

    Prefers running the package manager inside the container's
    namespaces; falls back to scanning the container's root filesystem
    from the host when setns is avoided or fails.
    """
    logger.debug('Crawling packages for container %s' % container_id)
    state = exec_dockerinspect(container_id)['State']
    pid = str(state['Pid'])

    def _crawl_from_host():
        # Read the package database straight out of the container's
        # rootfs as seen from the host.
        rootfs_dir = get_docker_container_rootfs_path(container_id)
        return crawl_packages(
            root_dir=join_abs_paths(rootfs_dir, root_dir),
            reload_needed=True)

    if avoid_setns:
        return _crawl_from_host()

    # Default path, also taken for an unrecognized mode.
    try:
        return run_as_another_namespace(pid, ALL_NAMESPACES,
                                        crawl_packages, None,
                                        root_dir, 0, False)
    except CrawlError:
        # setns() can fail (e.g. on PPC where we cannot jump into the
        # container to run apt/rpm); retry from outside the container.
        return _crawl_from_host()
def crawl(self, container_id, avoid_setns=False, **kwargs):
    """Yield per-interface counter rates for a container.

    Emits (feature_key, InterfaceFeature, 'interface') tuples where the
    feature carries per-second deltas against the cached previous
    snapshot; the very first sample yields all zeros.
    """
    logger.debug('Crawling %s for container %s' % (self.get_feature(),
                                                   container_id))
    container = DockerContainer(container_id)
    if avoid_setns:
        raise NotImplementedError('avoidsetns mode not implemented')

    interfaces = run_as_another_namespace(
        container.pid, ['net'], self._crawl_interface_counters)

    for ifname, counters in interfaces:
        key = '{0}-{1}'.format('interface', ifname)
        cache_key = '{0}-{1}-{2}'.format(container.long_id,
                                         container.pid, key)
        prev_count, prev_time = self._cache_get_value(cache_key)
        self._cache_put_value(cache_key, counters)

        if prev_count and prev_time:
            elapsed = time.time() - prev_time
            rates = [(new - old) / elapsed
                     for new, old in zip(counters, prev_count)]
        else:
            # First measurement: nothing to diff against yet.
            rates = [0] * 6

        yield (key, InterfaceFeature._make(rates), 'interface')
def crawl(self, container_id=None, avoid_setns=False, root_dir='/',
          **kwargs):
    """Crawl installed packages for a container.

    Runs the crawl inside the container's namespaces when possible,
    otherwise (avoid_setns, or setns failure) scans the container's
    rootfs from the host side.
    """
    logger.debug('Crawling packages for container %s' % container_id)
    container_state = exec_dockerinspect(container_id)['State']
    container_pid = str(container_state['Pid'])

    if avoid_setns:
        rootfs = get_docker_container_rootfs_path(container_id)
        return crawl_packages(
            root_dir=join_abs_paths(rootfs, root_dir),
            reload_needed=True)

    # Default path, also taken for an unrecognized mode.
    try:
        return run_as_another_namespace(container_pid, ALL_NAMESPACES,
                                        crawl_packages, None,
                                        root_dir, 0, False)
    except CrawlError:
        # setns() can fail (e.g. on PPC where we cannot jump into the
        # container and run its apt or rpm commands); retry by reading
        # the rootfs from the host instead.
        rootfs = get_docker_container_rootfs_path(container_id)
        return crawl_packages(
            root_dir=join_abs_paths(rootfs, root_dir),
            reload_needed=True)
def crawl(self, container_id=None, **kwargs):
    """Crawl a redis container and report its INFO metrics.

    Returns ``[(feature_key, attributes, feature_type)]`` on success,
    ``None`` when the container exposes no redis port or has no
    non-loopback IPv4 address.

    Raises ConnectionError when redis does not answer on the port.
    """
    try:
        import redis
    except ImportError:
        # Best-effort runtime install of the missing client library.
        import pip
        pip.main(['install', 'redis'])
        import redis

    # only crawl redis container. Otherwise, quit.
    c = dockercontainer.DockerContainer(container_id)
    port = self.get_port(c)
    if not port:
        return

    state = c.inspect['State']
    pid = str(state['Pid'])
    ips = run_as_another_namespace(pid, ['net'],
                                   utils.misc.get_host_ip4_addresses)

    # Connect to the first non-loopback address.
    ip = None
    for each_ip in ips:
        if each_ip != "127.0.0.1":
            ip = each_ip
            break
    if ip is None:
        # Bug fix: previously 'ip' was left unbound (NameError) when
        # only 127.0.0.1 was reported; skip the container instead.
        logger.info("no non-loopback address for container %s",
                    container_id)
        return

    client = redis.Redis(host=ip, port=port)
    try:
        metrics = client.info()
    except Exception:
        logger.info("redis does not listen on port:%d", port)
        # Bug fix: interpolate the port into the message instead of
        # passing printf-style args to the exception constructor.
        raise ConnectionError("no listen at %d" % port)

    feature_attributes = feature.create_feature(metrics)
    return [(self.feature_key, feature_attributes, self.feature_type)]
def test_run_as_another_namespace_function_mixed_args(self): res = run_as_another_namespace(self.pid, all_namespaces, func_mixed_args, "arg1", arg2="arg2") assert res == "test arg1 arg2" print sys._getframe().f_code.co_name, 1
def crawl(self, container_id, avoid_setns=False, **kwargs):
    """Crawl load averages from inside the container's namespaces."""
    container = DockerContainer(container_id)
    logger.debug('Crawling %s for container %s' %
                 (self.get_feature(), container_id))
    if avoid_setns:
        raise NotImplementedError()
    # Default path, also taken for an unrecognized mode: jump into the
    # container's namespaces and crawl from inside.
    return run_as_another_namespace(container.pid, ALL_NAMESPACES,
                                    self.crawl_load)
def crawl(self, container_id, avoid_setns=False, **kwargs):
    """Crawl OS information from inside the container's namespaces."""
    container_state = utils.dockerutils.exec_dockerinspect(
        container_id)['State']
    container_pid = str(container_state['Pid'])
    logger.debug('Crawling OS for container %s' % container_id)
    if avoid_setns:
        raise NotImplementedError()
    # Default path, also taken for an unrecognized mode.
    return run_as_another_namespace(container_pid, ALL_NAMESPACES,
                                    self.crawl_in_system)
def crawl(self, container_id, avoid_setns=False, **kwargs):
    """Crawl metrics from inside the container's namespaces."""
    container_state = utils.dockerutils.exec_dockerinspect(
        container_id)['State']
    container_pid = str(container_state['Pid'])
    logger.debug('Crawling %s for container %s' %
                 (self.get_feature(), container_id))
    if avoid_setns:
        raise NotImplementedError('avoidsetns mode not implemented')
    # Default path, also taken for an unrecognized mode.
    return run_as_another_namespace(container_pid, ALL_NAMESPACES,
                                    crawl_metrics)
def crawl(self, container_id, avoid_setns=False, **kwargs):
    """Crawl the OS feature either via setns or from the host rootfs."""
    container_state = utils.dockerutils.exec_dockerinspect(
        container_id)['State']
    container_pid = str(container_state['Pid'])
    logger.debug('Crawling OS for container %s' % container_id)
    if avoid_setns:
        # Inspect the container's root filesystem from the host side.
        mountpoint = utils.dockerutils.get_docker_container_rootfs_path(
            container_id)
        return crawl_os_mountpoint(mountpoint)
    # Default path, also taken for an unrecognized mode: crawl from
    # inside the container's namespaces.
    return run_as_another_namespace(container_pid, ALL_NAMESPACES,
                                    crawl_os)
def crawl(self, container_id=None, **kwargs):
    """Crawl tomcat metrics from a container.

    Resolves the tomcat port (kubernetes port annotation first, then
    the container's exposed ports) and queries the metrics endpoint on
    the container's first non-loopback IPv4 address.

    Returns None when the default tomcat port is not exposed.
    Raises ConnectionError when no endpoint is reachable.
    """
    password, user = self.get_opt(kwargs)
    c = dockercontainer.DockerContainer(container_id)

    # Prefer the kubernetes port annotation; fall back to the ports
    # exposed by the container itself.
    labels = c.inspect['Config']['Labels']
    if "annotation.io.kubernetes.container.ports" in labels:
        ports = json.loads(
            labels['annotation.io.kubernetes.container.ports'])
    else:
        ports = c.get_container_ports()

    port = None
    for each_port in ports:
        if "containerPort" in each_port:
            candidate = int(each_port['containerPort'])
        else:
            candidate = int(each_port)
        if candidate == self.default_port:
            port = candidate
    if not port:
        return

    state = c.inspect['State']
    pid = str(state['Pid'])
    ips = run_as_another_namespace(
        pid, ['net'], utils.misc.get_host_ip4_addresses)

    ip = None
    for each_ip in ips:
        if each_ip != "127.0.0.1":
            ip = each_ip
            break
    if ip is None:
        # Bug fix: 'ip' used to be unbound (NameError) when only the
        # loopback address was present.
        raise ConnectionError(
            "%s has no accessible endpoint for %s" %
            (container_id, self.feature_key))

    # crawl all candidate ports
    try:
        return tomcat_crawler.retrieve_metrics(
            host=ip, port=port, user=user, password=password,
            feature_type=self.feature_type)
    except Exception:
        # Bug fix: interpolate the message (printf-style args were
        # previously passed unformatted to the exception constructor).
        raise ConnectionError(
            "%s has no accessible endpoint for %s" %
            (container_id, self.feature_key))
def crawl(self, container_id, avoid_setns=False, **kwargs):
    """Crawl installed packages, via setns or from the host rootfs."""
    container_state = utils.dockerutils.exec_dockerinspect(
        container_id)['State']
    container_pid = str(container_state['Pid'])
    logger.debug('Crawling OS for container %s' % container_id)
    if avoid_setns:
        return self._crawl_without_setns(container_id)
    # Default path, also taken for an unrecognized mode.
    self.get_packages_generic = False  # can be made an arg to crawl()
    return run_as_another_namespace(container_pid, ALL_NAMESPACES,
                                    self._crawl_in_system)
def crawl(self, container_id, avoid_setns=False, **kwargs):
    """Crawl load information from inside the container's namespaces."""
    container = DockerContainer(container_id)
    logger.debug('Crawling %s for container %s' %
                 (self.get_feature(), container_id))
    if avoid_setns:
        raise NotImplementedError()
    # Default path, also taken for an unrecognized mode.
    return run_as_another_namespace(container.pid, ALL_NAMESPACES,
                                    self.crawl_load)
def crawl(self, container_id=None, **kwargs):
    """Crawl DB2 metrics from a container.

    Resolves the DB2 port (kubernetes port annotation first, then the
    container's exposed ports) and queries DB2 on the container's first
    non-loopback IPv4 address.

    Returns None when the default DB2 port is not exposed.
    Raises ConnectionError when DB2 cannot be reached.
    """
    password, user, db = self.get_opt(kwargs)
    c = dockercontainer.DockerContainer(container_id)

    # Prefer the kubernetes port annotation; fall back to the ports
    # exposed by the container itself.
    labels = c.inspect['Config']['Labels']
    if "annotation.io.kubernetes.container.ports" in labels:
        ports = json.loads(
            labels['annotation.io.kubernetes.container.ports'])
    else:
        ports = c.get_container_ports()

    port = None
    for each_port in ports:
        if "containerPort" in each_port:
            candidate = int(each_port['containerPort'])
        else:
            candidate = int(each_port)
        if candidate == self.default_port:
            port = candidate
    if not port:
        return

    state = c.inspect['State']
    pid = str(state['Pid'])
    ips = run_as_another_namespace(
        pid, ['net'], utils.misc.get_host_ip4_addresses)

    ip = None
    for each_ip in ips:
        if each_ip != "127.0.0.1":
            ip = each_ip
            break
    if ip is None:
        # Bug fix: 'ip' used to be unbound (NameError) when only the
        # loopback address was reported.
        raise ConnectionError(
            "no non-loopback address for container %s" % container_id)

    try:
        metrics = db2_crawler.retrieve_metrics(
            host=ip,
            user=user,
            password=password,
            db=db,
        )
    except Exception:
        logger.info("db2 does not listen on port:%d", port)
        # Bug fix: interpolate the port; the printf-style args were
        # previously stored unformatted on the exception.
        raise ConnectionError("db2 does not listen on port:%d" % port)

    return [(self.feature_key, metrics, self.feature_type)]
def crawl(self, container_id=None, **kwargs):
    """Crawl DB2 metrics from a container.

    Finds the DB2 port (kubernetes port annotation, then the
    container's exposed ports) and connects on the first non-loopback
    IPv4 address of the container.

    Returns None when the default DB2 port is not exposed.
    Raises ConnectionError when DB2 cannot be reached.
    """
    password, user, db = self.get_opt(kwargs)
    c = dockercontainer.DockerContainer(container_id)

    # Kubernetes port annotation wins over the exposed-ports list.
    labels = c.inspect['Config']['Labels']
    if "annotation.io.kubernetes.container.ports" in labels:
        ports = json.loads(
            labels['annotation.io.kubernetes.container.ports'])
    else:
        ports = c.get_container_ports()

    port = None
    for each_port in ports:
        if "containerPort" in each_port:
            candidate = int(each_port['containerPort'])
        else:
            candidate = int(each_port)
        if candidate == self.default_port:
            port = candidate
    if not port:
        return

    state = c.inspect['State']
    pid = str(state['Pid'])
    ips = run_as_another_namespace(pid, ['net'],
                                   utils.misc.get_host_ip4_addresses)

    ip = None
    for each_ip in ips:
        if each_ip != "127.0.0.1":
            ip = each_ip
            break
    if ip is None:
        # Bug fix: previously 'ip' could be unbound (NameError) when
        # only 127.0.0.1 was reported.
        raise ConnectionError(
            "no non-loopback address for container %s" % container_id)

    try:
        metrics = db2_crawler.retrieve_metrics(
            host=ip,
            user=user,
            password=password,
            db=db,
        )
    except Exception:
        logger.info("db2 does not listen on port:%d", port)
        # Bug fix: interpolate the port into the exception message.
        raise ConnectionError("db2 does not listen on port:%d" % port)

    return [(self.feature_key, metrics, self.feature_type)]
def crawl(self, container_id=None, **kwargs):
    """Crawl tomcat metrics from a container.

    Finds the tomcat port (kubernetes port annotation, then the
    container's exposed ports) and queries the metrics endpoint on the
    container's first non-loopback IPv4 address.

    Returns None when the default tomcat port is not exposed.
    Raises ConnectionError when no endpoint is reachable.
    """
    password, user = self.get_opt(kwargs)
    c = dockercontainer.DockerContainer(container_id)

    # Kubernetes port annotation wins over the exposed-ports list.
    labels = c.inspect['Config']['Labels']
    if "annotation.io.kubernetes.container.ports" in labels:
        ports = json.loads(
            labels['annotation.io.kubernetes.container.ports'])
    else:
        ports = c.get_container_ports()

    port = None
    for each_port in ports:
        if "containerPort" in each_port:
            candidate = int(each_port['containerPort'])
        else:
            candidate = int(each_port)
        if candidate == self.default_port:
            port = candidate
    if not port:
        return

    state = c.inspect['State']
    pid = str(state['Pid'])
    ips = run_as_another_namespace(pid, ['net'],
                                   utils.misc.get_host_ip4_addresses)

    ip = None
    for each_ip in ips:
        if each_ip != "127.0.0.1":
            ip = each_ip
            break
    if ip is None:
        # Bug fix: 'ip' used to be unbound (NameError) when only the
        # loopback address was reported.
        raise ConnectionError(
            "%s has no accessible endpoint for %s" %
            (container_id, self.feature_key))

    # crawl all candidate ports
    try:
        return tomcat_crawler.retrieve_metrics(
            host=ip, port=port, user=user, password=password,
            feature_type=self.feature_type)
    except Exception:
        # Bug fix: interpolate the message; printf-style args were
        # previously passed unformatted to the exception constructor.
        raise ConnectionError(
            "%s has no accessible endpoint for %s" %
            (container_id, self.feature_key))
def crawl(
        self,
        container_id=None,
        avoid_setns=False,
        root_dir='/',
        exclude_dirs=(
            '/dev',
            '/proc',
            '/mnt',
            '/tmp',
            '/var/cache',
            '/usr/share/man',
            '/usr/share/doc',
            '/usr/share/mime'),
        known_config_files=(
            '/etc/passwd',
            '/etc/group',
            '/etc/hosts',
            '/etc/hostname',
            '/etc/mtab',
            '/etc/fstab',
            '/etc/aliases',
            '/etc/ssh/ssh_config',
            '/etc/ssh/sshd_config',
            '/etc/sudoers'),
        discover_config_files=False,
        **kwargs):
    """Crawl configuration files of a container.

    Either scans the container's rootfs from the host (avoid_setns) or
    jumps into the container's mount namespace.

    Bug fix: the default arguments used to be mutable lists (the shared
    mutable-default pitfall); they are now tuples, converted to lists
    internally so downstream code sees the same types as before.
    """
    exclude_dirs = list(exclude_dirs)
    known_config_files = list(known_config_files)

    inspect = utils.dockerutils.exec_dockerinspect(container_id)
    state = inspect['State']
    pid = str(state['Pid'])
    logger.debug('Crawling config for container %s' % container_id)

    if avoid_setns:
        rootfs_dir = utils.dockerutils.get_docker_container_rootfs_path(
            container_id)
        # Rebase the excluded directories onto the host-side rootfs.
        exclude_dirs = [utils.misc.join_abs_paths(rootfs_dir, d)
                        for d in exclude_dirs]
        return crawl_config_files(
            root_dir=utils.misc.join_abs_paths(rootfs_dir, root_dir),
            exclude_dirs=exclude_dirs,
            root_dir_alias=root_dir,
            known_config_files=known_config_files,
            discover_config_files=discover_config_files)
    else:
        # in all other cases, including wrong mode set
        return run_as_another_namespace(pid,
                                        ['mnt'],
                                        crawl_config_files,
                                        root_dir,
                                        exclude_dirs,
                                        None,
                                        known_config_files,
                                        discover_config_files)
def crawl(self, container_id, avoid_setns=False, **kwargs):
    """Crawl disk partitions from inside the container's namespaces."""
    container_state = utils.dockerutils.exec_dockerinspect(
        container_id)['State']
    container_pid = str(container_state['Pid'])
    logger.debug('Crawling %s for container %s' %
                 (self.get_feature(), container_id))
    if avoid_setns:
        raise NotImplementedError('avoidsetns mode not implemented')
    # Default path, also taken for an unrecognized mode.
    return run_as_another_namespace(container_pid, ALL_NAMESPACES,
                                    crawl_disk_partitions)
def crawl(self, container_id=None, **kwargs):
    """Crawl nginx metrics from a container.

    Finds the nginx port (kubernetes port annotation, then the
    container's exposed ports) and queries the status endpoint on the
    container's first non-loopback IPv4 address.

    Returns None when the default nginx port is not exposed.
    Raises ConnectionError when the metrics endpoint is unreachable.
    """
    c = dockercontainer.DockerContainer(container_id)

    # Kubernetes port annotation wins over the exposed-ports list.
    labels = c.inspect['Config']['Labels']
    if "annotation.io.kubernetes.container.ports" in labels:
        ports = json.loads(
            labels['annotation.io.kubernetes.container.ports'])
    else:
        ports = c.get_container_ports()

    port = None
    for each_port in ports:
        if "containerPort" in each_port:
            candidate = int(each_port['containerPort'])
        else:
            candidate = int(each_port)
        if candidate == self.default_port:
            port = candidate
    if not port:
        return

    state = c.inspect['State']
    pid = str(state['Pid'])
    ips = run_as_another_namespace(pid, ['net'],
                                   utils.misc.get_host_ip4_addresses)

    ip = None
    for each_ip in ips:
        if each_ip != "127.0.0.1":
            ip = each_ip
            break
    if ip is None:
        # Bug fix: 'ip' used to be unbound (NameError) when only the
        # loopback address was reported.
        raise ConnectionError(
            "no non-loopback address for container %s" % container_id)

    # crawl all candidate ports
    try:
        metrics = nginx_crawler.retrieve_metrics(ip, port)
    except Exception:
        logger.error("can't find metrics endpoint at http://%s:%s",
                     ip, port)
        # Bug fixes: the implicit string concatenation was missing a
        # space ("endpointat"), and the printf-style args were never
        # interpolated into the message.
        raise ConnectionError(
            "can't find metrics endpoint at http://%s:%s" % (ip, port))

    return [(self.feature_key, metrics, self.feature_type)]
def start_container_fprobes(self, container_id, avoid_setns=False,
                            **kwargs):
    """
    Start flow probes on the given container's interfaces unless they
    are already running; return the list of interface names visited.
    """
    container_state = utils.dockerutils.exec_dockerinspect(
        container_id)['State']
    container_pid = str(container_state['Pid'])
    if avoid_setns:
        raise NotImplementedError('avoidsetns mode not implemented')

    ifnames = []
    try:
        peers = run_as_another_namespace(container_pid, ['net'],
                                         self._crawl_in_system)
        for peer in peers:
            # In rare cases an interface has no IP address assigned to
            # it yet; skip it now and try again on the next crawl.
            if len(peer.ip_addresses) == 0:
                continue
            try:
                ifname = if_indextoname(peer.peer_ifindex)
            except:
                continue
            ifnames.append(ifname)

            if self.need_start_fprobe(ifname):
                logger.info('Need to start softflowd on %s' % ifname)
                # Renamed from 'pid' to avoid shadowing the container
                # pid above.
                probe_pid = self.start_netflow_collection(
                    ifname, peer.ip_addresses, container_id, **kwargs)
                if probe_pid:
                    FprobeContainerCrawler.fprobes_started[ifname] = \
                        probe_pid
    except Exception as ex:
        logger.info("Error: %s" % str(ex))

    return ifnames
def start_container_fprobes(self, container_id, avoid_setns=False,
                            **kwargs):
    """
    Ensure flow probes run on each of the container's interfaces,
    starting softflowd where needed; returns the interface names seen.
    """
    container_state = utils.dockerutils.exec_dockerinspect(
        container_id)['State']
    container_pid = str(container_state['Pid'])
    if avoid_setns:
        raise NotImplementedError('avoidsetns mode not implemented')

    ifnames = []
    try:
        peers = run_as_another_namespace(container_pid, ['net'],
                                         self._crawl_in_system)
        for peer in peers:
            # Interfaces occasionally have no IP address assigned yet;
            # skip them and pick them up on the next crawl.
            if len(peer.ip_addresses) == 0:
                continue
            try:
                ifname = if_indextoname(peer.peer_ifindex)
            except:
                continue
            ifnames.append(ifname)

            if self.need_start_fprobe(ifname):
                logger.info('Need to start softflowd on %s' % ifname)
                # 'probe_pid' instead of 'pid' to avoid shadowing the
                # container pid above.
                probe_pid = self.start_netflow_collection(
                    ifname, peer.ip_addresses, container_id, **kwargs)
                if probe_pid:
                    FprobeContainerCrawler.fprobes_started[ifname] = \
                        probe_pid
    except Exception as ex:
        logger.info("Error: %s" % str(ex))

    return ifnames
def crawl(
        self,
        container_id=None,
        avoid_setns=False,
        root_dir='/',
        exclude_dirs=(
            '/boot',
            '/dev',
            '/proc',
            '/sys',
            '/mnt',
            '/tmp',
            '/var/cache',
            '/usr/share/man',
            '/usr/share/doc',
            '/usr/share/mime'),
        **kwargs):
    """Crawl the file tree of a container.

    Either scans the container's rootfs from the host (avoid_setns) or
    jumps into the container's mount namespace.

    Bug fix: the default for ``exclude_dirs`` used to be a mutable list
    (the shared mutable-default pitfall); it is now a tuple, converted
    to a list internally so downstream code sees the same type.
    """
    exclude_dirs = list(exclude_dirs)

    inspect = utils.dockerutils.exec_dockerinspect(container_id)
    state = inspect['State']
    pid = str(state['Pid'])
    logger.debug('Crawling file for container %s' % container_id)

    if avoid_setns:
        rootfs_dir = utils.dockerutils.get_docker_container_rootfs_path(
            container_id)
        # Rebase the excluded directories onto the host-side rootfs.
        exclude_dirs = [utils.misc.join_abs_paths(rootfs_dir, d)
                        for d in exclude_dirs]
        return crawl_files(
            root_dir=utils.misc.join_abs_paths(rootfs_dir, root_dir),
            exclude_dirs=exclude_dirs,
            root_dir_alias=root_dir)
    else:
        # in all other cases, including wrong mode set
        return run_as_another_namespace(pid,
                                        ['mnt'],
                                        crawl_files,
                                        root_dir,
                                        exclude_dirs,
                                        None)
def crawl(self,
          container_id=None,
          avoid_setns=False,
          root_dir='/',
          exclude_dirs=(
              '/dev', '/proc', '/mnt', '/tmp', '/var/cache',
              '/usr/share/man', '/usr/share/doc', '/usr/share/mime'),
          known_config_files=(
              '/etc/passwd', '/etc/group', '/etc/hosts',
              '/etc/hostname', '/etc/mtab', '/etc/fstab',
              '/etc/aliases', '/etc/ssh/ssh_config',
              '/etc/ssh/sshd_config', '/etc/sudoers'),
          discover_config_files=False,
          **kwargs):
    """Crawl configuration files of a container.

    Either scans the container's rootfs from the host (avoid_setns) or
    jumps into the container's mount namespace.

    Bug fix: the default arguments used to be mutable lists (the shared
    mutable-default pitfall); they are now tuples, converted to lists
    internally so downstream code sees the same types as before.
    """
    exclude_dirs = list(exclude_dirs)
    known_config_files = list(known_config_files)

    inspect = utils.dockerutils.exec_dockerinspect(container_id)
    state = inspect['State']
    pid = str(state['Pid'])
    logger.debug('Crawling config for container %s' % container_id)

    if avoid_setns:
        rootfs_dir = utils.dockerutils.get_docker_container_rootfs_path(
            container_id)
        # Rebase the excluded directories onto the host-side rootfs.
        exclude_dirs = [
            utils.misc.join_abs_paths(rootfs_dir, d)
            for d in exclude_dirs
        ]
        return crawl_config_files(
            root_dir=utils.misc.join_abs_paths(rootfs_dir, root_dir),
            exclude_dirs=exclude_dirs,
            root_dir_alias=root_dir,
            known_config_files=known_config_files,
            discover_config_files=discover_config_files)
    else:
        # in all other cases, including wrong mode set
        return run_as_another_namespace(pid, ['mnt'], crawl_config_files,
                                        root_dir, exclude_dirs, None,
                                        known_config_files,
                                        discover_config_files)
def test_run_as_another_namespace_infinite_loop_function(self):
    """A function that never returns must trip the crawl timeout."""
    # assertRaises in callable form is equivalent to the context-manager
    # form used elsewhere.
    self.assertRaises(CrawlTimeoutError, run_as_another_namespace,
                      self.pid, all_namespaces, func_infinite_loop,
                      "arg")
def test_run_as_another_namespace_simple_function_no_args(self): res = run_as_another_namespace(self.pid, all_namespaces, func_no_args) assert res == "test default" print sys._getframe().f_code.co_name, 1
def test_run_as_another_namespace_crashing_function(self):
    """An exception raised inside the namespace propagates to the caller."""
    # assertRaises in callable form is equivalent to the context-manager
    # form used elsewhere.
    self.assertRaises(FooError, run_as_another_namespace,
                      self.pid, all_namespaces, func_crash, "arg")
def test_run_as_another_namespace_crashing_function(self):
    """A crash inside the target namespace surfaces as FooError here."""
    self.assertRaises(FooError, run_as_another_namespace,
                      self.pid, all_namespaces, func_crash, "arg")
def test_run_as_another_namespace_infinite_loop_function(self):
    """A non-terminating function surfaces as CrawlTimeoutError here."""
    self.assertRaises(CrawlTimeoutError, run_as_another_namespace,
                      self.pid, all_namespaces, func_infinite_loop,
                      "arg")
def test_run_as_another_namespace_function_mixed_args(self): res = run_as_another_namespace( self.pid, all_namespaces, func_mixed_args, "arg1", arg2="arg2") assert res == "test arg1 arg2" print sys._getframe().f_code.co_name, 1