def _docker_wait_for_log(container: str, program: str, regex: str, start_line: int,
                         timeout: float) -> tuple:
    '''Wait for regex to appear in the program's log inside the container.

    Returns a (found, line_number, match_groups) tuple so callers can resume
    searching from the matched line on a subsequent call.
    '''
    logfilename = os.path.join(rootdir, 'logs', container, 'beerocks_{}.log'.format(program))
    # WSL doesn't support symlinks on NTFS, so resolve the symlink manually
    if on_wsl:
        logfilename = os.path.join(
            rootdir, 'logs', container,
            subprocess.check_output(["tail", "-2", logfilename])
            .decode('utf-8').rstrip(' \t\r\n\0'))
    deadline = time.monotonic() + timeout
    try:
        while True:
            with open(logfilename) as logfile:
                for (i, line) in enumerate(logfile.readlines()):
                    if i <= start_line:
                        continue
                    search = re.search(regex, line)
                    if search:
                        debug("Found '{}'\n\tin {}".format(regex, logfilename))
                        return (True, i, search.groups())
            if time.monotonic() < deadline:
                time.sleep(.3)
            else:
                err("Can't find '{}'\n\tin log of {} on {} after {}s".format(
                    regex, program, container, timeout))
                return (False, start_line, None)
    except OSError:
        err("Can't read log of {} on {}".format(program, container))
        return (False, start_line, None)
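# A minimal usage sketch, not part of the test suite: the container, program
# and regexes below are illustrative. It shows how the returned line number is
# fed back as start_line so a second wait resumes after the line that already
# matched, and how capture groups are picked up from the returned match.
def _example_wait_for_two_log_lines():
    found, line, groups = _docker_wait_for_log(
        container="agent", program="agent_wlan0",
        regex=r"received token (\d+)", start_line=0, timeout=60)
    if not found:
        return False
    token = groups[0]                      # first capture group of the regex
    found, line, _ = _docker_wait_for_log(
        "agent", "agent_wlan0",
        r"token {} acknowledged".format(token),
        line, 60)                          # resume after the matched line
    return found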
def start(self, outputfile_basename):
    '''Start tcpdump to outputfile.'''
    debug("Starting tcpdump, output file {}.pcap".format(outputfile_basename))
    os.makedirs(os.path.join(self.tcpdump_log_dir, 'logs'), exist_ok=True)
    self.current_outputfile = os.path.join(self.tcpdump_log_dir, outputfile_basename) + ".pcap"
    self.checkpoint_frame_number = 0
    command = [
        "tcpdump", "-i", self.interface, '-U', '-w', self.current_outputfile,
        # Capture only LLDP (EtherType 0x88CC) and IEEE 1905.1 (EtherType 0x893A) frames.
        "ether proto 0x88CC or ether proto 0x893A"
    ]
    self.tcpdump_proc = subprocess.Popen(command, stderr=subprocess.PIPE)
    # tcpdump takes a while to start up. Wait for the appropriate output before continuing.
    # poll() is None while tcpdump is still running, so we exit the loop if it
    # terminates for any reason.
    while self.tcpdump_proc.poll() is None:
        line = self.tcpdump_proc.stderr.readline()
        debug(line.decode()[:-1])  # strip off newline
        if line.startswith(b"tcpdump: listening on " + self.interface.encode()):
            # Make sure it doesn't block due to stderr buffering
            self.tcpdump_proc.stderr.close()
            break
    else:
        err("tcpdump terminated")
        self.tcpdump_proc = None
        self.current_outputfile = None
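# Illustrative check, not part of the sniffer class: the capture filter above
# only lets through LLDP (0x88CC) and IEEE 1905.1 (0x893A) frames, which can be
# confirmed with a plain tshark field query on the resulting pcap. The pcap
# path and function name are placeholders.
def _example_list_captured_ethertypes(pcap_path="logs/test_example.pcap"):
    import subprocess
    result = subprocess.run(
        ["tshark", "-r", pcap_path, "-T", "fields", "-e", "eth.type"],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # One EtherType per captured frame, e.g. 0x88cc or 0x893a.
    return result.stdout.decode("utf-8").split()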
def get_packet_capture(self):
    '''Get a list of packets from the last started tcpdump.'''
    if not self.current_outputfile:
        err("get_packet_capture but no capture file")
        return []
    tshark_command = ['tshark', '-r', self.current_outputfile, '-T', 'json', '-Y',
                      'frame.number >= {}'.format(self.checkpoint_frame_number)]
    tshark_result = subprocess.run(tshark_command, stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
    if tshark_result.returncode != 0:
        debug(tshark_result.stderr)
        debug("tshark failed: {}".format(tshark_result.returncode))
    # Regardless of the exit code, try to make something of the JSON that comes out, if any.
    try:
        # TLVs which have the same type are all recorded with the same key, therefore we lose
        # all but one of them if we use json.loads(tshark_result.stdout) directly.
        # https://stackoverflow.com/questions/29321677/python-json-parser-allow-duplicate-keys
        def rename_duplicate_key(key, dct):
            counter = 0
            unique_key = key
            while unique_key in dct:
                counter += 1
                unique_key = '{} {}'.format(key, counter + 1)
            return unique_key

        def rename_duplicates(pairs):
            dct = OrderedDict()
            for key, value in pairs:
                if key in dct:
                    key = rename_duplicate_key(key, dct)
                dct[key] = value
            return dct

        decoder = json.JSONDecoder(object_pairs_hook=rename_duplicates)
        capture = decoder.decode(tshark_result.stdout.decode('utf8'))
        return [Packet(x) for x in capture]
    except json.JSONDecodeError as error:
        err("capture JSON decoding failed: {}".format(error))
        return []
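# A standalone sketch of the duplicate-key handling above, runnable on its own.
# The sample JSON and function name are illustrative; tshark produces a similar
# structure where several TLVs of the same type end up under one repeated key.
def _example_decode_duplicate_keys():
    import json
    from collections import OrderedDict

    def rename_duplicates(pairs):
        dct = OrderedDict()
        for key, value in pairs:
            counter = 1
            unique_key = key
            while unique_key in dct:
                counter += 1
                unique_key = '{} {}'.format(key, counter)
            dct[unique_key] = value
        return dct

    sample = '{"tlv": {"type": 1}, "tlv": {"type": 1}, "tlv": {"type": 2}}'
    decoded = json.JSONDecoder(object_pairs_hook=rename_duplicates).decode(sample)
    # json.loads(sample) would keep only the last "tlv"; here all three objects
    # survive under the keys 'tlv', 'tlv 2' and 'tlv 3'.
    return decoded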
def run_tests(self, tests):
    '''Run all tests as specified on the command line.'''
    total_errors = 0
    if not tests:
        tests = self.tests
    for test in tests:
        test_full = 'test_' + test
        self.start_test(test)
        env.wired_sniffer.start(test_full)
        self.check_error = 0
        try:
            getattr(self, test_full)()
        finally:
            env.wired_sniffer.stop()
        if self.check_error != 0:
            err(test + " failed")
        else:
            message(test + " OK", 32)
        total_errors += self.check_error
    return total_errors
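# A small self-contained sketch of the dispatch convention used above: test
# names on the command line omit the "test_" prefix, and the full method name
# is looked up with getattr(). The class and test name are made up for
# illustration and are not part of the framework.
def _example_dispatch_by_name():
    class DemoSuite:
        def test_ping(self):
            return "ping ran"

    suite = DemoSuite()
    name = "ping"                       # as it would appear on the command line
    return getattr(suite, "test_" + name)()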
def __init__(self, *args, **kwargs):
    '''Generate dummy Station.'''
    self.args = args
    self.kwargs = kwargs
    # Accept either a nested "config" dict or the same keys passed directly.
    config = kwargs.get("config", kwargs)
    self.name = config.get("name", "station")
    self.mac = config.get("mac", None)
    if self.mac is None:
        msg = "{} device \"{}\" has no MAC!".format(self.model, self.name)
        err(msg)
        raise ValueError(msg)
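# A standalone sketch of the config lookup pattern above: kwargs.get("config",
# kwargs) accepts either a nested config dict or the same keys passed directly
# as keyword arguments. The function name and example values are illustrative.
def _example_config_fallback(**kwargs):
    config = kwargs.get("config", kwargs)
    return config.get("name", "station"), config.get("mac", None)

# Both calls return ("sta1", "aa:bb:cc:dd:ee:01"):
#   _example_config_fallback(config={"name": "sta1", "mac": "aa:bb:cc:dd:ee:01"})
#   _example_config_fallback(name="sta1", mac="aa:bb:cc:dd:ee:01")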
def start(self, outputfile_basename):
    '''Start tcpdump if enabled by config.'''
    if opts.tcpdump:
        debug("Starting tcpdump, output file {}.pcap".format(outputfile_basename))
        os.makedirs(os.path.join(opts.tcpdump_dir, 'logs'), exist_ok=True)
        outputfile = os.path.join(opts.tcpdump_dir, outputfile_basename) + ".pcap"
        command = ["tcpdump", "-i", self.interface, "-w", outputfile]
        self.tcpdump_proc = subprocess.Popen(command, stderr=subprocess.PIPE)
        # tcpdump takes a while to start up. Wait for the appropriate output before continuing.
        # poll() is None while tcpdump is still running, so we exit the loop if it
        # terminates for any reason.
        while self.tcpdump_proc.poll() is None:
            line = self.tcpdump_proc.stderr.readline()
            debug(line.decode()[:-1])  # strip off newline
            if line.startswith(b"tcpdump: listening on " + self.interface.encode()):
                # Make sure it doesn't block due to stderr buffering
                self.tcpdump_proc.stderr.close()
                break
        else:
            err("tcpdump terminated")
            self.tcpdump_proc = None
def get_packet_capture(self):
    '''Get a list of packets from the last started tcpdump.'''
    if not self.current_outputfile:
        err("get_packet_capture but no capture file")
        return []
    tshark_command = [
        'tshark', '-r', self.current_outputfile, '-T', 'json', '-Y',
        'frame.number >= {}'.format(self.checkpoint_frame_number)
    ]
    tshark_result = subprocess.run(tshark_command, stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
    if tshark_result.returncode != 0:
        err(tshark_result.stderr)
        err("tshark failed: {}".format(tshark_result.returncode))
        return []
    try:
        capture = json.loads(tshark_result.stdout)
        return [Packet(x) for x in capture]
    except json.JSONDecodeError as error:
        err("capture JSON decoding failed: {}".format(error))
        return []
def fail(self, msg: str) -> bool:
    '''Print a red error message, increment failure count and return False.'''
    err('FAIL: {}'.format(msg))
    return self.__fail_no_message()