def _get_reader(self):
    with open(self.data_file) as fhd:
        header = fhd.readline(2048).strip()  # just header chunk of file

    # TODO: detect CSV dialect for JTLs
    if header.startswith(self.AB_HEADER):
        reader = TSVDataReader(self.data_file, self.log)
        reader.url_label = "N/A"
        return reader
    elif header.startswith("<?xml"):
        return XMLJTLReader(self.data_file, self.log)
    elif self.PBENCH_FORMAT.match(header):
        return PBenchKPIReader(self.data_file, self.log, self.errors_file)
    elif header.startswith("RUN\t") or "\tRUN\t" in header:
        return GatlingLogReader(self.data_file, self.log, None)
    elif "timestamp" in header.lower() and "elapsed" in header.lower():
        return JTLReader(self.data_file, self.log, self.errors_file)
    elif "worker process" in header.lower() and header.startswith("worker."):
        return GrinderLogReader(self.data_file, self.log)
    else:
        self.log.info("Header line was: %s", header)
        raise TaurusInternalException("Unable to detect results format for: %s" % self.data_file)
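# Minimal standalone sketch of the header-sniffing idea above, mapping the first
# line of a results file to a format name instead of constructing a reader.
# AB_HEADER's value and the sample headers below are assumptions for illustration,
# not taken from the project.
AB_HEADER = "starttime\tseconds\tctime\tdtime\tttime\twait"  # assumed ab -g TSV layout


def sniff_format(header):
    if header.startswith(AB_HEADER):
        return "ab gnuplot TSV"
    if header.startswith("<?xml"):
        return "XML JTL"
    if header.startswith("RUN\t") or "\tRUN\t" in header:
        return "Gatling log"
    if "timestamp" in header.lower() and "elapsed" in header.lower():
        return "CSV JTL"
    if "worker process" in header.lower() and header.startswith("worker."):
        return "Grinder log"
    return None


assert sniff_format("<?xml version='1.0' encoding='UTF-8'?>") == "XML JTL"
assert sniff_format("timeStamp,elapsed,label,responseCode,success") == "CSV JTL"
assert sniff_format("plain text with no known markers") is None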
def test_read(self):
    log_path = os.path.abspath(RESOURCES_DIR + "ab/ab.tsv")
    obj = TSVDataReader(log_path, ROOT_LOGGER)
    list_of_values = list(obj.datapoints(True))

    self.assertEqual(len(list_of_values), 3)

    for values in list_of_values:
        self.assertTrue(1400000000 < values['ts'] < 1500000000)
def test_read(self):
    log_path = path.join(get_res_path('ab.tsv'))
    obj = TSVDataReader(log_path, logging.getLogger(''))
    list_of_values = list(obj.datapoints(True))

    self.assertEqual(len(list_of_values), 3)

    for values in list_of_values:
        self.assertTrue(1400000000 < values['ts'] < 1500000000)
        self.assertEqual(len(values), 5)
def test_read(self):
    log_path = os.path.abspath(RESOURCES_DIR + "ab/ab.tsv")
    obj = TSVDataReader(log_path, logging.getLogger(""))
    list_of_values = list(obj.datapoints(True))

    self.assertEqual(len(list_of_values), 3)

    for values in list_of_values:
        self.assertTrue(1400000000 < values['ts'] < 1500000000)
        self.assertEqual(len(values), 5)
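# Note on the bounds used in the tests above: 1400000000 and 1500000000 are plain
# epoch seconds, so the assertion checks that 'ts' was parsed as seconds
# (not millis or nanos) and falls in a sane range. Quick illustration:
from datetime import datetime, timezone

print(datetime.fromtimestamp(1400000000, tz=timezone.utc))  # 2014-05-13 16:53:20+00:00
print(datetime.fromtimestamp(1500000000, tz=timezone.utc))  # 2017-07-14 02:40:00+00:00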
def _get_reader(self):
    with open(self.data_file) as fhd:
        header = fhd.readline(2048).strip()  # just header chunk of file

    if header.startswith(self.AB_HEADER):
        reader = TSVDataReader(self.data_file, self.log)
        reader.url_label = "N/A"
        return reader
    elif header.startswith("<?xml"):
        return XMLJTLReader(self.data_file, self.log)
    elif header.startswith("RUN\t") or "\tRUN\t" in header:
        return GatlingLogReader(self.data_file, self.log, None)
    elif "timestamp" in header.lower() and "elapsed" in header.lower():
        return JTLReader(self.data_file, self.log, self.errors_file)
    elif re.match("^[0-9]{19},", header):
        # Vegeta CSV does not have a header, every line starts with a timestamp in nanoseconds
        return VegetaLogReader(self.data_file, self.log)
    else:
        self.log.info("Header line was: %s", header)
        raise TaurusInternalException("Unable to detect results format for: %s" % self.data_file)
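# Illustrative check of the Vegeta branch above: the CSV has no header line, so the
# format is recognised by the first field being a 19-digit nanosecond timestamp.
# The sample line is made up; only the leading-timestamp shape matters here.
import re

vegeta_like = "1533558705853609945,200,1100000"  # hypothetical Vegeta-style first line
jtl_header = "timeStamp,elapsed,label,responseCode"

assert re.match("^[0-9]{19},", vegeta_like)
assert not re.match("^[0-9]{19},", jtl_header)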