def testMultipleRows(self):
  """Rows written one after another are joined with newlines in order."""
  writer = utils.CsvWriter()
  for row in [["foo", "quux"], ["bar", "norf"], ["baz", "thud"]]:
    writer.WriteRow(row)
  self.assertEqual(writer.Content(), "foo,quux\nbar,norf\nbaz,thud\n")
def testUnicode(self):
  """Non-ASCII cell values round-trip through the writer unchanged."""
  writer = utils.CsvWriter()
  rows = [
      ["jodła", "świerk", "dąb"],
      ["żyto", "jęczmień", "ryż"],
  ]
  for row in rows:
    writer.WriteRow(row)
  self.assertEqual(writer.Content(), "jodła,świerk,dąb\nżyto,jęczmień,ryż\n")
def _GenerateBodyExport(self, file_infos):
  """Yields one Sleuth Kit body-format line (as UTF-8 bytes) per file entry.

  Args:
    file_infos: An iterable of (path, stat entry, hash entry) tuples.

  Yields:
    UTF-8 encoded, pipe-delimited body-file rows, one per input tuple.
  """
  for path, st, hash_v in file_infos:
    # A fresh writer per entry so each yielded chunk is exactly one row.
    writer = utils.CsvWriter(delimiter=u"|")

    if hash_v and hash_v.md5:
      md5_str = hash_v.md5.HexDigest().decode("ascii")
    else:
      md5_str = u""

    # Details about Body format:
    # https://wiki.sleuthkit.org/index.php?title=Body_file
    # MD5|name|inode|mode_as_string|UID|GID|size|atime|mtime|ctime|crtime
    row = [md5_str, path]
    row.extend(
        unicode(value)
        for value in (st.st_ino, st.st_mode, st.st_uid, st.st_gid, st.st_size))
    # Missing timestamps are exported as 0.
    row.extend(
        unicode(int(ts or 0))
        for ts in (st.st_atime, st.st_mtime, st.st_ctime, st.st_crtime))
    writer.WriteRow(row)

    yield writer.Content().encode("utf-8")
def _GenerateDefaultExport(self, items):
  """Yields CSV content for the given timeline items in UTF-8 chunks.

  Args:
    items: A sequence of timeline items to export.

  Yields:
    UTF-8 encoded CSV chunks of at most self.CHUNK_SIZE rows each; the
    first chunk is prefixed with a header row.
  """
  writer = utils.CsvWriter()
  # Write header. Since we do not stick to a specific timeline format, we
  # can export a format suited for TimeSketch import.
  writer.WriteRow([u"Timestamp", u"Datetime", u"Message", u"Timestamp_desc"])

  for start in range(0, len(items), self.CHUNK_SIZE):
    chunk_items = items[start:start + self.CHUNK_SIZE]
    for item in chunk_items:
      writer.WriteRow([
          unicode(item.timestamp.AsMicrosecondsSinceEpoch()),
          unicode(item.timestamp),
          item.file_path,
          unicode(item.action),
      ])
    yield writer.Content().encode("utf-8")
    # Reset the writer so the next chunk contains only its own rows.
    writer = utils.CsvWriter()
def ProcessSingleTypeExportedValues(self, original_value_type,
                                    exported_values):
  """Streams one CSV file into the archive for a single exported value type.

  Args:
    original_value_type: The type the values were originally exported from;
      used for the file name and the export counters.
    exported_values: An iterator of exported values, all of the same class.

  Yields:
    Archive byte chunks: a file header, one or more CSV content chunks,
    and a file footer.
  """
  first_value = next(exported_values, None)
  if not first_value:
    return

  value_class = first_value.__class__
  yield self.archive_generator.WriteFileHeader(
      "%s/%s/from_%s.csv" % (self.path_prefix, value_class.__name__,
                             original_value_type.__name__))

  writer = utils.CsvWriter()
  # Write the CSV header based on first value class and write
  # the first value itself. All other values are guaranteed
  # to have the same class (see ProcessSingleTypeExportedValues definition).
  writer.WriteRow(self._GetCSVHeader(value_class))
  writer.WriteRow(self._GetCSVRow(first_value))
  yield self.archive_generator.WriteFileChunk(
      writer.Content().encode("utf-8"))

  # Counter starts from 1, as 1 value has already been written.
  counter = 1
  for batch in utils.Grouper(exported_values, self.ROW_BATCH):
    counter += len(batch)

    writer = utils.CsvWriter()
    for value in batch:
      writer.WriteRow(self._GetCSVRow(value))
    yield self.archive_generator.WriteFileChunk(
        writer.Content().encode("utf-8"))

  yield self.archive_generator.WriteFileFooter()

  self.export_counts.setdefault(
      original_value_type.__name__, dict())[value_class.__name__] = counter
def testCustomDelimiter(self):
  """A non-default delimiter is used between cells instead of a comma."""
  writer = utils.CsvWriter(delimiter="|")
  row = ["foo", "bar", "baz"]
  writer.WriteRow(row)
  self.assertEqual(writer.Content(), "foo|bar|baz\n")
def testSingleRow(self):
  """A single row produces comma-separated cells followed by a newline."""
  writer = utils.CsvWriter()
  row = ["foo", "bar", "baz"]
  writer.WriteRow(row)
  self.assertEqual(writer.Content(), "foo,bar,baz\n")
def testEmpty(self):
  """A writer with no rows yields empty content."""
  self.assertEqual(utils.CsvWriter().Content(), "")