def testMultipleRows(self):
  writer = csv.Writer()
  writer.WriteRow(["foo", "quux"])
  writer.WriteRow(["bar", "norf"])
  writer.WriteRow(["baz", "thud"])
  self.assertEqual(writer.Content(), "foo,quux\nbar,norf\nbaz,thud\n")

def testUnicode(self):
  writer = csv.Writer()
  writer.WriteRow(["jodła", "świerk", "dąb"])
  writer.WriteRow(["żyto", "jęczmień", "ryż"])
  self.assertEqual(writer.Content(), "jodła,świerk,dąb\nżyto,jęczmień,ryż\n")

def ProcessSingleTypeExportedValues(self, original_value_type, exported_values):
  first_value = next(exported_values, None)
  if not first_value:
    return

  yield self.archive_generator.WriteFileHeader(
      "%s/%s/from_%s.csv" % (self.path_prefix, first_value.__class__.__name__,
                             original_value_type.__name__))

  writer = csv.Writer()
  # Write the CSV header based on the first value's class, then write the
  # first value itself. All other values are guaranteed to have the same
  # class (see ProcessSingleTypeExportedValues definition).
  writer.WriteRow(self._GetCSVHeader(first_value.__class__))
  writer.WriteRow(self._GetCSVRow(first_value))

  chunk = writer.Content().encode("utf-8")
  yield self.archive_generator.WriteFileChunk(chunk)

  # Counter starts from 1, as 1 value has already been written.
  counter = 1
  for batch in collection.Batch(exported_values, self.ROW_BATCH):
    counter += len(batch)

    writer = csv.Writer()
    for value in batch:
      writer.WriteRow(self._GetCSVRow(value))

    chunk = writer.Content().encode("utf-8")
    yield self.archive_generator.WriteFileChunk(chunk)

  yield self.archive_generator.WriteFileFooter()

  self.export_counts.setdefault(
      original_value_type.__name__,
      dict())[first_value.__class__.__name__] = counter

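# The loop above relies on collection.Batch splitting an iterator into
# fixed-size chunks. Below is a minimal sketch of that assumed behaviour,
# for illustration only: BatchSketch is a hypothetical name, not GRR's
# actual collection helper.
import itertools


def BatchSketch(iterable, size):
  """Yields successive lists of at most `size` items from `iterable`."""
  iterator = iter(iterable)
  while True:
    batch = list(itertools.islice(iterator, size))
    if not batch:
      return
    yield batch
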
def testCustomDelimiter(self):
  writer = csv.Writer(delimiter="|")
  writer.WriteRow(["foo", "bar", "baz"])
  self.assertEqual(writer.Content(), "foo|bar|baz\n")

def testSingleRow(self):
  writer = csv.Writer()
  writer.WriteRow(["foo", "bar", "baz"])
  self.assertEqual(writer.Content(), "foo,bar,baz\n")

def testEmpty(self):
  writer = csv.Writer()
  self.assertEqual(writer.Content(), "")
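
# For context, a minimal sketch of a writer that would satisfy the tests
# above, assuming csv.Writer wraps the standard library's csv module around
# an in-memory text buffer. This is illustrative only: the class name
# WriterSketch and its attribute names are hypothetical, not GRR's API.
import csv as stdlib_csv
import io


class WriterSketch(object):
  """Accumulates CSV content in memory, one row at a time."""

  def __init__(self, delimiter=","):
    self._output = io.StringIO()
    self._writer = stdlib_csv.writer(
        self._output, delimiter=delimiter, lineterminator="\n")

  def WriteRow(self, values):
    self._writer.writerow(values)

  def Content(self):
    return self._output.getvalue()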