class PurgeTest(unittest.TestCase):
    """Tests that couch_compact's --purge-before option drops old deletions."""

    def setUp(self):
        self.tmpdir = mkdtemp()
        # os.path.join keeps the db files inside tmpdir; the original used
        # string "+" which drops the separator and creates siblings of the
        # temp directory instead (leaking files outside the cleaned-up dir).
        self.origname = os.path.join(self.tmpdir, "orig.couch")
        self.purgedname = os.path.join(self.tmpdir, "purged.couch")
        self.origdb = CouchStore(self.origname, 'c')

    def tearDown(self):
        self.origdb.close()
        os.remove(self.origname)
        os.remove(self.purgedname)
        os.rmdir(self.tmpdir)

    def testPurgeCompact(self):
        # Save some docs
        self.origdb.save("foo1", "bar")
        self.origdb.save("foo2", "baz")
        self.origdb.save("foo3", "bell")

        # Delete some: one stamped before the purge cutoff (15), one after.
        seqPurged = deleteAt(self.origdb, "foo2", 10)
        seqKept = deleteAt(self.origdb, "foo3", 20)
        self.origdb.commit()

        os.system("./couch_compact --purge-before 15 " +
                  self.origname + " " + self.purgedname)
        self.newdb = CouchStore(self.purgedname)

        # Check purged item is not present in key tree and kept item is
        self.assertRaises(KeyError, self.newdb.getInfo, "foo2")
        self.assertIsNotNone(self.newdb.getInfo("foo3"))

        # Check purged item is not present in seq tree and kept item is
        self.assertRaises(KeyError, self.newdb.getInfoBySequence, seqPurged)
        self.assertIsNotNone(self.newdb.getInfoBySequence(seqKept))
def main():
    """Populate a new CouchStore file with generated documents.

    Usage: example1 <doc count> <file>
    """
    # Validate arguments up front instead of dying with an IndexError /
    # ValueError deep inside (the sibling example variant already does this).
    if len(sys.argv) != 3:
        print("Usage: example1 <doc count> <file>")
        exit(1)
    db = CouchStore(sys.argv[2], 'c')
    # Insert in batches; commit once per batch of BATCH_SIZE documents.
    for batch in chunks(range(0, int(sys.argv[1])), BATCH_SIZE):
        insert_multi(db,
                     ["key_" + str(x) for x in batch],
                     [str(x) for x in batch])
        db.commit()
    db.close()
def main():
    """Print the stored value of one or more keys from a couchstore file."""
    cli = argparse.ArgumentParser()
    cli.add_argument("file")
    cli.add_argument("keys", help="Key(s) to print", nargs="+")
    cli.add_argument("--unbuffered",
                     help="Disable couchstore io buffering",
                     action="store_true")
    opts = cli.parse_args()

    store = CouchStore(opts.file, 'r', unbuffered=opts.unbuffered)
    for wanted in opts.keys:
        print(store.get(wanted))
    store.close()
def main():
    """Create a couchstore file and fill it with generated key/value docs.

    Usage: example1 <doc count> <file>
    """
    if len(sys.argv) != 3:
        print("Usage: example1 <doc count> <file>")
        exit(1)

    doc_count = int(sys.argv[1])
    store = CouchStore(sys.argv[2], 'c')
    # Write in batches, committing after each one.
    for batch in chunks(list(range(0, doc_count)), BATCH_SIZE):
        keys = ["key_" + str(n) for n in batch]
        values = [str(n) for n in batch]
        insert_multi(store, keys, values)
        store.commit()
    store.close()
def testPurgeCompact(self):
    """Exercise couch_compact's --purge-before and --purge-only-upto-seq
    options against documents deleted at different timestamps/sequences."""
    # Save some docs
    self.origdb.save("foo1", "bar")
    self.origdb.save("foo2", "baz")
    self.origdb.save("foo3", "bell")
    self.origdb.save("foo4", "a")
    # Delete some: timestamps 10 and 11 fall before the purge cutoff (15),
    # timestamp 20 falls after it.
    seqPurged = deleteAt(self.origdb, "foo2", 10)
    seqKept = deleteAt(self.origdb, "foo3", 20)
    seqLateDelete = deleteAt(self.origdb, "foo4", 11)
    self.origdb.commit()

    # Compact with only a time cutoff: both pre-cutoff deletions are purged.
    os.system(path.join(os.getcwd(), "couch_compact") + " --purge-before 15 " + self.origname + " " + self.purgedname)
    self.newdb = CouchStore(self.purgedname)
    # Check purged item is not present in key tree and kept item is
    self.assertRaises(KeyError, self.newdb.getInfo, "foo2")
    self.assertIsNotNone(self.newdb.getInfo("foo3"))
    self.assertRaises(KeyError, self.newdb.getInfo, "foo4")
    self.newdb.close()

    # Compact again, additionally bounding the purge by sequence number.
    os.system(path.join(os.getcwd(), "couch_compact") + " --purge-before 15 --purge-only-upto-seq " + str(seqKept) + " " + self.origname + " " + self.purgedname)
    self.newdb = CouchStore(self.purgedname)
    # Check purged item is not present in key tree and kept item is
    self.assertRaises(KeyError, self.newdb.getInfo, "foo2")
    self.assertIsNotNone(self.newdb.getInfo("foo3"))
    # with purge-only-upto-seq just before deletion of foo4 we
    # must find it after compaction
    self.assertIsNotNone(self.newdb.getInfo("foo4"))
    self.newdb.close()

    # Raise the sequence bound to include foo4's deletion.
    os.system(path.join(os.getcwd(), "couch_compact") + " --purge-before 15 --purge-only-upto-seq " + str(seqLateDelete) + " " + self.origname + " " + self.purgedname)
    self.newdb = CouchStore(self.purgedname)
    # Check purged item is not present in key tree and kept item is
    self.assertRaises(KeyError, self.newdb.getInfo, "foo2")
    self.assertIsNotNone(self.newdb.getInfo("foo3"))
    # with purge-only-upto-seq just at deletion of foo4 we
    # must not find it after compaction
    self.assertRaises(KeyError, self.newdb.getInfo, "foo4")
    # Check purged item is not present in seq tree and kept item is
    self.assertRaises(KeyError, self.newdb.getInfoBySequence, seqPurged)
    self.assertIsNotNone(self.newdb.getInfoBySequence(seqKept))
def process(filename):
    """Print the id of every document in *filename* whose size is at least
    args.threshold bytes.

    Size is the decompressed content length when args.memory is set,
    otherwise the on-disk physical size; deleted docs count as size 0.
    """
    store = CouchStore(filename, 'r')
    for doc_info in store.changesSince(0):
        if doc_info.deleted:
            # Tombstones carry no body; treat them as zero-sized.
            size = 0
        elif args.memory:
            size = len(doc_info.getContents(options=CouchStore.DECOMPRESS))
        else:
            size = doc_info.physSize
        if size >= args.threshold:
            # print() call form works under both Python 2 and 3 for a
            # single argument (the original py2-only statement did not).
            print(doc_info.id)
    store.close()
def process(filename):
    """Fold the size of every document in *filename* into the module-level
    sizeHisto dict, keyed by bucketize(size).

    Size is the decompressed content length when args.memory is set,
    otherwise the on-disk physical size; deleted docs count as size 0.
    """
    store = CouchStore(filename, 'r')
    for doc_info in store.changesSince(0):
        if doc_info.deleted:
            size = 0
        elif args.memory:
            size = len(doc_info.getContents(options=CouchStore.DECOMPRESS))
        else:
            size = doc_info.physSize
        # Compute the bucket once per document instead of three times,
        # and use dict.get instead of the check-then-initialize dance.
        bucket = bucketize(size)
        sizeHisto[bucket] = sizeHisto.get(bucket, 0) + 1
    store.close()
def testMultipleMeta(self):
    """String revMeta must survive a bulk save, commit, close and re-open."""
    infos = []
    bodies = []
    for n in range(1000):
        info = DocumentInfo(str(n))
        info.revMeta = "hello-%s" % n
        infos.append(info)
        bodies.append("world-%s" % n)
    self.store.saveMultiple(infos, bodies)
    self.store.commit()
    self.store.close()

    # Re-open read-only and verify every doc kept its metadata and body.
    self.store = CouchStore("/tmp/test.couch", 'r')
    for doc_info in self.store.changesSince(0):
        n = int(doc_info.id)
        self.assertEqual(doc_info.revMeta, "hello-%s" % n)
        self.assertEqual(doc_info.getContents(), "world-%s" % n)
def testMetadataSave(self):
    """Metadata set on a DocumentInfo must survive commit, close and re-open."""
    info = DocumentInfo("meta")
    info.revSequence = 23
    info.revMeta = "fancy metadata here"
    info.contentType = DocumentInfo.INVALID_JSON
    self.store[info] = "the regular non-meta data"
    self.store.commit()
    self.store.close()
    self.store = CouchStore("/tmp/test.couch", 'r')
    gotInfo = self.store.getInfo("meta")
    # assertEqual, not the deprecated assertEquals alias.
    self.assertEqual(gotInfo.id, "meta")
    self.assertEqual(gotInfo.revSequence, info.revSequence)
    self.assertEqual(gotInfo.revMeta, info.revMeta)
    self.assertEqual(gotInfo.contentType, info.contentType)
    self.assertFalse(gotInfo.compressed)
def testMultipleMetaStruct(self):
    """Packed binary revMeta must survive a bulk save and re-open intact."""
    infos = []
    bodies = []
    for n in range(1000):
        info = DocumentInfo(str(n))
        info.revMeta = struct.pack(">QII", n * 3, n * 2, n)
        infos.append(info)
        bodies.append("world-%s" % n)
    self.store.saveMultiple(infos, bodies)
    self.store.commit()
    self.store.close()

    # Re-open and confirm the packed fields decode to the original values.
    self.store = CouchStore("/tmp/test.couch", 'r')
    for doc_info in self.store.changesSince(0):
        n = int(doc_info.id)
        first, second, third = struct.unpack(">QII", doc_info.revMeta)
        self.assertEqual(first, n * 3)
        self.assertEqual(second, n * 2)
        self.assertEqual(third, n * 1)
        self.assertEqual(doc_info.getContents(), "world-%s" % doc_info.id)
def testPurgeCompact(self):
    """Exercise couch_compact's --purge-before and --purge-only-upto-seq
    options against documents deleted at different timestamps/sequences."""
    # Save some docs
    self.origdb.save("foo1", "bar")
    self.origdb.save("foo2", "baz")
    self.origdb.save("foo3", "bell")
    self.origdb.save("foo4", "a")
    # Delete some: timestamps 10 and 11 fall before the purge cutoff (15),
    # timestamp 20 falls after it.
    seqPurged = deleteAt(self.origdb, "foo2", 10)
    seqKept = deleteAt(self.origdb, "foo3", 20)
    seqLateDelete = deleteAt(self.origdb, "foo4", 11)
    self.origdb.commit()

    # Compact with only a time cutoff: both pre-cutoff deletions are purged.
    os.system(
        path.join(os.getcwd(), "couch_compact") + " --purge-before 15 " +
        self.origname + " " + self.purgedname)
    self.newdb = CouchStore(self.purgedname)
    # Check purged item is not present in key tree and kept item is
    self.assertRaises(KeyError, self.newdb.getInfo, "foo2")
    self.assertIsNotNone(self.newdb.getInfo("foo3"))
    self.assertRaises(KeyError, self.newdb.getInfo, "foo4")
    self.newdb.close()

    # Compact again, additionally bounding the purge by sequence number.
    os.system(
        path.join(os.getcwd(), "couch_compact") +
        " --purge-before 15 --purge-only-upto-seq " + str(seqKept) + " " +
        self.origname + " " + self.purgedname)
    self.newdb = CouchStore(self.purgedname)
    # Check purged item is not present in key tree and kept item is
    self.assertRaises(KeyError, self.newdb.getInfo, "foo2")
    self.assertIsNotNone(self.newdb.getInfo("foo3"))
    # with purge-only-upto-seq just before deletion of foo4 we
    # must find it after compaction
    self.assertIsNotNone(self.newdb.getInfo("foo4"))
    self.newdb.close()

    # Raise the sequence bound to include foo4's deletion.
    os.system(
        path.join(os.getcwd(), "couch_compact") +
        " --purge-before 15 --purge-only-upto-seq " + str(seqLateDelete) + " " +
        self.origname + " " + self.purgedname)
    self.newdb = CouchStore(self.purgedname)
    # Check purged item is not present in key tree and kept item is
    self.assertRaises(KeyError, self.newdb.getInfo, "foo2")
    self.assertIsNotNone(self.newdb.getInfo("foo3"))
    # with purge-only-upto-seq just at deletion of foo4 we
    # must not find it after compaction
    self.assertRaises(KeyError, self.newdb.getInfo, "foo4")
    # Check purged item is not present in seq tree and kept item is
    self.assertRaises(KeyError, self.newdb.getInfoBySequence, seqPurged)
    self.assertIsNotNone(self.newdb.getInfoBySequence(seqKept))
def main():
    """Populate a new CouchStore file with generated documents.

    Usage: example1 <doc count> <file>
    """
    if len(sys.argv) != 3:
        # print() call form works under both Python 2 and 3 for one
        # argument (the original py2-only print statement did not).
        print("Usage: example1 <doc count> <file>")
        exit(1)
    db = CouchStore(sys.argv[2], 'c')
    # Wrap range in list() so batching works on Python 3 as well,
    # matching the sibling example variant.
    for batch in chunks(list(range(0, int(sys.argv[1]))), BATCH_SIZE):
        insert_multi(db,
                     ["key_" + str(x) for x in batch],
                     [str(x) for x in batch])
        db.commit()
    db.close()
def testPurgeCompact(self):
    """A deletion stamped before the purge cutoff must vanish from both the
    by-key and by-sequence indexes; a later one must survive."""
    # Save some docs
    self.origdb.save("foo1", "bar")
    self.origdb.save("foo2", "baz")
    self.origdb.save("foo3", "bell")

    # Delete some: timestamp 10 is before the cutoff (15), 20 is after.
    seqPurged = deleteAt(self.origdb, "foo2", 10)
    seqKept = deleteAt(self.origdb, "foo3", 20)
    self.origdb.commit()

    # Resolve the compactor from the working directory like the other
    # purge-test variants do, instead of the bare "./couch_compact".
    os.system(os.path.join(os.getcwd(), "couch_compact") +
              " --purge-before 15 " + self.origname + " " + self.purgedname)
    self.newdb = CouchStore(self.purgedname)

    # Check purged item is not present in key tree and kept item is
    self.assertRaises(KeyError, self.newdb.getInfo, "foo2")
    self.assertIsNotNone(self.newdb.getInfo("foo3"))

    # Check purged item is not present in seq tree and kept item is
    self.assertRaises(KeyError, self.newdb.getInfoBySequence, seqPurged)
    self.assertIsNotNone(self.newdb.getInfoBySequence(seqKept))
def setUp(self):
    """Start each test with a brand-new writable store at a fixed path."""
    dbfile = "/tmp/test.couch"
    removeIfExists(dbfile)
    self.store = CouchStore(dbfile, 'c')
class CouchStoreTest (unittest.TestCase):
    """Unit tests for the CouchStore Python wrapper.

    Every test runs against a fresh store created at /tmp/test.couch.
    """

    def setUp(self):
        removeIfExists("/tmp/test.couch")
        self.store = CouchStore("/tmp/test.couch", 'c')

    def tearDown(self):
        self.store.close()
        os.remove("/tmp/test.couch")

    def testBasicSave(self):
        # The first save into an empty store gets sequence number 1.
        sequence = self.store.save("foo", "value of foo")
        self.assertEqual(sequence, 1)
        self.store.commit()
        value = self.store.get("foo")
        self.assertEqual(value, "value of foo")

    def testMissingKey(self):
        self.assertRaises(KeyError, self.store.get, "nonexistent")

    def testBadKey(self):
        # Non-string keys are rejected with TypeError.
        self.assertRaises(TypeError, self.store.get, 0)
        self.assertRaises(TypeError, self.store.get, None)
        self.assertRaises(TypeError, self.store.get, [123])

    def testInfo(self):
        value = "value"
        sequence = self.store.save("foo", value)
        self.assertEqual(sequence, 1)
        info = self.store.getInfo("foo")
        self.assertEqual(info.id, "foo")
        self.assertEqual(info.sequence, sequence)
        self.assertFalse(info.deleted)
        #self.assertEqual(info.size, len(value)) #FIXME: Not currently equal, due to bug in CouchStore itself
        self.assertEqual(info.getContents(), value)

    def testInfoBySequence(self):
        value = "value"
        sequence = self.store.save("foo", value)
        self.assertEqual(sequence, 1)
        info = self.store.getInfoBySequence(sequence)
        self.assertEqual(info.id, "foo")
        self.assertEqual(info.sequence, sequence)
        self.assertFalse(info.deleted)
        #self.assertEqual(info.size, len(value)) #FIXME: Not currently equal, due to bug in CouchStore itself
        self.assertEqual(info.getContents(), value)

    def testMissingSequence(self):
        self.store.save("foo", "value")
        self.assertRaises(KeyError, self.store.getInfoBySequence, 99999)
        self.assertRaises(TypeError, self.store.getInfoBySequence, "huh")

    def testNoContents(self):
        # A DocumentInfo that was never read from a store has no contents.
        info = DocumentInfo("howdy")
        self.assertRaises(Exception, info.getContents)

    def testMetadata(self):
        info = DocumentInfo("meta")
        info.revSequence = 23
        info.revMeta = "fancy metadata here"
        info.contentType = DocumentInfo.INVALID_JSON
        self.store[info] = "the regular non-meta data"
        gotInfo = self.store.getInfo("meta")
        self.assertEquals(gotInfo.id, "meta")
        self.assertEquals(gotInfo.revSequence, info.revSequence)
        self.assertEquals(gotInfo.revMeta, info.revMeta)
        self.assertEquals(gotInfo.contentType, info.contentType)
        self.assertFalse(gotInfo.compressed)

    def testMetadataSave(self):
        # Same as testMetadata, but commit, close and re-open the file first.
        info = DocumentInfo("meta")
        info.revSequence = 23
        info.revMeta = "fancy metadata here"
        info.contentType = DocumentInfo.INVALID_JSON
        self.store[info] = "the regular non-meta data"
        self.store.commit()
        self.store.close()
        self.store = CouchStore("/tmp/test.couch", 'r')
        gotInfo = self.store.getInfo("meta")
        self.assertEquals(gotInfo.id, "meta")
        self.assertEquals(gotInfo.revSequence, info.revSequence)
        self.assertEquals(gotInfo.revMeta, info.revMeta)
        self.assertEquals(gotInfo.contentType, info.contentType)
        self.assertFalse(gotInfo.compressed)

    def testCompression(self):
        value = "this value is text and text is valued"
        self.store.save("key", value, CouchStore.COMPRESS)
        self.assertEqual(self.store.get("key", CouchStore.DECOMPRESS), value)
        info = self.store.getInfo("key")
        self.assertTrue(info.compressed)

    def expectedKey(self, i):
        # Key generator shared by the multi-document tests.
        # NOTE(review): "%2d" space-pads to width 2 — "%02d" may have been
        # intended; kept as-is since tests only compare round-trips.
        return "key_%2d" % (i + 1)

    def expectedValue(self, i):
        # Value generator paired with expectedKey.
        return "Hi there! I'm value #%d!" % (i + 1)

    def addDocs(self, n):
        # Save n docs one at a time.
        for i in xrange(n):
            self.store.save(self.expectedKey(i), self.expectedValue(i))

    def addBulkDocs(self, n):
        # Save n docs in a single bulk call.
        ids = [self.expectedKey(i) for i in xrange(n)]
        datas = [self.expectedValue(i) for i in xrange(n)]
        self.store.saveMultiple(ids, datas)

    def testMultipleDocs(self):
        self.addDocs(1000)
        for i in xrange(1000):
            self.assertEqual(self.store[self.expectedKey(i)], self.expectedValue(i))
        info = self.store.getDbInfo()
        self.assertEquals(info.filename, "/tmp/test.couch")
        self.assertEquals(info.last_sequence, 1000)
        self.assertEquals(info.doc_count, 1000)
        self.assertEquals(info.deleted_count, 0)

    def testBulkDocs(self):
        self.addBulkDocs(1000)
        for i in xrange(1000):
            self.assertEqual(self.store[self.expectedKey(i)], self.expectedValue(i))

    def testDelete(self):
        self.store["key"] = "value"
        del self.store["key"]
        self.assertRaises(KeyError, self.store.get, "key")
        # The tombstone is still reachable via getInfo.
        info = self.store.getInfo("key")
        self.assertTrue(info.deleted)
        self.assertEqual(info.id, "key")
        info = self.store.getDbInfo()
        self.assertEquals(info.last_sequence, 2)
        self.assertEquals(info.doc_count, 0)
        self.assertEquals(info.deleted_count, 1)

    def testChangesSince(self):
        self.addDocs(50)
        changes = self.store.changesSince(0)
        self.assertEqual(len(changes), 50)
        for i in xrange(50):
            self.assertEqual(changes[i].id, self.expectedKey(i))

    def testForAllDocs(self):
        self.addDocs(50)
        docCount = [0]  # one-element list so the closure can mutate it

        def checkDoc(docInfo):
            self.assertEquals(docInfo.id, self.expectedKey(docCount[0]))
            docCount[0] += 1

        self.store.forEachDoc(None, None, checkDoc)
        self.assertEqual(docCount[0], 50)

    def testDocumentInfoRepr(self):
        self.addDocs(1)

        def checkDoc(docInfo):
            expected = "DocumentInfo('%s', %d bytes)" % (docInfo.id, docInfo.physSize)
            self.assertEquals(str(docInfo), expected)
            self.assertEquals(repr(docInfo), expected)

        self.store.forEachDoc(None, None, checkDoc)

    def testForSomeDocs(self):
        self.addDocs(50)
        docCount = [0]

        def checkDoc(docInfo):
            self.assertEquals(docInfo.id, self.expectedKey(docCount[0]))
            docCount[0] += 1

        # Iterate up to a key, from a key, and between two keys; the end
        # points are inclusive.
        self.store.forEachDoc(None, self.expectedKey(10), checkDoc)
        self.assertEqual(docCount[0], 11)

        docCount = [10]
        self.store.forEachDoc(self.expectedKey(10), None, checkDoc)
        self.assertEqual(docCount[0], 50)

        docCount = [10]
        self.store.forEachDoc(self.expectedKey(10), self.expectedKey(20), checkDoc)
        self.assertEqual(docCount[0], 21)

    def testLocalDocs(self):
        locals = self.store.localDocs
        self.assertRaises(KeyError, locals.__getitem__, "hello")
        locals["hello"] = "goodbye"
        self.assertEqual(locals["hello"], "goodbye")
        locals["hello"] = "bonjour"
        self.assertEqual(locals["hello"], "bonjour")
        del locals["hello"]
        self.assertRaises(KeyError, locals.__getitem__, "hello")

    def testSizedBuf(self):
        # Converting Python strings to/from SizedBufs is tricky enough (when
        # the strings might contain null bytes) that it's worth a unit test of
        # its own.
        data = "foooooobarrrr"
        buf = SizedBuf(data)
        self.assertEqual(buf.size, len(data))
        self.assertEqual(str(buf), data)
        # Now try some binary data with nul bytes in it:
        data = "foo\000bar"
        buf = SizedBuf(data)
        self.assertEqual(buf.size, len(data))
        self.assertEqual(str(buf), data)

    def testBinaryMeta(self):
        # Make sure binary data, as produced by Python's struct module, works
        # in revMeta.
        packed = struct.pack(">QII", 0, 1, 2)
        d = DocumentInfo("bin")
        d.revMeta = packed
        self.store[d] = "value"
        doc_info = self.store.getInfo("bin")
        self.assertEqual(doc_info.revMeta, packed)
        i1, i2, i3 = struct.unpack(">QII", doc_info.revMeta)
        self.assertEqual(i1, 0)
        self.assertEqual(i2, 1)
        self.assertEqual(i3, 2)

    def testMultipleMeta(self):
        # String revMeta must survive a bulk save and a close/re-open cycle.
        k = []
        v = []
        for i in range(1000):
            d = DocumentInfo(str(i))
            d.revMeta = "hello-%s" % i
            k.append(d)
            v.append("world-%s" % i)
        self.store.saveMultiple(k, v)
        self.store.commit()
        self.store.close()
        self.store = CouchStore("/tmp/test.couch", 'r')
        for doc_info in self.store.changesSince(0):
            i = int(doc_info.id)
            self.assertEqual(doc_info.revMeta, "hello-%s" % i)
            doc_contents = doc_info.getContents()
            self.assertEqual(doc_contents, "world-%s" % i)

    def testMultipleMetaStruct(self):
        # Packed binary revMeta must survive a bulk save and re-open.
        k = []
        v = []
        for i in range(1000):
            d = DocumentInfo(str(i))
            d.revMeta = struct.pack(">QII", i * 3, i * 2, i)
            k.append(d)
            v.append("world-%s" % i)
        self.store.saveMultiple(k, v)
        self.store.commit()
        self.store.close()
        self.store = CouchStore("/tmp/test.couch", 'r')
        for doc_info in self.store.changesSince(0):
            i = int(doc_info.id)
            i3, i2, i1 = struct.unpack(">QII", doc_info.revMeta)
            self.assertEqual(i3, i * 3)
            self.assertEqual(i2, i * 2)
            self.assertEqual(i1, i * 1)
            doc_contents = doc_info.getContents()
            self.assertEqual(doc_contents, "world-%s" % doc_info.id)
class PurgeTest(unittest.TestCase):
    """Tests for couch_compact's deletion-purging options."""

    def setUp(self):
        self.tmpdir = mkdtemp()
        self.origname = path.join(self.tmpdir, "orig.couch")
        self.purgedname = path.join(self.tmpdir, "purged.couch")
        self.origdb = CouchStore(self.origname, 'c');

    def tearDown(self):
        # Best-effort cleanup: any of these may not exist if the test
        # failed partway through.
        try:
            self.origdb.close()
        except:
            pass
        try:
            self.newdb.close()
        except:
            pass
        try:
            os.remove(self.origname)
        except:
            pass
        try:
            os.remove(self.purgedname)
        except:
            pass
        try:
            os.rmdir(self.tmpdir)
        except:
            pass

    def testPurgeCompact(self):
        # Save some docs
        self.origdb.save("foo1", "bar")
        self.origdb.save("foo2", "baz")
        self.origdb.save("foo3", "bell")
        self.origdb.save("foo4", "a")
        # Delete some: timestamps 10 and 11 precede the purge cutoff (15),
        # timestamp 20 follows it.
        seqPurged = deleteAt(self.origdb, "foo2", 10)
        seqKept = deleteAt(self.origdb, "foo3", 20)
        seqLateDelete = deleteAt(self.origdb, "foo4", 11)
        self.origdb.commit()

        # Time-cutoff only: both pre-cutoff deletions are purged.
        os.system(path.join(os.getcwd(), "couch_compact") + " --purge-before 15 " + self.origname + " " + self.purgedname)
        self.newdb = CouchStore(self.purgedname)
        # Check purged item is not present in key tree and kept item is
        self.assertRaises(KeyError, self.newdb.getInfo, "foo2")
        self.assertIsNotNone(self.newdb.getInfo("foo3"))
        self.assertRaises(KeyError, self.newdb.getInfo, "foo4")
        self.newdb.close()

        # Additionally bound the purge by sequence number.
        os.system(path.join(os.getcwd(), "couch_compact") + " --purge-before 15 --purge-only-upto-seq " + str(seqKept) + " " + self.origname + " " + self.purgedname)
        self.newdb = CouchStore(self.purgedname)
        # Check purged item is not present in key tree and kept item is
        self.assertRaises(KeyError, self.newdb.getInfo, "foo2")
        self.assertIsNotNone(self.newdb.getInfo("foo3"))
        # with purge-only-upto-seq just before deletion of foo4 we
        # must find it after compaction
        self.assertIsNotNone(self.newdb.getInfo("foo4"))
        self.newdb.close()

        # Raise the sequence bound to include foo4's deletion.
        os.system(path.join(os.getcwd(), "couch_compact") + " --purge-before 15 --purge-only-upto-seq " + str(seqLateDelete) + " " + self.origname + " " + self.purgedname)
        self.newdb = CouchStore(self.purgedname)
        # Check purged item is not present in key tree and kept item is
        self.assertRaises(KeyError, self.newdb.getInfo, "foo2")
        self.assertIsNotNone(self.newdb.getInfo("foo3"))
        # with purge-only-upto-seq just at deletion of foo4 we
        # must not find it after compaction
        self.assertRaises(KeyError, self.newdb.getInfo, "foo4")
        # Check purged item is not present in seq tree and kept item is
        self.assertRaises(KeyError, self.newdb.getInfoBySequence, seqPurged)
        self.assertIsNotNone(self.newdb.getInfoBySequence(seqKept))
def setUp(self):
    """Create a scratch directory holding a fresh writable store."""
    workdir = mkdtemp()
    self.tmpdir = workdir
    self.dbname = path.join(workdir, "testing.couch")
    self.db = CouchStore(self.dbname, 'c')
class RewindTest(unittest.TestCase):
    """Tests CouchStore.rewindHeader(), which reverts to the previous commit."""

    def setUp(self):
        self.tmpdir = mkdtemp()
        # os.path.join keeps the db file inside tmpdir; the original used
        # string "+" which drops the separator and creates a sibling of the
        # temp directory instead (so it leaks on cleanup).
        self.dbname = os.path.join(self.tmpdir, "testing.couch")
        self.db = CouchStore(self.dbname, 'c')

    def tearDown(self):
        # Best-effort cleanup; ignore whatever a failed test left behind.
        try:
            self.db.close()
        except:
            pass
        try:
            os.remove(self.dbname)
        except:
            pass
        try:
            os.rmdir(self.tmpdir)
        except:
            pass

    def testRewind(self):
        # Save some docs
        self.db.save("foo1", "bar")
        self.db.save("foo2", "baz")
        self.db.save("foo3", "bell")
        self.db.save("foo4", "a")
        self.db.commit()

        # Edit some docs
        self.db.save("foo1", "new_bar")
        self.db.save("foo2", "new_baz")
        self.db.save("foo3", "new_bell")
        self.db.save("foo4", "new_a")
        self.db.commit()

        # The edits happened...
        self.assertNotEqual(self.db["foo3"], "bell")
        self.assertEqual(self.db["foo4"], "new_a")

        # rewind
        self.db.rewindHeader()

        # did we go back in time?
        self.assertEqual(self.db["foo3"], "bell")
        self.assertNotEqual(self.db["foo4"], "new_a")
def setUp(self):
    """Create a temp dir with source and destination database paths inside it."""
    self.tmpdir = mkdtemp()
    # os.path.join keeps the files inside tmpdir; the original used string
    # "+" which drops the separator and creates siblings of the temp
    # directory instead, leaking them past tearDown's rmdir.
    self.origname = os.path.join(self.tmpdir, "orig.couch")
    self.purgedname = os.path.join(self.tmpdir, "purged.couch")
    self.origdb = CouchStore(self.origname, 'c')
def setUp(self):
    """Make a scratch dir plus source/compacted db paths within it."""
    scratch = mkdtemp()
    self.tmpdir = scratch
    self.origname = path.join(scratch, "orig.couch")
    self.purgedname = path.join(scratch, "purged.couch")
    self.origdb = CouchStore(self.origname, 'c')
class ChangeCountTest(unittest.TestCase):
    """Tests for CouchStore.changesCount(min_seq, max_seq)."""

    def setUp(self):
        self.tmpdir = mkdtemp()
        self.dbname = path.join(self.tmpdir, "testing.couch")
        self.db = CouchStore(self.dbname, 'c')

    def tearDown(self):
        # Best-effort cleanup; ignore errors from partially-run tests.
        try:
            self.db.commit()
            self.db.close()
        except:
            pass
        try:
            os.remove(self.dbname)
        except:
            pass
        try:
            os.rmdir(self.tmpdir)
        except:
            pass

    def bulkSet(self, prefix, n):
        # Save n docs named prefix0..prefix(n-1) in one bulk operation.
        ids = [prefix + str(x) for x in xrange(n)]
        datas = ["val" + str(x) for x in xrange(n)]
        self.db.saveMultiple(ids, datas)

    def testRewind(self):
        # Save some docs
        self.db.save("foo1", "bar")
        self.db.save("foo2", "baz")
        self.db.save("foo3", "bell")
        self.db.save("foo4", "a")
        self.assertEqual(self.db.changesCount(0, 100), 4)

        # Re-saving the same keys creates new sequences but supersedes the
        # old ones, so the change count stays at 4.
        self.db.save("foo1", "new_bar")
        self.db.save("foo2", "new_baz")
        self.db.save("foo3", "new_bell")
        self.db.save("foo4", "new_a")
        self.assertEqual(self.db.changesCount(0, 100), 4)

        self.bulkSet("foo", 100)
        self.assertEqual(self.db.changesCount(0, 108), 100)
        self.assertEqual(self.db.changesCount(0, 100), 92)
        self.assertEqual(self.db.changesCount(1, 100), 92)
        self.assertNotEqual(self.db.changesCount(12, 100), 92)
        self.assertEqual(self.db.changesCount(50, 99), 50)
        self.assertEqual(self.db.changesCount(50, 100), 51)
        self.assertEqual(self.db.changesCount(50, 108), 59)
        self.assertEqual(self.db.changesCount(51, 100), 50)
        self.assertEqual(self.db.changesCount(91, 1000), 18)
        self.db.save("foo88", "tval")
        self.assertEqual(self.db.changesCount(50, 108), 58)
        self.assertEqual(self.db.changesCount(50, 109), 59)
class PurgeTest(unittest.TestCase):
    """Tests for couch_compact's deletion-purging options."""

    def setUp(self):
        self.tmpdir = mkdtemp()
        self.origname = path.join(self.tmpdir, "orig.couch")
        self.purgedname = path.join(self.tmpdir, "purged.couch")
        self.origdb = CouchStore(self.origname, 'c')

    def tearDown(self):
        # Best-effort cleanup: any of these may not exist if the test
        # failed partway through.
        try:
            self.origdb.close()
        except:
            pass
        try:
            self.newdb.close()
        except:
            pass
        try:
            os.remove(self.origname)
        except:
            pass
        try:
            os.remove(self.purgedname)
        except:
            pass
        try:
            os.rmdir(self.tmpdir)
        except:
            pass

    def testPurgeCompact(self):
        # Save some docs
        self.origdb.save("foo1", "bar")
        self.origdb.save("foo2", "baz")
        self.origdb.save("foo3", "bell")
        self.origdb.save("foo4", "a")
        # Delete some: timestamps 10 and 11 precede the purge cutoff (15),
        # timestamp 20 follows it.
        seqPurged = deleteAt(self.origdb, "foo2", 10)
        seqKept = deleteAt(self.origdb, "foo3", 20)
        seqLateDelete = deleteAt(self.origdb, "foo4", 11)
        self.origdb.commit()

        # Time-cutoff only: both pre-cutoff deletions are purged.
        os.system(
            path.join(os.getcwd(), "couch_compact") + " --purge-before 15 " +
            self.origname + " " + self.purgedname)
        self.newdb = CouchStore(self.purgedname)
        # Check purged item is not present in key tree and kept item is
        self.assertRaises(KeyError, self.newdb.getInfo, "foo2")
        self.assertIsNotNone(self.newdb.getInfo("foo3"))
        self.assertRaises(KeyError, self.newdb.getInfo, "foo4")
        self.newdb.close()

        # Additionally bound the purge by sequence number.
        os.system(
            path.join(os.getcwd(), "couch_compact") +
            " --purge-before 15 --purge-only-upto-seq " + str(seqKept) + " " +
            self.origname + " " + self.purgedname)
        self.newdb = CouchStore(self.purgedname)
        # Check purged item is not present in key tree and kept item is
        self.assertRaises(KeyError, self.newdb.getInfo, "foo2")
        self.assertIsNotNone(self.newdb.getInfo("foo3"))
        # with purge-only-upto-seq just before deletion of foo4 we
        # must find it after compaction
        self.assertIsNotNone(self.newdb.getInfo("foo4"))
        self.newdb.close()

        # Raise the sequence bound to include foo4's deletion.
        os.system(
            path.join(os.getcwd(), "couch_compact") +
            " --purge-before 15 --purge-only-upto-seq " + str(seqLateDelete) + " " +
            self.origname + " " + self.purgedname)
        self.newdb = CouchStore(self.purgedname)
        # Check purged item is not present in key tree and kept item is
        self.assertRaises(KeyError, self.newdb.getInfo, "foo2")
        self.assertIsNotNone(self.newdb.getInfo("foo3"))
        # with purge-only-upto-seq just at deletion of foo4 we
        # must not find it after compaction
        self.assertRaises(KeyError, self.newdb.getInfo, "foo4")
        # Check purged item is not present in seq tree and kept item is
        self.assertRaises(KeyError, self.newdb.getInfoBySequence, seqPurged)
        self.assertIsNotNone(self.newdb.getInfoBySequence(seqKept))
class CouchStoreTest(unittest.TestCase):
    """Unit tests for the CouchStore Python wrapper.

    Every test runs against a fresh store created at /tmp/test.couch.
    """

    def setUp(self):
        removeIfExists("/tmp/test.couch")
        self.store = CouchStore("/tmp/test.couch", 'c')

    def tearDown(self):
        self.store.close()
        os.remove("/tmp/test.couch")

    def testBasicSave(self):
        # The first save into an empty store gets sequence number 1.
        sequence = self.store.save("foo", "value of foo")
        self.assertEqual(sequence, 1)
        self.store.commit()
        value = self.store.get("foo")
        self.assertEqual(value, "value of foo")

    def testMissingKey(self):
        self.assertRaises(KeyError, self.store.get, "nonexistent")

    def testBadKey(self):
        # Non-string keys are rejected with TypeError.
        self.assertRaises(TypeError, self.store.get, 0)
        self.assertRaises(TypeError, self.store.get, None)
        self.assertRaises(TypeError, self.store.get, [123])

    def testInfo(self):
        value = "value"
        sequence = self.store.save("foo", value)
        self.assertEqual(sequence, 1)
        info = self.store.getInfo("foo")
        self.assertEqual(info.id, "foo")
        self.assertEqual(info.sequence, sequence)
        self.assertFalse(info.deleted)
        #self.assertEqual(info.size, len(value)) #FIXME: Not currently equal, due to bug in CouchStore itself
        self.assertEqual(info.getContents(), value)

    def testInfoBySequence(self):
        value = "value"
        sequence = self.store.save("foo", value)
        self.assertEqual(sequence, 1)
        info = self.store.getInfoBySequence(sequence)
        self.assertEqual(info.id, "foo")
        self.assertEqual(info.sequence, sequence)
        self.assertFalse(info.deleted)
        #self.assertEqual(info.size, len(value)) #FIXME: Not currently equal, due to bug in CouchStore itself
        self.assertEqual(info.getContents(), value)

    def testMissingSequence(self):
        self.store.save("foo", "value")
        self.assertRaises(KeyError, self.store.getInfoBySequence, 99999)
        self.assertRaises(TypeError, self.store.getInfoBySequence, "huh")

    def testNoContents(self):
        # A DocumentInfo that was never read from a store has no contents.
        info = DocumentInfo("howdy")
        self.assertRaises(Exception, info.getContents)

    def testMetadata(self):
        info = DocumentInfo("meta")
        info.revSequence = 23
        info.revMeta = "fancy metadata here"
        info.contentType = DocumentInfo.INVALID_JSON
        self.store[info] = "the regular non-meta data"
        gotInfo = self.store.getInfo("meta")
        self.assertEquals(gotInfo.id, "meta")
        self.assertEquals(gotInfo.revSequence, info.revSequence)
        self.assertEquals(gotInfo.revMeta, info.revMeta)
        self.assertEquals(gotInfo.contentType, info.contentType)
        self.assertFalse(gotInfo.compressed)

    def testMetadataSave(self):
        # Same as testMetadata, but commit, close and re-open the file first.
        info = DocumentInfo("meta")
        info.revSequence = 23
        info.revMeta = "fancy metadata here"
        info.contentType = DocumentInfo.INVALID_JSON
        self.store[info] = "the regular non-meta data"
        self.store.commit()
        self.store.close()
        self.store = CouchStore("/tmp/test.couch", 'r')
        gotInfo = self.store.getInfo("meta")
        self.assertEquals(gotInfo.id, "meta")
        self.assertEquals(gotInfo.revSequence, info.revSequence)
        self.assertEquals(gotInfo.revMeta, info.revMeta)
        self.assertEquals(gotInfo.contentType, info.contentType)
        self.assertFalse(gotInfo.compressed)

    def testCompression(self):
        value = "this value is text and text is valued"
        self.store.save("key", value, CouchStore.COMPRESS)
        self.assertEqual(self.store.get("key", CouchStore.DECOMPRESS), value)
        info = self.store.getInfo("key")
        self.assertTrue(info.compressed)

    def expectedKey(self, i):
        # Key generator shared by the multi-document tests.
        # NOTE(review): "%2d" space-pads to width 2 — "%02d" may have been
        # intended; kept as-is since tests only compare round-trips.
        return "key_%2d" % (i + 1)

    def expectedValue(self, i):
        # Value generator paired with expectedKey.
        return "Hi there! I'm value #%d!" % (i + 1)

    def addDocs(self, n):
        # Save n docs one at a time.
        for i in xrange(n):
            self.store.save(self.expectedKey(i), self.expectedValue(i))

    def addBulkDocs(self, n):
        # Save n docs in a single bulk call.
        ids = [self.expectedKey(i) for i in xrange(n)]
        datas = [self.expectedValue(i) for i in xrange(n)]
        self.store.saveMultiple(ids, datas)

    def testMultipleDocs(self):
        self.addDocs(1000)
        for i in xrange(1000):
            self.assertEqual(self.store[self.expectedKey(i)], self.expectedValue(i))
        info = self.store.getDbInfo()
        self.assertEquals(info.filename, "/tmp/test.couch")
        self.assertEquals(info.last_sequence, 1000)
        self.assertEquals(info.doc_count, 1000)
        self.assertEquals(info.deleted_count, 0)

    def testBulkDocs(self):
        self.addBulkDocs(1000)
        for i in xrange(1000):
            self.assertEqual(self.store[self.expectedKey(i)], self.expectedValue(i))

    def testDelete(self):
        self.store["key"] = "value"
        del self.store["key"]
        self.assertRaises(KeyError, self.store.get, "key")
        # The tombstone is still reachable via getInfo.
        info = self.store.getInfo("key")
        self.assertTrue(info.deleted)
        self.assertEqual(info.id, "key")
        info = self.store.getDbInfo()
        self.assertEquals(info.last_sequence, 2)
        self.assertEquals(info.doc_count, 0)
        self.assertEquals(info.deleted_count, 1)

    def testChangesSince(self):
        self.addDocs(50)
        changes = self.store.changesSince(0)
        self.assertEqual(len(changes), 50)
        for i in xrange(50):
            self.assertEqual(changes[i].id, self.expectedKey(i))

    def testForAllDocs(self):
        self.addDocs(50)
        docCount = [0]  # one-element list so the closure can mutate it

        def checkDoc(docInfo):
            self.assertEquals(docInfo.id, self.expectedKey(docCount[0]))
            docCount[0] += 1

        self.store.forEachDoc(None, None, checkDoc)
        self.assertEqual(docCount[0], 50)

    def testDocumentInfoRepr(self):
        self.addDocs(1)

        def checkDoc(docInfo):
            expected = "DocumentInfo('%s', %d bytes)" % (docInfo.id, docInfo.physSize)
            self.assertEquals(str(docInfo), expected)
            self.assertEquals(repr(docInfo), expected)

        self.store.forEachDoc(None, None, checkDoc)

    def testForSomeDocs(self):
        self.addDocs(50)
        docCount = [0]

        def checkDoc(docInfo):
            self.assertEquals(docInfo.id, self.expectedKey(docCount[0]))
            docCount[0] += 1

        # Iterate up to a key, from a key, and between two keys; the end
        # points are inclusive.
        self.store.forEachDoc(None, self.expectedKey(10), checkDoc)
        self.assertEqual(docCount[0], 11)

        docCount = [10]
        self.store.forEachDoc(self.expectedKey(10), None, checkDoc)
        self.assertEqual(docCount[0], 50)

        docCount = [10]
        self.store.forEachDoc(self.expectedKey(10), self.expectedKey(20), checkDoc)
        self.assertEqual(docCount[0], 21)

    def testLocalDocs(self):
        locals = self.store.localDocs
        self.assertRaises(KeyError, locals.__getitem__, "hello")
        locals["hello"] = "goodbye"
        self.assertEqual(locals["hello"], "goodbye")
        locals["hello"] = "bonjour"
        self.assertEqual(locals["hello"], "bonjour")
        del locals["hello"]
        self.assertRaises(KeyError, locals.__getitem__, "hello")

    def testSizedBuf(self):
        # Converting Python strings to/from SizedBufs is tricky enough (when
        # the strings might contain null bytes) that it's worth a unit test of
        # its own.
        data = "foooooobarrrr"
        buf = SizedBuf(data)
        self.assertEqual(buf.size, len(data))
        self.assertEqual(str(buf), data)
        # Now try some binary data with nul bytes in it:
        data = "foo\000bar"
        buf = SizedBuf(data)
        self.assertEqual(buf.size, len(data))
        self.assertEqual(str(buf), data)

    def testBinaryMeta(self):
        # Make sure binary data, as produced by Python's struct module, works
        # in revMeta.
        packed = struct.pack(">QII", 0, 1, 2)
        d = DocumentInfo("bin")
        d.revMeta = packed
        self.store[d] = "value"
        doc_info = self.store.getInfo("bin")
        self.assertEqual(doc_info.revMeta, packed)
        i1, i2, i3 = struct.unpack(">QII", doc_info.revMeta)
        self.assertEqual(i1, 0)
        self.assertEqual(i2, 1)
        self.assertEqual(i3, 2)

    def testMultipleMeta(self):
        # String revMeta must survive a bulk save and a close/re-open cycle.
        k = []
        v = []
        for i in range(1000):
            d = DocumentInfo(str(i))
            d.revMeta = "hello-%s" % i
            k.append(d)
            v.append("world-%s" % i)
        self.store.saveMultiple(k, v)
        self.store.commit()
        self.store.close()
        self.store = CouchStore("/tmp/test.couch", 'r')
        for doc_info in self.store.changesSince(0):
            i = int(doc_info.id)
            self.assertEqual(doc_info.revMeta, "hello-%s" % i)
            doc_contents = doc_info.getContents()
            self.assertEqual(doc_contents, "world-%s" % i)

    def testMultipleMetaStruct(self):
        # Packed binary revMeta must survive a bulk save and re-open.
        k = []
        v = []
        for i in range(1000):
            d = DocumentInfo(str(i))
            d.revMeta = struct.pack(">QII", i * 3, i * 2, i)
            k.append(d)
            v.append("world-%s" % i)
        self.store.saveMultiple(k, v)
        self.store.commit()
        self.store.close()
        self.store = CouchStore("/tmp/test.couch", 'r')
        for doc_info in self.store.changesSince(0):
            i = int(doc_info.id)
            i3, i2, i1 = struct.unpack(">QII", doc_info.revMeta)
            self.assertEqual(i3, i * 3)
            self.assertEqual(i2, i * 2)
            self.assertEqual(i1, i * 1)
            doc_contents = doc_info.getContents()
            self.assertEqual(doc_contents, "world-%s" % doc_info.id)
def setUp(self):
    """Create a temp dir and a fresh writable database inside it."""
    self.tmpdir = mkdtemp()
    # os.path.join keeps the db file inside tmpdir; the original used
    # string "+" which drops the separator and creates a sibling of the
    # temp directory instead, leaking it past tearDown's rmdir.
    self.dbname = os.path.join(self.tmpdir, "testing.couch")
    self.db = CouchStore(self.dbname, 'c')
class ChangeCountTest(unittest.TestCase):
    """Tests for CouchStore.changesCount(min_seq, max_seq).

    Cleaned up to match the sibling variant: no stray semicolons and
    PEP 8 spacing in the changesCount(...) argument lists.
    """

    def setUp(self):
        self.tmpdir = mkdtemp()
        self.dbname = path.join(self.tmpdir, "testing.couch")
        self.db = CouchStore(self.dbname, 'c')

    def tearDown(self):
        # Best-effort cleanup; ignore errors from partially-run tests.
        try:
            self.db.commit()
            self.db.close()
        except:
            pass
        try:
            os.remove(self.dbname)
        except:
            pass
        try:
            os.rmdir(self.tmpdir)
        except:
            pass

    def bulkSet(self, prefix, n):
        # Save n docs named prefix0..prefix(n-1) in one bulk operation.
        ids = [prefix + str(x) for x in xrange(n)]
        datas = ["val" + str(x) for x in xrange(n)]
        self.db.saveMultiple(ids, datas)

    def testRewind(self):
        # Save some docs
        self.db.save("foo1", "bar")
        self.db.save("foo2", "baz")
        self.db.save("foo3", "bell")
        self.db.save("foo4", "a")
        self.assertEqual(self.db.changesCount(0, 100), 4)

        # Re-saving the same keys creates new sequences but supersedes the
        # old ones, so the change count stays at 4.
        self.db.save("foo1", "new_bar")
        self.db.save("foo2", "new_baz")
        self.db.save("foo3", "new_bell")
        self.db.save("foo4", "new_a")
        self.assertEqual(self.db.changesCount(0, 100), 4)

        self.bulkSet("foo", 100)
        self.assertEqual(self.db.changesCount(0, 108), 100)
        self.assertEqual(self.db.changesCount(0, 100), 92)
        self.assertEqual(self.db.changesCount(1, 100), 92)
        self.assertNotEqual(self.db.changesCount(12, 100), 92)
        self.assertEqual(self.db.changesCount(50, 99), 50)
        self.assertEqual(self.db.changesCount(50, 100), 51)
        self.assertEqual(self.db.changesCount(50, 108), 59)
        self.assertEqual(self.db.changesCount(51, 100), 50)
        self.assertEqual(self.db.changesCount(91, 1000), 18)
        self.db.save("foo88", "tval")
        self.assertEqual(self.db.changesCount(50, 108), 58)
        self.assertEqual(self.db.changesCount(50, 109), 59)
def setUp(self):
    """Create a temp dir with paths for the source and compacted dbs."""
    self.tmpdir = mkdtemp()
    self.origname = path.join(self.tmpdir, "orig.couch")
    self.purgedname = path.join(self.tmpdir, "purged.couch")
    # Drop the stray trailing semicolon (un-Pythonic; the sibling
    # variant of this fixture has none).
    self.origdb = CouchStore(self.origname, 'c')
def setUp(self):
    """Create a temp dir and a fresh writable database inside it."""
    self.tmpdir = mkdtemp()
    self.dbname = path.join(self.tmpdir, "testing.couch")
    # Drop the stray trailing semicolon (un-Pythonic; the sibling
    # variant of this fixture has none).
    self.db = CouchStore(self.dbname, 'c')
class RewindTest(unittest.TestCase):
    """Tests CouchStore.rewindHeader(), which reverts to the previous commit."""

    def setUp(self):
        self.tmpdir = mkdtemp()
        self.dbname = path.join(self.tmpdir, "testing.couch")
        self.db = CouchStore(self.dbname, 'c');

    def tearDown(self):
        # Best-effort cleanup; ignore whatever a failed test left behind.
        try:
            self.db.close()
        except:
            pass
        try:
            os.remove(self.dbname)
        except:
            pass
        try:
            os.rmdir(self.tmpdir)
        except:
            pass

    def testRewind(self):
        # Save some docs
        self.db.save("foo1", "bar")
        self.db.save("foo2", "baz")
        self.db.save("foo3", "bell")
        self.db.save("foo4", "a")
        self.db.commit()
        # Edit some docs
        self.db.save("foo1", "new_bar")
        self.db.save("foo2", "new_baz")
        self.db.save("foo3", "new_bell")
        self.db.save("foo4", "new_a")
        self.db.commit()
        # The edits happened...
        self.assertNotEqual(self.db["foo3"], "bell");
        self.assertEqual(self.db["foo4"], "new_a");
        # rewind
        self.db.rewindHeader()
        # did we go back in time?
        self.assertEqual(self.db["foo3"], "bell");
        self.assertNotEqual(self.db["foo4"], "new_a");