def test_transitive_keep(self):
	"""An open handle must transitively protect referenced documents from GC."""
	with Connector().create(self.store1, "public.data", "test.ignore") as w1:
		with Connector().create(self.store1, "public.data", "test.ignore") as w2:
			w2.write('FILE', 'test')
			w2.commit()
			doc2 = w2.getDoc()
			rev2 = w2.getRev()
		# create a reference from w1 to w2
		w1.write('PDSD', struct.dumps([struct.DocLink(self.store2, doc2)]))
		w1.commit()
		doc1 = w1.getDoc()
		rev1 = w1.getRev()
		# w2 is closed now, w1 still open, should prevent gc
		self.gc(self.store1)
		# both documents must have survived the collection cycle
		for doc, rev in ((doc1, rev1), (doc2, rev2)):
			lookup = Connector().lookupDoc(doc)
			self.assertEqual(lookup.revs(), [rev])
			self.assertEqual(lookup.preRevs(), [])
			self.assertEqual(Connector().lookupRev(rev), [self.store1])
def performSync(self, doc, strategy):
	"""Start a sync between store1 and store2 and wait until it settled.

	Returns the final lookup result for `doc` once both stores agree on a
	single revision and the sync worker released its lock.
	"""
	watch = self.watchDoc(doc, connector.Watch.EVENT_MODIFIED)
	self.startSync(strategy, self.store1, self.store2)

	# first wait until the doc gets changed
	while True:
		watch.reset()
		self.assertTrue(watch.waitForWatch())
		lookup = Connector().lookupDoc(doc)
		if len(lookup.revs()) == 1:
			break

	stores = lookup.stores()
	self.assertEqual(len(stores), 2)
	self.assertTrue(self.store1 in stores)
	self.assertTrue(self.store2 in stores)

	# wait until sync_worker moved on: taking and releasing the per-doc
	# lock can only succeed after the worker dropped it
	hexDoc = doc.encode("hex")
	result = self.erlCall(
		"""peerdrive_sync_locks:lock(<<16#""" + hexDoc + """:128>>),
		peerdrive_sync_locks:unlock(<<16#""" + hexDoc + """:128>>).""")
	self.assertEqual(result, "{ok, ok}")

	return lookup
def test_suspend_multi(self):
	"""Two independent preliminary revisions may coexist on one document."""
	(doc, rev1, rev_s1) = self.createSuspendDoc()

	# move the head forward past the first suspended revision
	with Connector().update(self.store1, doc, rev1) as w:
		w.writeAll('FILE', 'forward')
		w.commit()
		rev2 = w.getRev()

	# park a second, independent change on top of the new head
	with Connector().update(self.store1, doc, rev2) as w:
		w.writeAll('FILE', 'Hail to the king, baby!')
		w.suspend()
		rev_s2 = w.getRev()

	lookup = Connector().lookupDoc(doc)
	self.assertEqual(lookup.revs(), [rev2])
	pendings = lookup.preRevs()
	self.assertEqual(len(pendings), 2)
	self.assertTrue(rev_s1 in pendings)
	self.assertTrue(rev_s2 in pendings)

	# each preliminary revision keeps its own parent
	self.assertEqual(Connector().stat(rev_s1).parents(), [rev1])
	self.assertEqual(Connector().stat(rev_s2).parents(), [rev2])

	self.assertRevContent(self.store1, rev1, {'FILE' : 'ok'})
	self.assertRevContent(self.store1, rev_s1, {'FILE' : 'update'})
	self.assertRevContent(self.store1, rev2, {'FILE' : 'forward'})
	self.assertRevContent(self.store1, rev_s2, {'FILE' : 'Hail to the king, baby!'})
def test_suspend(self):
	"""A suspended change shows up as a preliminary revision, not the head."""
	(doc, headRev, preRev) = self.createSuspendDoc()

	lookup = Connector().lookupDoc(doc)
	self.assertEqual(lookup.revs(), [headRev])
	self.assertEqual(lookup.preRevs(), [preRev])

	self.assertRevContent(self.store1, headRev, {'FILE' : 'ok'})
	self.assertRevContent(self.store1, preRev, {'FILE' : 'update'})
def test_resume_abort(self):
	"""Aborting a resumed handle must leave document state untouched."""
	(doc, headRev, preRev) = self.createSuspendDoc()

	with Connector().resume(self.store1, doc, preRev) as w:
		w.writeAll('FILE', 'Hail to the king, baby!')
		# while the handle is open the lookup must still be unchanged...
		lookup = Connector().lookupDoc(doc)
		self.assertEqual(lookup.revs(), [headRev])
		self.assertEqual(lookup.preRevs(), [preRev])
		# ...and closing without commit must keep it that way
		w.close()
		lookup = Connector().lookupDoc(doc)
		self.assertEqual(lookup.revs(), [headRev])
		self.assertEqual(lookup.preRevs(), [preRev])

	self.assertRevContent(self.store1, headRev, {'FILE' : 'ok'})
	self.assertRevContent(self.store1, preRev, {'FILE' : 'update'})
def test_resume_wrong(self):
	"""Resuming from the committed head (not a preliminary rev) is rejected."""
	(doc, headRev, preRev) = self.createSuspendDoc()

	self.assertRaises(IOError, Connector().resume, self.store1, doc, headRev)

	# the failed attempt must not have changed anything
	lookup = Connector().lookupDoc(doc)
	self.assertEqual(lookup.revs(), [headRev])
	self.assertEqual(lookup.preRevs(), [preRev])
	self.assertRevContent(self.store1, headRev, {'FILE' : 'ok'})
	self.assertRevContent(self.store1, preRev, {'FILE' : 'update'})
def update(self, updateItem=True):
	"""Refresh this entry's cached state (revision, icon, type, columns).

	Resets all derived column values and the icon, re-resolves the current
	revision of the document and rebuilds the display state from its stat
	info. Returns early (leaving the entry invalid) when the document has
	no revision left or the revision can no longer be stat'ed.

	updateItem -- when True, refresh the underlying item before reading
	              its revision from it.
	"""
	# reset everything
	self.__valid = False
	self.__icon = None
	for i in xrange(len(self.__columnDefs)):
		column = self.__columnDefs[i]
		if column.derived():
			self.__columnValues[i] = column.default()
	# determine revision
	needMerge = False
	isReplicated = False
	if self.__doc:
		l = Connector().lookupDoc(self.__doc)
		isReplicated = len(l.stores()) > 1
		revisions = l.revs()
		if len(revisions) == 0:
			# document vanished; entry stays invalid
			return
		elif len(revisions) > 1:
			# diverged heads on different stores -> needs a merge
			needMerge = True
		if updateItem:
			self.__item[''].update()
		self.__rev = self.__item[''].rev()
	# stat
	try:
		s = Connector().stat(self.__rev)
	except IOError:
		# revision disappeared between lookup and stat; entry stays invalid
		return
	self.__uti = s.type()
	if needMerge or isReplicated:
		# paint an emblem over the type icon; merge takes precedence
		image = QtGui.QImage(Registry().getIcon(s.type()))
		painter = QtGui.QPainter()
		painter.begin(image)
		if needMerge:
			painter.drawImage(0, 16, QtGui.QImage("icons/emblems/split.png"))
		elif isReplicated:
			painter.drawImage(0, 16, QtGui.QImage("icons/emblems/distributed.png"))
		painter.end()
		self.__icon = QtGui.QIcon(QtGui.QPixmap.fromImage(image))
	else:
		self.__icon = QtGui.QIcon(Registry().getIcon(s.type()))
	# NOTE(review): 'conformes' is the Registry API's spelling as used here
	self.__isFolder = Registry().conformes(self.__uti, "org.peerdrive.folder")
	self.__replacable = not needMerge and not self.__isFolder
	self.__valid = True
	self.__updateColumns(s)
def test_create_keep_handle(self):
	"""An open create handle alone must keep an unreferenced doc alive."""
	with Connector().create(self.store1, "public.data", "test.ignore") as w:
		w.commit()
		doc = w.getDoc()
		rev = w.getRev()
		# perform a GC cycle while the handle is still open
		self.gc(self.store1)
		lookup = Connector().lookupDoc(doc)
		self.assertEqual(lookup.revs(), [rev])
		self.assertEqual(lookup.preRevs(), [])
		# revision must still be accessible
		Connector().stat(rev)
def test_collect(self):
	"""An unreferenced document is reaped once its handle is closed."""
	# deliberately close handle after creating!
	with Connector().create(self.store1, "public.data", "test.ignore") as w:
		w.commit()
		doc = w.getDoc()
		rev = w.getRev()

	# perform a GC cycle
	self.gc(self.store1)

	# document and revision must be gone
	lookup = Connector().lookupDoc(doc)
	self.assertEqual(lookup.revs(), [])
	self.assertEqual(lookup.preRevs(), [])
	self.assertRaises(IOError, Connector().stat, rev)
def test_sync_ff_err(self):
	"""Fast-forward sync must refuse truly diverged revisions."""
	(doc, rev1, rev2) = self.createMerge("public.data", {},
		{"FILE": "left1"}, {"FILE": "right1"})
	watch = self.watchDoc(doc, connector.Watch.EVENT_MODIFIED)

	# start ff sync in both directions; neither may touch the document
	self.startSync("ff", self.store1, self.store2)
	self.startSync("ff", self.store2, self.store1)
	self.assertFalse(watch.waitForWatch(1))

	# check that doc is not synced
	lookup = Connector().lookupDoc(doc)
	self.assertEqual(len(lookup.revs()), 2)
	self.assertEqual(lookup.rev(self.store1), rev1)
	self.assertEqual(lookup.rev(self.store2), rev2)
def update(self, updateItem=True):
	"""Refresh this entry's cached state (revision, icon, type, columns).

	Resets all derived column values and the icon, re-resolves the current
	revision of the document and rebuilds the display state from its stat
	info. Returns early (leaving the entry invalid) when the document has
	no revision left or the revision can no longer be stat'ed.

	updateItem -- when True, refresh the underlying item before reading
	              its revision from it.
	"""
	# reset everything
	self.__valid = False
	self.__icon = None
	for i in xrange(len(self.__columnDefs)):
		column = self.__columnDefs[i]
		if column.derived():
			self.__columnValues[i] = column.default()
	# determine revision
	needMerge = False
	isReplicated = False
	if self.__doc:
		l = Connector().lookupDoc(self.__doc)
		isReplicated = len(l.stores()) > 1
		revisions = l.revs()
		if len(revisions) == 0:
			# document vanished; entry stays invalid
			return
		elif len(revisions) > 1:
			# diverged heads on different stores -> needs a merge
			needMerge = True
		if updateItem:
			self.__item[''].update()
		self.__rev = self.__item[''].rev()
	# stat
	try:
		s = Connector().stat(self.__rev)
	except IOError:
		# revision disappeared between lookup and stat; entry stays invalid
		return
	self.__uti = s.type()
	if needMerge or isReplicated:
		# paint an emblem over the type icon; merge takes precedence
		image = QtGui.QImage(Registry().getIcon(s.type()))
		painter = QtGui.QPainter()
		painter.begin(image)
		if needMerge:
			painter.drawImage(0, 16, QtGui.QImage("icons/emblems/split.png"))
		elif isReplicated:
			painter.drawImage(0, 16, QtGui.QImage("icons/emblems/distributed.png"))
		painter.end()
		self.__icon = QtGui.QIcon(QtGui.QPixmap.fromImage(image))
	else:
		self.__icon = QtGui.QIcon(Registry().getIcon(s.type()))
	# NOTE(review): 'conformes' is the Registry API's spelling as used here
	self.__isFolder = Registry().conformes(self.__uti, "org.peerdrive.folder")
	self.__replacable = not needMerge and not self.__isFolder
	self.__valid = True
	self.__updateColumns(s)
def test_resume_suspend_orig(self):
	"""Resuming and re-suspending untouched replaces the preliminary rev."""
	(doc, rev1, rev2) = self.createSuspendDoc()

	with Connector().resume(self.store1, doc, rev2) as w:
		w.suspend()
		rev3 = w.getRev()

	lookup = Connector().lookupDoc(doc)
	self.assertEqual(lookup.revs(), [rev1])
	self.assertEqual(lookup.preRevs(), [rev3])
	# the re-suspended rev still descends from the original head
	self.assertEqual(Connector().stat(rev3).parents(), [rev1])

	self.assertRevContent(self.store1, rev1, {'FILE' : 'ok'})
	self.assertRevContent(self.store1, rev3, {'FILE' : 'update'})
def test_resume_commit(self):
	"""Committing a resumed revision promotes it to the new head."""
	(doc, rev1, rev2) = self.createSuspendDoc()

	with Connector().resume(self.store1, doc, rev2) as w:
		w.writeAll('FILE', 'What are you waiting for, christmas?')
		w.commit()
		rev3 = w.getRev()

	lookup = Connector().lookupDoc(doc)
	self.assertEqual(lookup.revs(), [rev3])
	# no preliminary revision left behind
	self.assertEqual(len(lookup.preRevs()), 0)
	self.assertEqual(Connector().stat(rev3).parents(), [rev1])

	self.assertRevContent(self.store1, rev1, {'FILE' : 'ok'})
	self.assertRevContent(self.store1, rev3, {'FILE' : 'What are you waiting for, christmas?'})
def test_sync_ff_err(self):
	"""Fast-forward sync must refuse truly diverged revisions."""
	(doc, rev1, rev2) = self.createMerge("public.data", {},
		{'FILE' : "left1"}, {'FILE' : "right1"})
	watch = self.watchDoc(doc, connector.Watch.EVENT_MODIFIED)

	# start ff sync in both directions; neither may touch the document
	self.startSync('ff', self.store1, self.store2)
	self.startSync('ff', self.store2, self.store1)
	self.assertFalse(watch.waitForWatch(1))

	# check that doc is not synced
	lookup = Connector().lookupDoc(doc)
	self.assertEqual(len(lookup.revs()), 2)
	self.assertEqual(lookup.rev(self.store1), rev1)
	self.assertEqual(lookup.rev(self.store2), rev2)
def test_resume_suspend_mod(self):
	"""A modified-then-resuspended rev replaces the old preliminary rev."""
	(doc, rev1, rev2) = self.createSuspendDoc()

	with Connector().resume(self.store1, doc, rev2) as w:
		w.writeAll("FILE", "What are you waiting for, christmas?")
		w.suspend()
		rev3 = w.getRev()

	lookup = Connector().lookupDoc(doc)
	self.assertEqual(lookup.revs(), [rev1])
	self.assertEqual(lookup.preRevs(), [rev3])
	# still parented on the original head
	self.assertEqual(Connector().stat(rev3).parents(), [rev1])

	self.assertRevContent(self.store1, rev1, {"FILE": "ok"})
	self.assertRevContent(self.store1, rev3, {"FILE": "What are you waiting for, christmas?"})
def test_sticky(self):
	"""Documents inside a sticky container follow the container's replicas."""
	# create the document which should get replicated
	w = self.create(self.store1)
	w.writeAll("FILE", "foobar")
	w.commit()
	doc = w.getDoc()
	rev = w.getRev()

	# create sticky contianer on first store
	s = struct.Folder()
	with s.create(self.store1, "foo") as dummy:
		s.append(struct.DocLink(self.store1, doc))
		s.save()
		contDoc = s.getDoc()

	# need a dummy folder on both stores
	self.createCommon([self.store1, self.store2], "org.peerdrive.folder",
		data={"PDSD": struct.dumps([{"": struct.DocLink(self.store1, contDoc)}])})

	docWatch = self.watchDoc(doc, connector.Watch.EVENT_REPLICATED)
	revWatch = self.watchRev(rev, connector.Watch.EVENT_REPLICATED)

	# now replicate the folder to 2nd store
	Connector().replicateDoc(self.store1, contDoc, self.store2)

	# wait for sticky replicatin to happen
	self.assertTrue(docWatch.waitForWatch())
	self.assertTrue(revWatch.waitForWatch())

	# check doc (with rev) to exist on all stores
	lookup = Connector().lookupDoc(doc)
	self.assertEqual(lookup.revs(), [rev])
	revStores = lookup.stores(rev)
	self.assertEqual(len(revStores), 2)
	self.assertTrue(self.store1 in revStores)
	self.assertTrue(self.store2 in revStores)

	allStores = Connector().lookupRev(rev)
	self.assertEqual(len(allStores), 2)
	self.assertTrue(self.store1 in allStores)
	self.assertTrue(self.store2 in allStores)
def test_forget(self):
	"""forget() drops preliminary revisions but refuses committed ones."""
	(doc, rev1, rev_s1) = self.createSuspendDoc()

	# move the head forward...
	with Connector().update(self.store1, doc, rev1) as w:
		w.writeAll('FILE', 'forward')
		w.commit()
		rev2 = w.getRev()

	# ...and park a second preliminary revision on the new head
	with Connector().update(self.store1, doc, rev2) as w:
		w.writeAll('FILE', 'Hail to the king, baby!')
		w.suspend()
		rev_s2 = w.getRev()

	# committed revisions cannot be forgotten; preliminary ones can
	self.assertRaises(IOError, Connector().forget, self.store1, doc, rev1)
	Connector().forget(self.store1, doc, rev_s1)

	lookup = Connector().lookupDoc(doc)
	self.assertEqual(lookup.revs(), [rev2])
	self.assertEqual(lookup.preRevs(), [rev_s2])
def test_fork_keep_handle(self):
	"""An open fork handle must keep the forked document across a GC run."""
	w = self.create(self.store1, "test.format.foo")
	self.assertEqual(w.getType(), "test.format.foo")
	w.commit()
	doc1 = w.getDoc()
	rev1 = w.getRev()

	with Connector().fork(self.store1, rev1, "test.ignore") as forked:
		forked.write('FILE', 'update')
		forked.commit()
		doc2 = forked.getDoc()
		rev2 = forked.getRev()
		# perform a GC cycle while the fork handle is still open
		self.gc(self.store1)
		lookup = Connector().lookupDoc(doc2)
		self.assertEqual(lookup.revs(), [rev2])
		self.assertEqual(lookup.preRevs(), [])
		# revision must still be accessible
		Connector().stat(rev2)
def createCommon(self, stores, type="public.data", creator="org.peerdrive.test-py",
		data=None):
	"""Create a document on the last store of `stores` and replicate it to
	the others, with a referencing dummy document on each replica store.

	stores  -- list of store IDs; the last entry acts as the lead store
	           (the caller's list is no longer modified)
	type    -- type code of the created document
	creator -- creator code of the created document
	data    -- optional dict of part -> blob written before the commit
	Returns (doc, rev) of the common document.
	"""
	# Fix 1: mutable default argument -- a shared `{}` default would leak
	# state between calls if anybody ever mutated it.
	if data is None:
		data = {}
	# Fix 2: work on a copy so pop() does not mutate the caller's list.
	stores = list(stores)
	leadStore = stores.pop()

	w = self.create(leadStore, type, creator)
	for (part, blob) in data.items():
		w.writeAll(part, blob)
	w.commit()
	doc = w.getDoc()
	rev = w.getRev()

	# on every other store: keep the doc referenced and pull a replica
	for store in stores:
		w = self.create(store)
		w.writeAll('PDSD', struct.dumps([struct.DocLink(store, doc)]))
		w.commit()
		Connector().replicateDoc(leadStore, doc, store)

	# verify the common document on all stores
	l = Connector().lookupDoc(doc)
	self.assertEqual(l.revs(), [rev])
	for store in stores:
		self.assertTrue(store in l.stores())
	return (doc, rev)
def test_sticky(self):
	"""Documents inside a sticky container follow the container's replicas."""
	# create the document which should get replicated
	w = self.create(self.store1)
	w.writeAll('FILE', "foobar")
	w.commit()
	doc = w.getDoc()
	rev = w.getRev()

	# create sticky contianer on first store
	s = struct.Folder()
	with s.create(self.store1, "foo") as dummy:
		s.append(struct.DocLink(self.store1, doc))
		s.save()
		contDoc = s.getDoc()

	# need a dummy folder on both stores
	self.createCommon([self.store1, self.store2], "org.peerdrive.folder",
		data={'PDSD' : struct.dumps([{'':struct.DocLink(self.store1, contDoc)}])})

	docWatch = self.watchDoc(doc, connector.Watch.EVENT_REPLICATED)
	revWatch = self.watchRev(rev, connector.Watch.EVENT_REPLICATED)

	# now replicate the folder to 2nd store
	Connector().replicateDoc(self.store1, contDoc, self.store2)

	# wait for sticky replicatin to happen
	self.assertTrue(docWatch.waitForWatch())
	self.assertTrue(revWatch.waitForWatch())

	# check doc (with rev) to exist on all stores
	lookup = Connector().lookupDoc(doc)
	self.assertEqual(lookup.revs(), [rev])
	revStores = lookup.stores(rev)
	self.assertEqual(len(revStores), 2)
	self.assertTrue(self.store1 in revStores)
	self.assertTrue(self.store2 in revStores)

	allStores = Connector().lookupRev(rev)
	self.assertEqual(len(allStores), 2)
	self.assertTrue(self.store1 in allStores)
	self.assertTrue(self.store2 in allStores)
def performSync(self, doc, strategy):
	"""Start a sync between store1 and store2 and wait until it settled.

	Returns the final lookup result for `doc` once both stores agree on a
	single revision and the sync worker released its lock.
	"""
	watch = self.watchDoc(doc, connector.Watch.EVENT_MODIFIED)
	self.startSync(strategy, self.store1, self.store2)

	# first wait until the doc gets changed
	while True:
		watch.reset()
		self.assertTrue(watch.waitForWatch())
		lookup = Connector().lookupDoc(doc)
		if len(lookup.revs()) == 1:
			break

	stores = lookup.stores()
	self.assertEqual(len(stores), 2)
	self.assertTrue(self.store1 in stores)
	self.assertTrue(self.store2 in stores)

	# wait until sync_worker moved on: taking and releasing the per-doc
	# lock can only succeed after the worker dropped it
	hexDoc = doc.encode('hex')
	result = self.erlCall(
		"""peerdrive_sync_locks:lock(<<16#""" + hexDoc + """:128>>),
		peerdrive_sync_locks:unlock(<<16#""" + hexDoc + """:128>>).""")
	self.assertEqual(result, '{ok, ok}')

	return lookup