Example 1
 def testAutoCloseOnShutdown(self):
     db = pickledb.open(self.fname)
     # Simulate interpreter shutdown by deleting pickledb.os: __del__ calls
     # at shutdown run in no particular order, so the os module may already
     # be unavailable as a global by the time the database is finalized
     del pickledb.os
     del db
     # Get back mr. os
     reload(pickledb)
     db = pickledb.open(self.fname)
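
This test only passes if close-on-delete does not depend on module globals that may already have been torn down at interpreter shutdown. Below is a minimal, hypothetical sketch of that pattern (not pickledb's actual code; the class and attribute names are invented): anything __del__ needs, such as os.unlink for the lock file, is bound before shutdown can clear it.

    import os

    class _PickleDbSketch(object):
        # Hypothetical sketch, not pickledb's implementation: bind os.unlink
        # at class-creation time so close() still works even when the os
        # global has been cleared during interpreter shutdown.
        _unlink = staticmethod(os.unlink)

        def __init__(self, fname):
            self.fname = fname
            self.closed = False

        def close(self):
            if not self.closed:
                self.closed = True
                try:
                    self._unlink(self.fname + ".lock")
                except OSError:
                    pass

        def __del__(self):
            try:
                self.close()
            except Exception:
                # Never let errors escape __del__ during shutdown
                pass
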
Example 2
 def testCloseAndOpen(self):
     self.db._obj["fish"] = 31337      
     self.db.close()
     self.assertEqual(pickle.load(open(self.fname)), {"fish": 31337})
     self.db = pickledb.open(self.fname)
     self.assertEqual(pickle.load(open(self.fname)), {"fish": 31337})
     self.assertEqual(self.db._obj, {"fish": 31337})
Example 3
 def testOpenCreates(self):
     self.assert_(not os.path.exists(self.fname))
     db = pickledb.open(self.fname)
     self.assert_(os.path.exists(self.fname))
     db.close()
     # Might sound stupid, but don't delete the database on close!
     self.assert_(os.path.exists(self.fname))
Example 4
 def testOpenLockfile(self):
     db = pickledb.open(self.fname)
     self.assert_(os.path.exists(self.fname + ".lock"))
     # temp file should not be present after locking
     self.assertEqual(len(glob.glob(self.fname + "*")), 2)
     db.close()
     self.assert_(not os.path.exists(self.fname + ".lock"))
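
Taken together with Example 11 below, this test implies exclusive lock-file semantics: a fname + ".lock" file exists while the database is open, a second open() fails with pickledb.LockError, and close() removes the lock. Here is a minimal sketch of one way to implement such a lock with os.open and O_EXCL; whether pickledb actually does it this way is an assumption, and LockError below is only a stand-in for pickledb.LockError.

    import os

    class LockError(Exception):
        pass

    def acquire_lock(fname):
        # Create fname + ".lock" atomically; O_EXCL makes the call fail if
        # the lock file already exists, i.e. the database is already open.
        try:
            fd = os.open(fname + ".lock", os.O_CREAT | os.O_EXCL | os.O_WRONLY)
        except OSError:
            raise LockError("%r is already locked" % fname)
        os.close(fd)

    def release_lock(fname):
        os.remove(fname + ".lock")
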
Example 5
    def testThreadedWrite(self): 
        try:
            db = pickledb.open(self.fname, write="thread")
            start = time.time()
            # NOTE: 20x the loop in testAlwaysWrite
            for x in xrange(10000):
                # Insertions are slow because _write() is called for
                # every insert, writing out all elements
                db[x] = x
            used = time.time() - start
            # Should be really fast as we are only doing inserts
            self.assert_(used < 1.0)
            # And the write thread should not have written anything yet
            self.assertEqual(pickle.load(open(self.fname)), 
                             {})
            time.sleep(2)
            # The writer thread sleeps 1 second between writes, so after
            # waiting 2 seconds the content should be in the file
            self.assertEqual(len(pickle.load(open(self.fname))), 
                             10000)

            # Make sure thread only writes on change
            stat = os.stat(self.fname)
            time.sleep(2)
            stat2 = os.stat(self.fname)
            # Should not have rewritten since no changes have been
            # made in these two seconds
            self.assertEqual(stat, stat2)
            db.clear()
            try:
                self.assertEqual(len(pickle.load(open(self.fname))), 
                                 10000)
            except EOFError:    
                # This check may fail now and then: if we are not fast
                # enough with the load, the writer thread may already be
                # rewriting the file, in which case pickle.load() raises
                # EOFError
                pass

            # Make sure a sync really flushes out without
            # any delays. Contrary to the previous code, the code below
            # should NEVER fail.
            db.sync()
            self.assertEqual(pickle.load(open(self.fname)), 
                             {})
            db["fish"] = 1337
            db.close()
            self.assertEqual(pickle.load(open(self.fname)), 
                             {"fish": 1337})
        finally:
            try:
                # Make sure the writer thread is stopped even if one of
                # the assertions above fails
                db.close()         
            except:
                pass    
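
The timing assertions in this test assume a background writer that wakes roughly once per second and only rewrites the pickle when the in-memory data has changed (hence the os.stat comparison). A hypothetical sketch of such a loop follows; data, lock, dirty and stop stand in for state that pickledb would have to manage internally, and none of these names come from the library itself.

    import pickle
    import threading
    import time

    def _writer_loop(fname, data, lock, dirty, stop):
        # Wake up about once per second; only rewrite the file when the
        # mapping has been marked dirty since the last write.
        while not stop.is_set():
            time.sleep(1)
            if dirty.is_set():
                with lock:
                    snapshot = dict(data)
                    dirty.clear()
                with open(fname, "wb") as f:
                    pickle.dump(snapshot, f)

    # open(fname, write="thread") would then start something like:
    #   threading.Thread(target=_writer_loop,
    #                    args=(fname, data, lock, dirty, stop)).start()

Under this model sync() and close() perform the same dump immediately instead of waiting for the next wake-up, which is why the final assertions in the test are expected never to race.
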
Example 6
 def testAlwaysWrite(self):
     db = pickledb.open(self.fname)
     start = time.time()
     for x in xrange(500):
         # Insertions are slow because _write() is called for
         # every insert, writing out all elements
         db[x] = x
     used = time.time() - start
     self.assert_(used < 30.0)
     # If it's too fast, something is wrong! =))
     self.assert_(used > 0.2)
     db.close()
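
The comment explains the slowness: in the default mode every insert re-pickles the entire mapping. Below is a minimal sketch of that write-through behaviour, assuming an internal _write() as named in the comments above; the class and everything else here is invented for illustration.

    import pickle

    class _WriteThroughDict(object):
        # Sketch of the write-through mode the test measures: every mutation
        # rewrites the whole mapping to disk via _write().
        def __init__(self, fname):
            self.fname = fname
            self._obj = {}

        def _write(self):
            with open(self.fname, "wb") as f:
                pickle.dump(self._obj, f)

        def __setitem__(self, key, value):
            self._obj[key] = value
            self._write()  # full rewrite on each insert: simple, but slow
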
Example 7
 def testThreadedReopen(self): 
     try:
         db = pickledb.open(self.fname, write="thread")
         db["fish"] = 150279
         db.close()
         # Re-opening a threaded database should start a new
         # writer thread
         db.open()
         self.assertEqual(db["fish"], 150279)
         db["fish"] = 98
         db.close()
         self.assertEqual(pickle.load(open(self.fname)), 
                          {"fish": 98})
     finally:
         try:
             db.close()
         except:
             pass        
Example 8
 def testAutoClose(self):
     db = pickledb.open(self.fname)
     del db # Dereference should close
     db = pickledb.open(self.fname)
Example 9
 def testOpenCloseOpen(self):
     db = pickledb.open(self.fname)
     db.close()
     # should work even if db still referenced
     db2 = pickledb.open(self.fname)
Example 10
 def testReOpenBeforeClose(self):
     db = pickledb.open(self.fname)
     self.assertRaises(pickledb.AlreadyOpenError, db.open)    
Example 11
 def testDoubleOpenFailsLock(self):
     db = pickledb.open(self.fname)
     self.assertRaises(pickledb.LockError, pickledb.open, self.fname)
Example 12
 def setUp(self):
     self.fname = tempfile.mktemp()
     self.db = pickledb.open(self.fname)
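
The fixture creates a temporary file name with tempfile.mktemp(), but the matching tearDown is not shown. A plausible sketch, assuming the tests clean up the database and lock files the way Example 4 suggests (with os and glob imported as in the examples above):

 def tearDown(self):
     # Hypothetical counterpart to setUp: close the database if a test
     # left it open, then remove the database and lock files
     try:
         self.db.close()
     except Exception:
         pass
     for path in glob.glob(self.fname + "*"):
         os.remove(path)
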