def __init__(self, request, **kw):
    """Open (creating if needed) the per-wiki graph shelve and set up locks."""
    log.debug("shelve graphdb init")
    GraphDataBase.__init__(self, request, **kw)

    graph_dir = os.path.join(request.cfg.data_dir, 'graphdata')
    if not os.path.isdir(graph_dir):
        os.mkdir(graph_dir)
    self.graphshelve = os.path.join(graph_dir, 'graphdata.shelve')

    # Optional sq_dict backend, selected via wiki configuration.
    self.use_sq_dict = getattr(request.cfg, 'use_sq_dict', False)
    if self.use_sq_dict:
        import sq_dict
        self.shelveopen = sq_dict.shelve
    else:
        self.shelveopen = shelve.open

    # XXX (falsely) assumes shelve.open creates file with same name;
    # it happens to work with the bsddb backend.
    if not os.path.exists(self.graphshelve):
        self.shelveopen(self.graphshelve, 'c').close()

    self.db = None       # open shelve handle, managed by the lock methods
    self.cache = dict()  # read cache
    self.out = dict()    # write cache, flushed by close()

    lock_file = os.path.join(graph_dir, "graphdata-lock")
    self._lock_timeout = getattr(request.cfg, 'graphdata_lock_timeout', None)
    self._readlock = _Lock(lock_file, exclusive=False)
    self._writelock = _Lock(lock_file, exclusive=True)
def commit(self):
    """Flush the write cache to CouchDB in rehash mode.

    This backend does not implement real transactions; commit() is
    (ab)used as the bulk-flush point for rehash mode, where modified
    page documents accumulate in self.modified_pages and are written
    in one bulk update.

    Raises:
        DbError: if any document in the bulk update fails to write.
    """
    # Ha, puny gullible humans think I do transactions
    # .. instead, I abuse this method for rehash mode & bulk update
    if self.doing_rehash:
        log.debug("commit in rehash mode, doing bulk update")
        # couch_db.update yields (success, docid, rev_or_exc) per doc
        for success, docid, rev_or_exc in self.couch_db.update(self.modified_pages.values()):
            if not success:
                raise DbError(
                    "at least one update failed while writing updated docs at end of rehash. first exception docid: %s exception: %s"
                    % (docid, rev_or_exc))
        # bulk write succeeded: reset the write cache
        self.modified_pages = {}
def commit(self):
    """Bulk-flush cached page documents to CouchDB (rehash mode only).

    Not a real transaction commit: outside rehash mode this is a no-op.

    Raises:
        DbError: when any document of the bulk update fails.
    """
    # Ha, puny gullible humans think I do transactions
    # .. instead, I abuse this method for rehash mode & bulk update
    if not self.doing_rehash:
        return
    log.debug("commit in rehash mode, doing bulk update")
    results = self.couch_db.update(self.modified_pages.values())
    for ok, doc_id, rev_or_err in results:
        if ok:
            continue
        raise DbError(
            "at least one update failed while writing updated docs at end of rehash. first exception docid: %s exception: %s"
            % (doc_id, rev_or_err))
    self.modified_pages = {}
def close(self):
    """Flush the write cache to the shelve and release any held locks.

    Pending entries in self.out are applied under a write lock (the
    UNDEFINED sentinel deletes its key); the read cache is cleared,
    the shelve handle is closed, and both locks are released if held.
    """
    if self.out:
        # flush staged writes/deletes under an exclusive lock
        self.writelock()
        for key, value in self.out.items():
            if value is self.UNDEFINED:
                self.db.pop(key, None)
            else:
                self.db[key] = value
        self.out = dict()
    self.cache.clear()
    if self.db is not None:
        self.db.close()
        self.db = None
    if self._writelock.release():
        log.debug("released a write lock for %r" % (self.graphshelve,))
    else:
        log.debug("did not release any write locks for %r" %
                  (self.graphshelve,))
    if self._readlock.release():
        log.debug("released a read lock for %r" % (self.graphshelve,))
    else:
        # BUG FIX: log message grammar ("did not released" -> "did not release")
        log.debug("did not release any read locks for %r" %
                  (self.graphshelve,))
def close(self):
    """Write out cached changes, drop caches, close the db and unlock."""
    pending = self.out
    if pending:
        self.writelock()
        for page, data in pending.items():
            # the UNDEFINED sentinel marks a deletion
            if data is self.UNDEFINED:
                self.db.pop(page, None)
            else:
                self.db[page] = data
        self.out = dict()
    self.cache.clear()
    handle = self.db
    if handle is not None:
        handle.close()
        self.db = None
    if self._writelock.release():
        log.debug("released a write lock for %r" % (self.graphshelve, ))
    else:
        log.debug("did not release any write locks for %r" %
                  (self.graphshelve, ))
    if self._readlock.release():
        log.debug("released a read lock for %r" % (self.graphshelve, ))
    else:
        log.debug("did not released any read locks for %r" %
                  (self.graphshelve, ))
def readlock(self):
    """Take a shared lock and open the shelve read-only (idempotent)."""
    # Holding either lock already implies read access is available.
    if self._writelock.is_locked() or self._readlock.is_locked():
        return
    log.debug("getting a read lock for %r" % (self.graphshelve,))
    try:
        self._readlock.acquire(self._lock_timeout)
    except LockTimeout:
        log.error("getting a read lock for %r timed out after %.02fs"
                  % (self.graphshelve, self._lock_timeout))
        raise
    log.debug("got a read lock for %r" % (self.graphshelve,))
    self.db = self.shelveopen(self.graphshelve, "r")
def readlock(self):
    """Acquire a shared (read) lock and open the shelve read-only.

    No-op when this instance already holds either lock (a write lock
    also grants read access).

    Raises:
        LockTimeout: re-raised (after logging) when the lock cannot be
            acquired within self._lock_timeout seconds.
    """
    if self._writelock.is_locked():
        return
    if self._readlock.is_locked():
        return
    log.debug("getting a read lock for %r" % (self.graphshelve, ))
    try:
        self._readlock.acquire(self._lock_timeout)
    except LockTimeout:
        items = self.graphshelve, self._lock_timeout
        log.error("getting a read lock for %r timed out after %.02fs" % items)
        raise
    log.debug("got a read lock for %r" % (self.graphshelve, ))
    # read-only handle; writelock() reopens with "c" when writing
    self.db = self.shelveopen(self.graphshelve, "r")
def __init__(self, request, dbname="gwiki", couchurl=None):
    """Connect to CouchDB (default server unless couchurl is given)."""
    log.debug("couchdb graphdb init")
    GraphDataBase.__init__(self, request)
    self.dbname = dbname
    server = couchdb.Server(couchurl) if couchurl else couchdb.Server()
    self.couch_server = server
    self.make_pagemeta_class()
    # we could really use db connection recycling/pooling...
    self.init_db()
    # use write cache/bulk update workaround for rehash slowness,
    # unsafe otherwise
    self.doing_rehash = False
    self.modified_pages = {}
def writelock(self):
    """Upgrade to an exclusive lock and open the shelve writable.

    No-op if the write lock is already held. If only a read lock is
    held, the read-only handle is closed and the read lock released
    before the exclusive lock is acquired.

    Raises:
        LockTimeout: re-raised (after logging) when the lock cannot be
            acquired within self._lock_timeout seconds.
    """
    if self._writelock.is_locked():
        return
    if self._readlock.is_locked():
        if self.db is not None:
            self.db.close()
            self.db = None
        self._readlock.release()
        # BUG FIX: the original message claimed a *write* lock was
        # released here, but it is the read lock that is dropped.
        log.debug("released a read lock for %r" % (self.graphshelve,))
    log.debug("getting a write lock for %r" % (self.graphshelve,))
    try:
        self._writelock.acquire(self._lock_timeout)
    except LockTimeout:
        items = self.graphshelve, self._lock_timeout
        log.error("getting a write lock for %r timed out after %.02fs" % items)
        raise
    log.debug("got a write lock for %r" % (self.graphshelve,))
    self.db = self.shelveopen(self.graphshelve, "c")
def writelock(self):
    """Acquire the exclusive lock and open the shelve writable (idempotent)."""
    if self._writelock.is_locked():
        return
    if self._readlock.is_locked():
        # drop the read-only handle before switching lock modes
        handle = self.db
        if handle is not None:
            handle.close()
            self.db = None
        self._readlock.release()
        log.debug("released a write lock for %r" % (self.graphshelve, ))
    log.debug("getting a write lock for %r" % (self.graphshelve, ))
    try:
        self._writelock.acquire(self._lock_timeout)
    except LockTimeout:
        log.error("getting a write lock for %r timed out after %.02fs"
                  % (self.graphshelve, self._lock_timeout))
        raise
    log.debug("got a write lock for %r" % (self.graphshelve, ))
    self.db = self.shelveopen(self.graphshelve, "c")
def clear_metas(self):
    """Drop the whole CouchDB database and recreate it empty."""
    # BUG FIX: the original passed self.dbname as a stray positional
    # argument with no %s placeholder in the format string, which makes
    # the logging module raise a formatting error when the record is
    # emitted. Use lazy %-style logging arguments instead.
    log.debug("deleting db from couchdb: %s", self.dbname)
    del self.couch_server[self.dbname]
    self.init_db()
def delpage(self, pagename):
    """Queue deletion of pagename's graph data.

    The deletion is recorded in the write cache (self.out) as the
    UNDEFINED sentinel and only hits the shelve when close() flushes;
    the read-cache entry is dropped immediately so stale data is not
    served in the meantime.
    """
    log.debug("delpage %s" % (repr(pagename),))
    page = encode_page(pagename)
    # UNDEFINED marks "delete this key on flush" in the write cache
    self.out[page] = self.UNDEFINED
    self.cache.pop(page, None)
def delpage(self, pagename):
    """Mark pagename's graph data for deletion on the next flush."""
    log.debug("delpage %s" % (repr(pagename), ))
    key = encode_page(pagename)
    # the UNDEFINED sentinel tells close() to drop this key from the db
    self.out[key] = self.UNDEFINED
    self.cache.pop(key, None)
def savepage(self, pagename, pagedict):
    """Stage pagedict as the new graph data for pagename."""
    log.debug("savepage %s = %s" % (repr(pagename), repr(pagedict)))
    key = encode_page(pagename)
    # stage in the write cache and invalidate any cached read copy
    self.out[key] = pagedict
    self.cache.pop(key, None)
def dump_db(self):
    """Debugging helper: log every key/value stored under the db root."""
    contents = dict(self.dbroot.items())
    log.debug("db dump:")
    log.debug(contents)