def _init_db_environment(self, homeDir, create=True):
        #NOTE: The identifier is appended to the path as the location for the db
        #This provides proper isolation for stores which have the same path but different identifiers

        if SUPPORT_MULTIPLE_STORE_ENVIRON:
            fullDir = join(homeDir, self.identifier)
        else:
            fullDir = homeDir
        envsetflags = db.DB_CDB_ALLDB
        envflags = db.DB_INIT_MPOOL | db.DB_INIT_LOCK | db.DB_THREAD | db.DB_INIT_TXN | db.DB_RECOVER
        if not exists(fullDir):
            if create == True:
                makedirs(fullDir)
                self.create(homeDir)
            else:
                return NO_STORE

        db_env = db.DBEnv()
        db_env.set_cachesize(0, 1024 * 1024 * 50)  # TODO

        # enable deadlock-detection
        db_env.set_lk_detect(db.DB_LOCK_MAXLOCKS)

        # increase the number of locks; this correlates with the number of triples
        # that can be added/removed in a single transaction
        db_env.set_lk_max_locks(self.__locks)
        db_env.set_lk_max_lockers(self.__locks)
        db_env.set_lk_max_objects(self.__locks)

        #db_env.set_lg_max(1024*1024)
        #db_env.set_flags(envsetflags, 1)
        db_env.open(fullDir, envflags | db.DB_CREATE, 0)
        return db_env
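A minimal usage sketch for the environment returned above (the store and homeDir names are assumptions, not part of the example); because the flags include DB_INIT_TXN, a database opened in this environment can be updated inside a transaction.

env = store._init_db_environment(homeDir)   # returns a db.DBEnv (or NO_STORE)
txn = env.txn_begin()
d = db.DB(env)
d.open("triples.db", None, db.DB_BTREE, db.DB_THREAD | db.DB_CREATE, 0660, txn=txn)
d.put("key", "value", txn=txn)
txn.commit()
d.close()
env.close()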
Example #2
    def _environment_init(cls):
        """
        Configures and opens the handle for the underlying DB environment
        :return:
        """
        # Create the DB home directory, if necessary.
        home = cls.DB_HOME
        if not os.path.exists(home):
            try:
                os.makedirs(home)
            except OSError as exc:  # Guard against race condition
                if exc.errno != errno.EEXIST:
                    raise

        e = db.DBEnv()
        e.set_tx_max(1000)
        e.set_lk_max_lockers(2000)
        e.set_lk_max_locks(2000)
        e.set_lk_max_objects(2000)
        e.set_flags(db.DB_TXN_NOSYNC, True)
        e.open(
            home, db.DB_INIT_TXN | db.DB_PRIVATE | db.DB_CREATE | db.DB_THREAD
            | db.DB_INIT_LOCK | db.DB_INIT_MPOOL, 0660)

        cls.env = e
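A short follow-up sketch (the SomeStore owner class is an assumption, not from the snippet): since DB_TXN_NOSYNC defers log syncing on commit, a caller that needs durability would checkpoint explicitly after a batch of transactions.

SomeStore._environment_init()
txn = SomeStore.env.txn_begin()
# ... transactional reads/writes against databases opened in SomeStore.env ...
txn.commit()
SomeStore.env.txn_checkpoint()   # flush the deferred log to stable storage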
Example #3
class BaseThreadedTestCase(unittest.TestCase):
    dbtype = db.DB_UNKNOWN  # must be set in derived class
    dbopenflags = 0
    dbsetflags = 0
    envflags = 0

    def setUp(self):
        if verbose:
            dbutils._deadlock_VerboseFile = sys.stdout

        homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home')
        self.homeDir = homeDir
        try:
            os.mkdir(homeDir)
        except OSError, e:
            if e.errno != errno.EEXIST: raise
        self.env = db.DBEnv()
        self.setEnvOpts()
        self.env.open(homeDir, self.envflags | db.DB_CREATE)

        self.filename = self.__class__.__name__ + '.db'
        self.d = db.DB(self.env)
        if self.dbsetflags:
            self.d.set_flags(self.dbsetflags)
        self.d.open(self.filename, self.dbtype,
                    self.dbopenflags | db.DB_CREATE)
Example #4
 def setUp(self):
     homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home')
     self.homeDir = homeDir
     try: os.mkdir(homeDir)
     except os.error: pass
     self.env = db.DBEnv()
     self.env.open(homeDir, db.DB_THREAD | db.DB_INIT_MPOOL |
                   db.DB_INIT_LOCK | db.DB_CREATE)
Example #5
 def setUp(self):
     self.filename = self.__class__.__name__ + '.db'
     homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home')
     self.homeDir = homeDir
     try: os.mkdir(homeDir)
     except os.error: pass
     self.env = db.DBEnv()
     self.env.open(homeDir, db.DB_CREATE | db.DB_INIT_MPOOL | db.DB_INIT_LOCK )
    def do_open(self):
        self.homeDir = homeDir = os.path.join(
            os.path.dirname(sys.argv[0]), 'db_home')
        try: os.mkdir(homeDir)
        except os.error: pass
        self.env = db.DBEnv()
        self.env.open(homeDir, self.envflags | db.DB_INIT_MPOOL | db.DB_CREATE)

        self.filename = os.path.split(self.filename)[1]
        self.d = dbshelve.DBShelf(self.env)
        self.d.open(self.filename, self.dbtype, self.dbflags)
    def __init__(self, repoDir, readOnly=True):
        self.readOnly = readOnly

        if readOnly:
            envFlags = bdb.DB_INIT_MPOOL
            self.dbFlags = bdb.DB_RDONLY
        else:
            envFlags = bdb.DB_CREATE | bdb.DB_INIT_TXN | bdb.DB_INIT_MPOOL | \
                bdb.DB_INIT_LOCK | bdb.DB_INIT_LOG # | bdb.DB_RECOVER_FATAL
            self.dbFlags = bdb.DB_CREATE | bdb.DB_READ_UNCOMMITTED | bdb.DB_AUTO_COMMIT

        localDbFlags = self.dbFlags

        self.env = bdb.DBEnv(0)
        # set the size of the locking subsystem
        self.env.set_lk_max_objects(100000)
        self.env.set_lk_max_locks(100000)
        # set size of logging manager
        self.env.set_lg_regionmax(1 << 20)
        # open the environment
        #self.env.remove(repoDir)
        self.env.open(repoDir, envFlags)
        # Lazy syncing should improve the throughput
        #self.env.set_flags(bdb.DB_TXN_NOSYNC, 1)

        self._experimentDb = bdb.DB(self.env)
        self._experimentDb.open("experiments.db",
                                dbtype=bdb.DB_HASH,
                                flags=localDbFlags)
        self._requiredCountDb = bdb.DB(self.env)
        self._requiredCountDb.open("requiredCount.db",
                                   dbtype=bdb.DB_HASH,
                                   flags=localDbFlags)
        self._resultCountDb = bdb.DB(self.env)
        self._resultCountDb.open("resultCount.db",
                                 dbtype=bdb.DB_HASH,
                                 flags=localDbFlags)
        self._bundleIdsDb = bdb.DB(self.env)
        self._bundleIdsDb.open("bundleIds.db",
                               dbtype=bdb.DB_HASH,
                               flags=localDbFlags)

        # make the directory where results will be stored
        try:
            os.mkdir(os.path.join(repoDir, 'results'))
        except OSError:
            # already exists? anyway, we'll get an error later
            pass

        if self.readOnly:
            self.txn = None
        else:
            self.txn = self.env.txn_begin()
Example #8
 def open(self):
     if self.__shelf is not None:
         return
     log.debug("Opening state database...")
     self.__flags = db.DB_PRIVATE | db.DB_CREATE | db.DB_THREAD \
               | db.DB_INIT_LOCK | db.DB_INIT_MPOOL
     self.__env = env = db.DBEnv()
     env.open(self.__config.appdata_dir, self.__flags)
     self.__shelf = dbshelve.open(self.__config.state_db_file,
                                  'c',
                                  dbenv=env)
     log.debug("State database opened with %d entries.", len(self.keys()))
Example #9
 def _init_db_environment(self, homeDir, create=True):
     if not exists(homeDir):
         if create==True:
             mkdir(homeDir) # TODO: implement create method and refactor this to it
             self.create(homeDir)
         else:
             return NO_STORE
     db_env = db.DBEnv()
     db_env.set_cachesize(0, CACHESIZE) # TODO
     #db_env.set_lg_max(1024*1024)
     db_env.set_flags(ENVSETFLAGS, 1)
     db_env.open(homeDir, ENVFLAGS | db.DB_CREATE)
     return db_env
Example #10
 def destroy(self, configuration):
     """
     Destroy the underlying bsddb persistence for this store
     """
     if SUPPORT_MULTIPLE_STORE_ENVIRON:
         fullDir = join(configuration,self.identifier)
     else:
         fullDir = configuration
     if exists(configuration):
         #From bsddb docs:
         #A DB_ENV handle that has already been used to open an environment 
         #should not be used to call the DB_ENV->remove function; a new DB_ENV handle should be created for that purpose.
         self.close()
         db.DBEnv().remove(fullDir,db.DB_FORCE)
Example #11
 def open(self):
     if self.__shelf is not None:
         return
     history_dir = os.path.join(self.__config.appdata_dir, "history")
     if not os.path.exists(history_dir):
         self.info("Creating history dir %s", history_dir)
         os.mkdir(history_dir)
     self.debug("Opening history database...")
     self.__flags = db.DB_PRIVATE | db.DB_CREATE | db.DB_THREAD \
               | db.DB_INIT_LOCK | db.DB_INIT_MPOOL
     self.__env = env = db.DBEnv()
     env.open(history_dir, self.__flags)
     self.__shelf = dbshelve.open("history.db", 'c', dbenv=env)
     self.debug("History database opened with %d entries.",
                len(self.keys()))
Example #12
tl = threading.local()  # module-level: a local() created inside the function would never cache the env

def getDbEnv():
    if not hasattr(tl, 'db_env'):
        tl.db_env = db.DBEnv()
        tl.db_env.open(
            PROJECT_DATA_ROOT, db.DB_INIT_MPOOL | db.DB_INIT_LOCK
            | db.DB_INIT_LOG | db.DB_REGISTER | db.DB_RECOVER | db.DB_INIT_TXN
            | db.DB_CHKSUM | db.DB_THREAD | db.DB_CREATE)
        #Use the following if you get errors about db.DB_REGISTER
        #tl.db_env.open( PROJECT_DATA_ROOT,
        #  db.DB_INIT_MPOOL|db.DB_INIT_LOCK|db.DB_INIT_LOG|
        #  db.DB_INIT_TXN|db.DB_CHKSUM|db.DB_THREAD|db.DB_CREATE )
        tl.db_env.set_timeout(16384, db.DB_SET_LOCK_TIMEOUT)
        tl.db_env.set_timeout(16384, db.DB_SET_TXN_TIMEOUT)
    return tl.db_env
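A hedged consumer sketch for the thread-local environment above (the database name and retry policy are assumptions): the environment is transactional with roughly 16 ms lock/transaction timeouts, so databases are typically opened with DB_AUTO_COMMIT and operations retried on deadlock or timeout.

env = getDbEnv()
d = db.DB(env)
d.open('data.db', None, db.DB_BTREE, db.DB_CREATE | db.DB_AUTO_COMMIT | db.DB_THREAD)
try:
    d.put('key', 'value')
except (db.DBLockDeadlockError, db.DBLockNotGrantedError):
    pass  # a real caller would retry the operation here
d.close()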
Example #13
 def _init_db_environment(self, homeDir, create=True):
     envsetflags  = db.DB_CDB_ALLDB
     envflags = db.DB_INIT_MPOOL | db.DB_INIT_CDB | db.DB_THREAD
     if not exists(homeDir):
         if create==True:
             mkdir(homeDir) # TODO: implement create method and refactor this to it
             self.create(homeDir)
         else:
             return NO_STORE
     db_env = db.DBEnv()
     db_env.set_cachesize(0, 1024*1024*50) # TODO
     #db_env.set_lg_max(1024*1024)
     db_env.set_flags(envsetflags, 1)
     db_env.open(homeDir, envflags | db.DB_CREATE)
     return db_env
Example #14
    def setUp(self):
        self._tmpdir = tempfile.mkdtemp()
        self.full_name = os.path.join(self._tmpdir, 'test.grdb')
        self.env = db.DBEnv()
        self.env.set_cachesize(0, 0x2000000)
        self.env.set_lk_max_locks(25000)
        self.env.set_lk_max_objects(25000)
        self.env.set_flags(db.DB_LOG_AUTOREMOVE, 1)  # clean up unused logs
        # The DB_PRIVATE flag must go if we ever move to multi-user setup
        env_flags = db.DB_CREATE|db.DB_RECOVER|db.DB_PRIVATE|\
                    db.DB_INIT_MPOOL|db.DB_INIT_LOCK|\
                    db.DB_INIT_LOG|db.DB_INIT_TXN

        env_name = "%s/env" % (self._tmpdir, )
        if not os.path.isdir(env_name):
            os.mkdir(env_name)
        self.env.open(env_name, env_flags)
        (self.person_map, self.surnames) = self._open_tables()
Example #15
def open_db(filename, db_dir='', filetype=db.DB_BTREE, writeback=False):
    global home_dir
    if not db_dir:
        db_dir = home_dir
    dir = setDBPath(db_dir)
    path = os.path.join(dir, filename)
    env = db.DBEnv()
    # Concurrent Data Store
    env.open(
        dir, db.DB_THREAD | db.DB_INIT_CDB | db.DB_INIT_MPOOL | db.DB_CREATE
        | db.DB_PRIVATE)
    #d = db.DB(env)
    #d.open(path, filetype, db.DB_THREAD|db.DB_CREATE)
    #_db = BsdDbShelf(d, writeback=writeback)
    _db = dbshelve.open(filename,
                        flags=db.DB_THREAD | db.DB_CREATE,
                        filetype=filetype,
                        dbenv=env)
    return _db, dir
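A brief usage sketch for the shelf returned by open_db above (the key and value are made up): dbshelve handles behave like dictionaries whose values are pickled transparently.

shelf, data_dir = open_db('cache.db')
shelf['answer'] = {'value': 42}   # pickled on assignment
print shelf['answer']
shelf.close()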
  def Open(self, create_if_necessary=False):
    # XXX: Make sure this is the correct way to do this.
    if not os.path.isdir(self._db_root) and create_if_necessary:
      os.mkdir(self._db_root)

    if not isinstance(self._dbenv, bdb.DBEnv):
      self._dbenv = bdb.DBEnv()
      self._dbenv.set_cachesize(0, 20<<20)
      flags = bdb.DB_INIT_LOCK | bdb.DB_INIT_MPOOL \
              | (create_if_necessary and bdb.DB_CREATE)
      self._dbenv.open(self._db_root, flags)

    if create_if_necessary:
      self._CreateBDB()

    self._OpenBDB()

    if self.use_key_db:
      self._CreateKeyDB(create_if_necessary)
Example #17
def listKeys():
    from bsddb import db
    import settings
    db_env = db.DBEnv()

    # Open the environment with transactions enabled.
    # The DB_REGISTER variant below is not supported in BDB 4.5-ish, so it is left commented out.
    #db_env.open(PROJECT_DATA_ROOT+"/data", db.DB_REGISTER|db.DB_CREATE|db.DB_RECOVER|db.DB_INIT_MPOOL|db.DB_INIT_LOCK|db.DB_INIT_LOG|db.DB_INIT_TXN|db.DB_CHKSUM)
    db_env.open(
        settings.BDB_PATH, db.DB_CREATE | db.DB_RECOVER | db.DB_INIT_MPOOL
        | db.DB_INIT_LOCK | db.DB_INIT_LOG | db.DB_INIT_TXN | db.DB_CHKSUM)
    db_env.set_timeout(16384, db.DB_SET_LOCK_TIMEOUT)
    db_env.set_timeout(16384, db.DB_SET_TXN_TIMEOUT)
    bdb = db.DB(db_env)
    bdb.open(settings.BDB_NAME, settings.BDB_PATH)

    keys = bdb.keys()
    keys.sort()
    print "Keys:", keys
    bdb.close()
    db_env.close()
    def test02_close_dbenv_delete_db_success(self):
        dbenv = db.DBEnv()
        dbenv.open(self.homeDir,
                   db.DB_INIT_CDB| db.DB_CREATE |db.DB_THREAD|db.DB_INIT_MPOOL,
                   0666)

        d = db.DB(dbenv)
        d.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)

        try:
            dbenv.close()
        except db.DBError:
            pass  # good, it should raise an exception

        del d
        try:
            import gc
        except ImportError:
            gc = None
        if gc:
            # force d.__del__ [DB_dealloc] to be called
            gc.collect()
Example #19
 def setUp(self):
     if self.useEnv:
         homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home')
         self.homeDir = homeDir
         try:
             shutil.rmtree(homeDir)
         except OSError, e:
             # unix returns ENOENT, windows returns ESRCH
             if e.errno not in (errno.ENOENT, errno.ESRCH): raise
         os.mkdir(homeDir)
         try:
             self.env = db.DBEnv()
             self.env.set_lg_max(1024 * 1024)
             self.env.set_flags(self.envsetflags, 1)
             self.env.open(homeDir, self.envflags | db.DB_CREATE)
             tempfile.tempdir = homeDir
             self.filename = os.path.split(tempfile.mktemp())[1]
             tempfile.tempdir = None
         # Yes, a bare except is intended, since we're re-raising the exc.
         except:
             shutil.rmtree(homeDir)
             raise
    def test01_close_dbenv_before_db(self):
        dbenv = db.DBEnv()
        dbenv.open(self.homeDir,
                   db.DB_INIT_CDB| db.DB_CREATE |db.DB_THREAD|db.DB_INIT_MPOOL,
                   0666)

        d = db.DB(dbenv)
        d.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)

        try:
            dbenv.close()
        except db.DBError:
            try:
                d.close()
            except db.DBError:
                return
            assert 0, \
                   "DB close did not raise an exception about its "\
                   "DBEnv being trashed"

        # XXX This may fail when using older versions of BerkeleyDB.
        # E.g. 3.2.9 never raised the exception.
        assert 0, "dbenv did not raise an exception about its DB being open"
 def _dump_bsddbm_for_unity(self, outfile, outdir):
     """ write out the subset that unity needs of the REVIEW_STATS_CACHE
         as a C friendly (using struct) bsddb
     """
     env = bdb.DBEnv()
     if not os.path.exists(outdir):
         os.makedirs(outdir)
     env.open(outdir,
              bdb.DB_CREATE | bdb.DB_INIT_CDB | bdb.DB_INIT_MPOOL |
              bdb.DB_NOMMAP,  # be gentle on e.g. nfs mounts
              0600)
     db = bdb.DB(env)
     db.open(outfile,
             dbtype=bdb.DB_HASH,
             mode=0600,
             flags=bdb.DB_CREATE)
     for (app, stats) in self.REVIEW_STATS_CACHE.iteritems():
         # pkgname is ascii by policy, so its fine to use str() here
         db[str(app.pkgname)] = struct.pack('iii',
                                            stats.ratings_average or 0,
                                            stats.ratings_total,
                                            stats.dampened_rating)
     db.close()
     env.close()
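For reference, a hedged reader sketch for the database written above (the package-name key is an assumption; path handling mirrors the writer): each value is a struct-packed (ratings_average, ratings_total, dampened_rating) triple.

import os
import struct
from bsddb import db as bdb

d = bdb.DB()
d.open(os.path.join(outdir, outfile), dbtype=bdb.DB_HASH, flags=bdb.DB_RDONLY)
average, total, dampened = struct.unpack('iii', d['some-pkgname'])
d.close()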
Example #22
    def open(self, path, create=True):
        homeDir = path
        envsetflags  = db.DB_CDB_ALLDB
        envflags = db.DB_INIT_MPOOL | db.DB_INIT_CDB | db.DB_THREAD
        if not exists(homeDir):
            if create==True:
                mkdir(homeDir) # TODO: implement create method and refactor this to it
                self.create(path)
            else:
                return -1
        if self.__identifier is None:
            self.__identifier = URIRef(pathname2url(abspath(homeDir)))
        self.db_env = db_env = db.DBEnv()
        db_env.set_cachesize(0, 1024*1024*50) # TODO
        #db_env.set_lg_max(1024*1024)
        db_env.set_flags(envsetflags, 1)
        db_env.open(homeDir, envflags | db.DB_CREATE)

        self.__open = True

        dbname = None
        dbtype = db.DB_BTREE
        dbopenflags = db.DB_THREAD

        dbmode = 0660
        dbsetflags   = 0

        # create and open the DBs
        self.__indicies = [None,] * 3
        self.__indicies_info = [None,] * 3
        for i in xrange(0, 3):
            index_name = to_key_func(i)(("s", "p", "o"), "c")
            index = db.DB(db_env)
            index.set_flags(dbsetflags)
            index.open(index_name, dbname, dbtype, dbopenflags|db.DB_CREATE, dbmode)
            self.__indicies[i] = index
            self.__indicies_info[i] = (index, to_key_func(i), from_key_func(i))

        lookup = {}
        for i in xrange(0, 8):
            results = []
            for start in xrange(0, 3):
                score = 1
                len = 0
                for j in xrange(start, start+3):
                    if i & (1<<(j%3)):
                        score = score << 1
                        len += 1
                    else:
                        break
                tie_break = 2-start
                results.append(((score, tie_break), start, len))

            results.sort()
            score, start, len = results[-1]

            def get_prefix_func(start, end):
                def get_prefix(triple, context):
                    if context is None:
                        yield ""
                    else:
                        yield context
                    i = start
                    while i<end:
                        yield triple[i%3]
                        i += 1
                    yield ""
                return get_prefix

            lookup[i] = (self.__indicies[start], get_prefix_func(start, start + len), from_key_func(start), results_from_key_func(start, self._from_string))


        self.__lookup_dict = lookup

        self.__contexts = db.DB(db_env)
        self.__contexts.set_flags(dbsetflags)
        self.__contexts.open("contexts", dbname, dbtype, dbopenflags|db.DB_CREATE, dbmode)

        self.__namespace = db.DB(db_env)
        self.__namespace.set_flags(dbsetflags)
        self.__namespace.open("namespace", dbname, dbtype, dbopenflags|db.DB_CREATE, dbmode)

        self.__prefix = db.DB(db_env)
        self.__prefix.set_flags(dbsetflags)
        self.__prefix.open("prefix", dbname, dbtype, dbopenflags|db.DB_CREATE, dbmode)

        self.__k2i = db.DB(db_env)
        self.__k2i.set_flags(dbsetflags)
        self.__k2i.open("k2i", dbname, db.DB_HASH, dbopenflags|db.DB_CREATE, dbmode)

        self.__i2k = db.DB(db_env)
        self.__i2k.set_flags(dbsetflags)
        self.__i2k.open("i2k", dbname, db.DB_RECNO, dbopenflags|db.DB_CREATE, dbmode)

        self.__needs_sync = False
        t = Thread(target=self.__sync_run)
        t.setDaemon(True)
        t.start()
        self.__sync_thread = t
        return 1
Example #23
    def __init__(self,
                 dbName,
                 load=False,
                 persistent=True,
                 dbenv=None,
                 rd_only=False,
                 cachesize=(0, 512)):
        """ Common Constructor """

        if dbName is None:
            #Ok so we want a pure RAM-based DB, let's do it
            self.pureRAM = True
            load = False
            persistent = False
        else:
            if os.path.exists(dbName) and not load:
                os.remove(dbName)
        self.db_name = dbName
        if dbenv is not None:
            self.dbObj = db.DB(dbenv)
        else:
            env = db.DBEnv()
            # default cache size is 512 MB (cachesize is given as (GBytes, MBytes))
            env.set_cachesize(cachesize[0], cachesize[1] * 1024 * 1024, 0)
            env_flags = db.DB_CREATE | db.DB_PRIVATE | db.DB_INIT_MPOOL  #| db.DB_INIT_CDB | db.DB_THREAD
            env.log_set_config(db.DB_LOG_IN_MEMORY, 1)
            env.open(None, env_flags)
            self.dbObj = db.DB(env)
        self.opened = False
        # allow multiple key entries
        # TODO: switch to DB_DUPSORT
        self.dbObj.set_flags(db.DB_DUP | db.DB_DUPSORT)
        #        self.dbObj.set_flags(db.DB_DUP)

        if not load:
            try:
                self.dbObj.open(self.db_name,
                                dbtype=db.DB_HASH,
                                flags=db.DB_CREATE)
                self.opened = True
            except:
                raise IOError('Failed to create %s ' % self.db_name)
            print "Created DB:", dbName
        else:
            if self.db_name is None:
                raise ValueError('No Database name provided for loading')

            if not os.path.exists(self.db_name):
                self.dbObj.open(self.db_name,
                                dbtype=db.DB_HASH,
                                flags=db.DB_CREATE)
            else:
                if rd_only:
                    self.dbObj.open(dbName,
                                    dbtype=db.DB_HASH,
                                    flags=db.DB_RDONLY)
                else:
                    self.dbObj.open(dbName, dbtype=db.DB_HASH)
            self.opened = True
            print "Loaded DB:", dbName

        # keep in mind if the db is to be kept or destroy
        self.persistent = persistent
        # cursor object : might get instantiated later
        self.cursor = None
Example #24
 def test02_db_home(self):
     env = db.DBEnv()
     # check for crash fixed when db_home is used before open()
     assert env.db_home is None
     env.open(self.homeDir, db.DB_CREATE)
     assert self.homeDir == env.db_home
Example #25
import os
import os.path as op

from src.classes.fingerprints import *
from src.classes.fingerprints.bench import *
from src.classes.fingerprints.cortico import *
from src.classes.fingerprints.cochleo import *
from src.classes.fingerprints.CQT import *
from src.tools.fgpt_tools import db_creation, db_test
from src.tools.fgpt_tools import get_filepaths

SKETCH_ROOT = os.environ['SKETCH_ROOT']
db_path = op.join(SKETCH_ROOT, 'fgpt_db')
score_path = op.join(SKETCH_ROOT, 'fgpt_scores')

SND_DB_PATH = os.environ['SND_DB_PATH']

import bsddb.db as db
env = db.DBEnv()
env.set_cachesize(0, 512 * 1024 * 1024, 0)
env_flags = db.DB_CREATE | db.DB_PRIVATE | db.DB_INIT_MPOOL  #| db.DB_INIT_CDB | db.DB_THREAD
env.log_set_config(db.DB_LOG_IN_MEMORY, 1)
env.open(None, env_flags)

bases = {
    'RWCLearn': (op.join(SND_DB_PATH, 'rwc/Learn/'), '.wav'),
    'voxforge': (op.join(SND_DB_PATH, 'voxforge/main/'), 'wav'),
    #'voxforge':(op.join(SND_DB_PATH,'voxforge/main/Learn/'),'wav'),
    'GTZAN': (op.join(SND_DB_PATH, 'genres/'), '.au')
}

# The RWC subset path
#audio_path = '/sons/rwc/Learn'
set_id = 'GTZAN'  # Choose a unique identifier for the dataset considered
Example #26
    def open(self, path, create=True):
        if self.__open:
            return
        homeDir = path
        #NOTE: The identifier is appended to the path as the location for the db
        #This provides proper isolation for stores which have the same path but different identifiers
        if SUPPORT_MULTIPLE_STORE_ENVIRON:
            fullDir = join(homeDir,self.identifier)
        else:
            fullDir = homeDir
        envsetflags  = db.DB_CDB_ALLDB
        envflags = db.DB_INIT_MPOOL | db.DB_INIT_LOCK | db.DB_THREAD | db.DB_INIT_TXN | db.DB_RECOVER
        if not exists(fullDir):
            if create==True:
                makedirs(fullDir)
                self.create(path)
            else:                
                return NO_STORE
        if self.__identifier is None:
            self.__identifier = URIRef(pathname2url(abspath(fullDir)))
        self.db_env = db_env = db.DBEnv()
        db_env.set_cachesize(0, 1024*1024*50) # TODO
        #db_env.set_lg_max(1024*1024)
        #db_env.set_flags(envsetflags, 1)
        db_env.open(fullDir, envflags | db.DB_CREATE,0)

        #Transaction object
        self.dbTxn = db_env.txn_begin()

        self.__open = True

        dbname = None
        dbtype = db.DB_BTREE
        dbopenflags = db.DB_THREAD

        dbmode = 0660
        dbsetflags   = 0

        # create and open the DBs
        self.__indicies = [None,] * 3
        self.__indicies_info = [None,] * 3
        for i in xrange(0, 3):
            index_name = to_key_func(i)(("s", "p", "o"), "c")
            index = db.DB(db_env)
            index.set_flags(dbsetflags)
            index.open(index_name, dbname, dbtype, dbopenflags|db.DB_CREATE, dbmode,txn=self.dbTxn)
            self.__indicies[i] = index
            self.__indicies_info[i] = (index, to_key_func(i), from_key_func(i))

        lookup = {}
        for i in xrange(0, 8):
            results = []
            for start in xrange(0, 3):
                score = 1
                len = 0
                for j in xrange(start, start+3):
                    if i & (1<<(j%3)):
                        score = score << 1
                        len += 1
                    else:
                        break
                tie_break = 2-start
                results.append(((score, tie_break), start, len))

            results.sort()
            score, start, len = results[-1]

            def get_prefix_func(start, end):
                def get_prefix(triple, context):
                    if context is None:
                        yield ""
                    else:
                        yield context
                    i = start
                    while i<end:
                        yield triple[i%3]
                        i += 1
                    yield ""
                return get_prefix

            lookup[i] = (self.__indicies[start], get_prefix_func(start, start + len), from_key_func(start), results_from_key_func(start, self._from_string))


        self.__lookup_dict = lookup

        self.__contexts = db.DB(db_env)
        self.__contexts.set_flags(dbsetflags)
        self.__contexts.open("contexts", dbname, dbtype, dbopenflags|db.DB_CREATE, dbmode,txn=self.dbTxn)

        self.__namespace = db.DB(db_env)
        self.__namespace.set_flags(dbsetflags)
        self.__namespace.open("namespace", dbname, dbtype, dbopenflags|db.DB_CREATE, dbmode,txn=self.dbTxn)

        self.__prefix = db.DB(db_env)
        self.__prefix.set_flags(dbsetflags)
        self.__prefix.open("prefix", dbname, dbtype, dbopenflags|db.DB_CREATE, dbmode,txn=self.dbTxn)

        self.__i2k = db.DB(db_env)
        self.__i2k.set_flags(dbsetflags)
        self.__i2k.open("i2k", dbname, db.DB_HASH, dbopenflags|db.DB_CREATE, dbmode,txn=self.dbTxn)

        self.__needs_sync = False
        t = Thread(target=self.__sync_run)
        t.setDaemon(True)
        t.start()
        self.__sync_thread = t
        return VALID_STORE