Example #1
def write_aceview_genes(species, log=0):
    """
    **Write the correspondence between AceView and Ensembl genes.**
    ::
    
        Arguments:
            species --- species name
            log ------- handle of a log file for recording messages
        
        Input:
            %species_genes_by_region.bkdb
            %species_ensembl_genes_by_gene.bkdb
            
        Output:           
            %species_ens_by_ace_gene.txt
        
    """
           
    # open AceView genes indexed by Ensembl region
    t1=time.time()


    path=os.environ['PYDATA']+'/'+species+'/ensembl/'+species+'_genes_by_region.bkdb'
    byRegionDB=bsddb.btopen(path)    
    path=os.environ['PYDATA']+'/'+species+'/aceview/'+species+'_ensembl_genes_by_gene.bkdb'
    byAceGeneDB=bsddb.btopen(path)
    outFile=open(os.environ['PYDATA']+"/"+species+'/txt/'+species+'_ens_by_ace_gene.txt','w')
        
                    
    byAceGeneIDs=byAceGeneDB.keys()
    ensGeneIDs=[]
    for region in byRegionDB:
        ensGeneIDs.extend(cPickle.loads(byRegionDB[region]).IDs)        
    for byAceGeneID in byAceGeneIDs:
        ensGenes=cPickle.loads(byAceGeneDB[byAceGeneID])                        
        outFile.write(byAceGeneID+'\t')
        if not ensGenes:
            outFile.write('{}\n')
        else:
            outFile.write('{')
            for i in range(len(ensGenes)):
                try:
                    # the current Ensembl gene may already have been removed
                    ensGeneIDs.remove(ensGenes[i])
                except ValueError:
                    pass
                if i==0:
                    outFile.write("'"+ensGenes[i]+"'")
                else:
                    outFile.write(','+"'"+ensGenes[i]+"'") 
            outFile.write('}\n')
    for i in range(len(ensGeneIDs)):
        outFile.write('0\t%s\n'%ensGeneIDs[i])
    byRegionDB.close()       
    byAceGeneDB.close()
    outFile.close()
    t2=time.time()
    if log!=0:
        log.write('%s\t%s\t\t30\t%s_aceview_genes.txt\taceview\t%u\t%.2f\n'%(date.today(),species,species,len(byAceGeneIDs),t2-t1))
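A hedged usage sketch for the call above; the species name, PYDATA root, and log-file name are illustrative, and the two input .bkdb files must already exist under that root:

    import os

    os.environ.setdefault('PYDATA', '/data/pydata')   # assumed data root
    log = open('aceview.log', 'a')
    write_aceview_genes('mouse', log=log)             # species name is illustrative
    log.close()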
Example #2
    def _initcache(self, cachefile_name=None):

        if cachefile_name:
            self._id = cachefile_name
        else:
            self._id = "".join(
                [uuid.uuid4().hex,
                 time.strftime("%Y%m%d%H%M%S")])
        self._cache_path = path.dirname(__file__)
        if len(self._cache_path) == 0:
            self._cache_path = os.getcwd()
        assert path.exists(self._cache_path)
        cache_file = "/".join([self._cache_path, self._id])
        mutex_registry = self.__class__.__setitem__mutex__
        if cachefile_name:
            if cache_file not in mutex_registry:
                self.data = bsddb.btopen(cache_file, "c")
                mutex_registry.setdefault(
                    cache_file, [threading.Lock(), self.data, 0])
                mutex_registry[cache_file][2] += 1
            else:
                self.data = mutex_registry[cache_file][1]
                mutex_registry[cache_file][2] += 1
        else:
            self.data = bsddb.btopen(cache_file, "n")
            mutex_registry.setdefault(
                cache_file, [threading.Lock(), self.data, 0])
Example #3
    def __init__(self, filename, qsize, scd_policy, lbd=0.7, upd_cl_us=None):
        self.filename = filename
        self.scd_policy = scd_policy
        index_vertices_file = os.path.splitext(filename)[0] + "_v.hdr"
        index_isolated_vertices_file = os.path.splitext(filename)[0] + "_iv.hdr"
        index_corner_file = os.path.splitext(filename)[0] + "_c.hdr"
        index_corner_vertice_file = os.path.splitext(filename)[0] + "_cv.hdr"
        index_clusters_file = os.path.splitext(filename)[0] + "_cl.hdr"
        self.index_vertices = bsddb.btopen(index_vertices_file)
        self.index_isolated_vertices = bsddb.btopen(index_isolated_vertices_file)
        self.index_corners = bsddb.btopen(index_corner_file)
        self.index_corner_vertice = bsddb.btopen(index_corner_vertice_file)
        self.index_clusters = bsddb.btopen(index_clusters_file)

        self.iv_keys = sorted([int(i) + 1 for i in self.index_vertices.keys()])
        self.ic_keys = sorted([int(i) + 1 for i in self.index_corners.keys()])
        # self.icv_keys = sorted([int(i) + 1 for i in self.index_corner_vertice.keys()])

        self.cfile = open(filename)
        self.load_header()

        self.cl_usage = {}
        self.queue_size = int(math.ceil((qsize / 100.0 * len(self.index_clusters))))
        self.timestamp = 0
        self.lbd = lbd

        self.wastings = []
        self.last_removed = -1
        self.last_loaded = -1

        self._n_load_clusters = {}
        self._n_unload_clusters = {}
        self.misses = 0
        self.hits = 0
        self.access = 0

        self.colour = 0

        self.__vertices = {}
        self.__L = {}
        self.__R = {}
        self.__O = {}
        self.__V = {}
        self.__C = {}
        self.__VOs = {}

        self.vertices = _DictGeomElem(self, "vertices", self.__vertices)
        self.L = _DictGeomElem(self, "L", self.__L)
        self.R = _DictGeomElem(self, "R", self.__R)
        self.O = _DictGeomElem(self, "O", self.__O)
        self.V = _DictGeomElem(self, "V", self.__V)
        self.C = _DictGeomElem(self, "C", self.__C)
        self.VOs = _DictGeomElem(self, "VOs", self.__VOs)

        if upd_cl_us is None:
            self.update_cluster_usage = self._update_cluster_usage
        else:
            self.update_cluster_usage = types.MethodType(upd_cl_us, self)

        signal.signal(signal.SIGINT, lambda x, y: self.print_cluster_info())
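For intuition about the constructor above: queue_size is derived from the qsize percentage, so the cache keeps at most that fraction of the indexed clusters in memory. A minimal sketch of the same arithmetic (illustrative numbers):

    import math

    qsize, n_clusters = 10, 200                       # 10% of 200 indexed clusters
    print int(math.ceil(qsize / 100.0 * n_clusters))  # 20 clusters kept in the queue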
Example #4
 def openbase(self):
     if sys.platform.startswith('darwin'):
         self.dbcmdscheduler     = plyvel.DB(self.name_basecmd, create_if_missing=True)
         self.dbsessionscheduler = plyvel.DB(self.name_basesession, create_if_missing=True)
     else:
         self.dbcmdscheduler     = bsddb.btopen(self.name_basecmd, 'c')
         self.dbsessionscheduler = bsddb.btopen(self.name_basesession, 'c')
Example #5
def main():
    try:
        db = bsddb.btopen(DA_FILE, "w")
    except bsddb.error:
        print "DB doesn't exist, creating a new one"
        db = bsddb.btopen(DA_FILE, "c")
    lib.set_seed(SEED)

    for index in range(DB_SIZE):
        krng = 64 + lib.get_random() % 64
        key = ""
        for i in range(krng):
            key += str(unichr(lib.get_random_char()))
        vrng = 64 + lib.get_random() % 64
        value = ""
        for i in range(vrng):
            value += str(unichr(lib.get_random_char()))
        print key
        print value
        print ""
        db[key] = value
    try:
        db.close()
    except Exception as e:
        print e
Example #6
def OpenDB():
    global db
    db = bsddb.btopen(DBFILE, 'c')
    try:
        # touch every key to make sure the file is readable
        for k in db.keys():
            pass
    except bsddb.error:
        db.close()
        db = bsddb.btopen(DBFILE, 'n')
Example #7
 def __init__(self, resource_prefix):
     """
     Init the knowledge resource
     :param resource_prefix: the resource directory and file prefix
     """
     self.term_to_id = bsddb.btopen(resource_prefix + '_term_to_id.db', 'r')
     self.id_to_term = bsddb.btopen(resource_prefix + '_id_to_term.db', 'r')
     self.path_to_id = bsddb.btopen(resource_prefix + '_path_to_id.db', 'r')
     self.id_to_path = bsddb.btopen(resource_prefix + '_id_to_path.db', 'r')
     self.l2r_edges = bsddb.btopen(resource_prefix + '_l2r.db', 'r')
Example #8
 def __init__(self, resource_prefix):
     """
     Init the knowledge resource
     :param resource_prefix: the resource directory and file prefix
     """
     self.term_to_id = bsddb.btopen(resource_prefix + '_term_to_id.db')
     self.id_to_term = bsddb.btopen(resource_prefix + '_id_to_term.db')
     self.path_to_id = bsddb.btopen(resource_prefix + '_path_to_id.db')
     self.id_to_path = bsddb.btopen(resource_prefix + '_id_to_path.db')
     self.l2r_edges = bsddb.btopen(resource_prefix + '_l2r.db')
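For context, a minimal sketch of how such a resource object might be used; the enclosing class name KnowledgeResource, the prefix, and the lookup key are all assumptions, not given by the snippet:

    kr = KnowledgeResource('/data/resource/wiki')   # hypothetical class name and prefix
    term_id = kr.term_to_id['dog']                  # bsddb keys/values are byte strings
    print kr.id_to_term[term_id]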
Example #9
 def _initcache(self, cachefile_name=None):
     if cachefile_name:
         self._id = cachefile_name
     else:
         self._id = "".join([uuid.uuid4().hex, time.strftime("%Y%m%d%H%M%S")])
     self._cache_path = path.dirname(__file__)
     assert path.exists(self._cache_path)
     if cachefile_name:
         self.data = bsddb.btopen(os.path.join(self._cache_path, self._id), "w")
     else:
         self.data = bsddb.btopen(os.path.join(self._cache_path, self._id), "n")
Example #10
  def __init__(self, filename, mode):

    self.mode = mode

    if mode == "w":
      self.test_db_items = bsddb.btopen(filename, 'n')

    elif mode == "r":
      self.test_db_items = bsddb.btopen(filename, mode)
      self.next_rec_num = 0   # Initialise next record counter
      self.num_records = len(self.test_db_items)
Example #11
    def __openDB4(self):
        """Make sure the database is read, and open all subdatabases.

        Raise bsddb.error."""

        dbpath = self._getDBPath()
        if not os.path.isdir(dbpath):
            os.makedirs(dbpath)

        if self.dbopen:
            return self.OK

        # We first need to remove the __db files, otherwise rpm will later
        # be really upset. :)
        for i in xrange(9):
            try:
                os.unlink(os.path.join(dbpath, "__db.00%d" % i))
            except OSError:
                pass
        try:
            self.basenames_db      = bsddb.hashopen(
                os.path.join(dbpath, "Basenames"), "c")
            self.conflictname_db   = bsddb.hashopen(
                os.path.join(dbpath, "Conflictname"), "c")
            self.dirnames_db       = bsddb.btopen(
                os.path.join(dbpath, "Dirnames"), "c")
            self.filemd5s_db       = bsddb.hashopen(
                os.path.join(dbpath, "Filemd5s"), "c")
            self.group_db          = bsddb.hashopen(
                os.path.join(dbpath, "Group"), "c")
            self.installtid_db     = bsddb.btopen(
                os.path.join(dbpath, "Installtid"), "c")
            self.name_db           = bsddb.hashopen(
                os.path.join(dbpath, "Name"), "c")
            self.packages_db       = bsddb.hashopen(
                os.path.join(dbpath, "Packages"), "c")
            self.providename_db    = bsddb.hashopen(
                os.path.join(dbpath, "Providename"), "c")
            self.provideversion_db = bsddb.btopen(
                os.path.join(dbpath, "Provideversion"), "c")
            self.requirename_db    = bsddb.hashopen(
                os.path.join(dbpath, "Requirename"), "c")
            self.requireversion_db = bsddb.btopen(
                os.path.join(dbpath, "Requireversion"), "c")
            self.sha1header_db     = bsddb.hashopen(
                os.path.join(dbpath, "Sha1header"), "c")
            self.sigmd5_db         = bsddb.hashopen(
                os.path.join(dbpath, "Sigmd5"), "c")
            self.triggername_db    = bsddb.hashopen(
                os.path.join(dbpath, "Triggername"), "c")
            self.dbopen = True
        except bsddb.error:
            return
        return self.OK
Example #12
def save_clusters(lr, clusters, filename):
    with file(filename, "w") as cfile:
        # indexes
        index_vertices_file = os.path.splitext(filename)[0] + "_v.hdr"
        index_isolated_vertices_file = os.path.splitext(filename)[0] + "_iv.hdr"
        index_clusters_file = os.path.splitext(filename)[0] + "_cl.hdr"
        index_corner_file = os.path.splitext(filename)[0] + "_c.hdr"
        index_corner_vertice_file = os.path.splitext(filename)[0] + "_cv.hdr"
        index_vertices = bsddb.btopen(index_vertices_file)
        index_isolated_vertices = bsddb.btopen(index_isolated_vertices_file)
        index_corners = bsddb.btopen(index_corner_file)
        index_corner_vertice = bsddb.btopen(index_corner_vertice_file)
        index_clusters = bsddb.btopen(index_clusters_file)

        cfile.write("edge vertex: %d\n" % lr.mr)
        cfile.write("vertex: %d\n" % lr.m)
        cfile.write("triangles: %d\n" % lr.number_triangles)

        for i, cluster in enumerate(clusters):
            # cfile.write("Cluster %d\n" % i)
            init_cluster = cfile.tell()

            minv, maxv = 2 ** 32, -1
            minc, maxc = 2 ** 32, -1
            mincv, maxcv = 2 ** 32, -1

            for elem in cluster:
                if elem[0] == "v":
                    if elem[1] < lr.mr:
                        maxv = max(elem[1], maxv)
                        minv = min(elem[1], minv)
                    else:
                        index_isolated_vertices[str(elem[1])] = str(i)
                elif elem[0] == "V":
                    index_corners[str(elem[1])] = str(i)

                elif elem[0] == "C":
                    # maxcv = max(elem[1], maxcv)
                    # mincv = min(elem[1], mincv)
                    index_corner_vertice[str(elem[1])] = str(i)

                cfile.write(" ".join([str(e) for e in elem]) + "\n")
            cluster_size = cfile.tell() - init_cluster

            if maxv > -1:
                index_vertices[str(maxv)] = str(i)

            # if maxc > -1:
            # index_corners[str(maxc)] = str(i)

            # if maxcv > -1:
            # index_corner_vertice[str(maxcv)] = str(i)

            index_clusters[str(i)] = "%d %d %d %d" % (init_cluster, cluster_size, minv, maxv)
Example #13
 def __init__(self, filename, stl_filename):
     self.filename = filename
     self.stl_filename = stl_filename
     index_vertices_file = os.path.splitext(filename)[0] + '_v.hdr'
     index_clusters_file = os.path.splitext(filename)[0] + '_c.hdr'
     self.index_vertices = bsddb.btopen(index_vertices_file)
     self.index_clusters = bsddb.btopen(index_clusters_file)
     self.loaded_clusters = []
     self.faces = {}
     self.vertices = {}
     self._make_clusters_maps()
Example #14
    def __init_dbd__(self):
        print "prepare to initial the bdb"
        print "[#] Check if the db exist"
        if os.path.exists("all_sites.db"):
            os.remove('all_sites.db')

        if os.path.exists('visited.db'):
            os.remove('visited.db')
        print "[#] Cleared the db"
            
        """
>>> import bsddb
>>> db = bsddb.btopen('spam.db', 'c')
>>> for i in range(10): db['%d'%i] = '%d'% (i*i)
...
>>> db['3']
'9'
>>> db.keys()
['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
>>> db.first()
('0', '0')
>>> db.next()
('1', '1')
>>> db.last()
('9', '81')
>>> db.set_location('2')
('2', '4')
>>> db.previous()
('1', '1')
>>> for k, v in db.iteritems():
...     print k, v
0 0
1 1
2 4
3 9
4 16
5 25
6 36
7 49
8 64
9 81
>>> '8' in db
True
>>> db.sync()
0

        """
        try:
            self.all_sites = bsddb.btopen(file='all_sites.db', flag='c')
            self.visited = bsddb.btopen(file='visited.db', flag='c')
            print "[*] Successfully initialized the BDB"
        except bsddb.error:
            print "[!] Bad! Can't create the BDB!"
Example #15
 def __init_dbd__(self):
     try:
         index = "result_server" + str(time.strftime('%Y%m%d%H%M%S')) + ".db"
         self.scan_report = bsddb.btopen(file=index, flag='c')
         print "result_server was created successfully!"
         self.filename.append(index)
         self.filelist = bsddb.btopen(file="filelist_re.db", flag='c')
         self.filelist[str(time.strftime('%Y%m%d%H%M%S'))] = index
         # print self.filelist.items()
     except bsddb.error:
         print "Can't create BDB files!"
Example #16
 def open(self):
     file_flag = lambda fname: 'c' if (not os.path.exists(fname)) else 'rw'
     if (not self.isOpened):
         if ((self.has_bsddb) and ((self.isPickleMethodUseStrings) or
                                   (self.isPickleMethodUseMarshal) or
                                   (self.isPickleMethodUseSafeSerializer))):
             self.__db = bsddb.btopen(self.fileName,
                                      file_flag(self.fileName))
             self.__isOpened = True
         elif (self.isPickleMethodUseBsdDbShelf):
             self.__db = shelve.BsdDbShelf(
                 bsddb.btopen(self.fileName, file_flag(self.fileName)), 0,
                 True)
             self.__isOpened = True
Example #17
def generate_dbs():
    db_i_table = bsddb.btopen("../components/data/i_table.db", 'c')
    db_n_table = bsddb.btopen("../components/data/n_table.db", 'c')
    
    i_table, n_table = _make_table()
    
    db_i_table.update(i_table)
    db_n_table.update(n_table)
    
    db_i_table.sync()
    db_n_table.sync()
    
    db_i_table.close()
    db_n_table.close()
Example #18
 def __init__(self):
     self.workDir = db_config.workDir
     self.osmsis = "/home/dndred/osmosis/bin/osmosis"
     self.useRealDataCache = True
     self.useOsmDataCache = True
     self.useGeoCodeCache = True
     self.saveAnonymousObj = True
     self.osmSearch = []
     # self.conGis = psycopg2.connect(host=db_config.osm_host, database=db_config.osm_database, user=db_config.osm_user, password=db_config.osm_password)
     # self.curGis = self.conGis.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
     self.bdb = bsddb.btopen(db_config.bdbname, 'c')
     self.APIbdb = bsddb.btopen(db_config.ApiCache, 'c')
     self.osmObjects = []
     self.realObjects = []
Example #19
def generate_dbs(trunc_limit):
    cur_path = os.path.dirname(os.path.abspath(__file__)) + "/../" + conf.DB_DIR + "/"
    db_i_table = bsddb.btopen(cur_path + conf.FN_I_TABLE[:-3] + "_" + str(trunc_limit) + ".db", 'c')
    db_n_table = bsddb.btopen(cur_path + conf.FN_N_TABLE[:-3] + "_" + str(trunc_limit) + ".db", 'c')
    
    i_table, n_table = _make_table(trunc_limit)
    
    db_i_table.update(i_table)
    db_n_table.update(n_table)
    
    db_i_table.sync()
    db_n_table.sync()
    
    db_i_table.close()
    db_n_table.close()
Example #20
 def __init__bsddb__(self):
     print "Create the bsddb!!"
     print "[^] checking if the bsddb exists"
     if os.path.exists("pages.db"):
         print "[*] Deleting the existing db!"
         os.remove("pages.db")
     if os.path.exists("visited.db"):
         print "[*] Deleting visited.db"
         os.remove("visited.db")
     try:
         self.pages = bsddb.btopen(file="pages.db", flag='c')
         self.visited = bsddb.btopen(file="visited.db", flag='c')
         print "[^] create db success!!"
     except bsddb.error:
         print "Create db error!!"
Example #21
 def __init__bsddb__(self):
     print "Create the bsddb!!"
     print "[^] checking if the bsddb exists"
     if os.path.exists("pages.db"):
         print "[*] Deleting the existing db!"
         os.remove("pages.db")
     if os.path.exists("visited.db"):
         print "[*] Deleting visited.db"
         os.remove("visited.db")
     try:
         self.pages = bsddb.btopen(file="pages.db", flag='c')
         self.visited = bsddb.btopen(file="visited.db", flag='c')
         print "[^] create db success!!"
     except bsddb.error:
         print "Create db error!!"
Example #22
def generate_dbs():
    cur_path = os.path.dirname(os.path.abspath(__file__)) + "/../" + conf.DB_DIR + "/"
    db_i_table = bsddb.btopen(cur_path + conf.FN_I_TABLE, 'c')
    db_n_table = bsddb.btopen(cur_path + conf.FN_N_TABLE, 'c')
    
    i_table, n_table = _make_table()
    
    db_i_table.update(i_table)
    db_n_table.update(n_table)
    
    db_i_table.sync()
    db_n_table.sync()
    
    db_i_table.close()
    db_n_table.close()
Example #23
    def CheckBackup(self):
        """ Checks the backup file. """

        try:
            # Let's see if the backup is fine...
            db = bsddb.btopen(self.dbName + ".bak", "c")
            keys = db.keys()
            db.close()
            os.remove(self.dbName)
            shutil.copy(self.dbName + ".bak", self.dbName)
            self.db = bsddb.btopen(self.dbName, "c")
            return True
        except (bsddb.error, OSError, IOError):
            # The backup database is corrupted... bad news
            return False
Example #24
def test_bsddb():
    import bsddb

    db = bsddb.btopen("bsddb.db", "c")
    db["key1"] = "value1"
    db["key2"] = "value2"
    db.close()

    db = bsddb.btopen("bsddb.db", "r")
    if len(db) != 2:
        raise Exception("Wrong length")
    if db["key1"] != "value1":
        raise Exception("value1 incorrect {}".format(db["key1"]))
    if db["key2"] != "value2":
        raise Exception("value2 incorrect {}".format(db["key2"]))
Example #25
    def CheckBackup(self):
        """ Checks the backup file. """

        try:
            # Let's see if the backup is fine...
            db = bsddb.btopen(self.dbName + ".bak", "c")
            keys = db.keys()
            db.close()
            os.remove(self.dbName)
            shutil.copy(self.dbName + ".bak", self.dbName)
            self.db = bsddb.btopen(self.dbName, "c")
            return True
        except (bsddb.error, OSError, IOError):
            # The backup database is corrupted... bad news
            return False
Example #26
    def execute(self, op):
        if op == 1:
            autor_db = bsddb.btopen('autor.db', 'c')

            if self._cod not in autor_db:
                print("Codigo nao existente, por favor, tente novamente.\n")
                return

            autor_db.pop(self._cod)

            autor_db.close()

        if op == 2:
            leitor_db = bsddb.btopen('leitor.db', 'c')

            if self._cod not in leitor_db:
                print("Codigo nao existente, por favor, tente novamente.\n")
                return

            leitor_db.pop(self._cod)

            leitor_db.close()

        if op == 3:
            livros_db = bsddb.btopen('livros.db', 'c')

            print(self._cod)

            if self._cod not in livros_db:
                print("Codigo nao existente, por favor, tente novamente.\n")
                return

            livros_db.pop(self._cod)

            livros_db.close()

        if op == 4:
            autor_livro_db = bsddb.btopen('autor_livro.db', 'c')

            if (self._codL in autor_livro_db):

                autor_livro_db.pop(self._codL)

            else:
                print("Codigo do livro invalido.\n")
                return

            autor_livro_db.close()
Example #27
def create(user, path, name):
    """
    insere un fichier 'name' dans le FilesPool
    se trouvant dans 'path' ce fichier appartien a l'utilisateur 'user'
    """
    db = bsddb.btopen(folder + 'index.db', 'c')
    if not os.path.exists(path):
        raise ("Le fichier n'existe pas")

    basename = name
    basename = basename.replace('_', '')
    # strip the '_' characters from the user name
    user = user.replace('_', '')

    # generate the Berkeley DB key
    key = user + '_' + str(random.getrandbits(30)) + '_' + basename
    while db.get(key):
        key = user + '_' + str(random.getrandbits(30)) + '_' + basename

    # generate the path within the directory tree
    hash = md5.new(key).hexdigest()
    hash = hash[:4] + '/' + hash[4:8] + '/' + hash[8:]
    while os.path.exists(folder + hash):
        hash = md5.new(key + str(random.getrandbits(30))).hexdigest()
        hash = hash[:4] + '/' + hash[4:8] + '/' + hash[8:]

    # create the file
    os.makedirs(folder + hash[:10])
    #     print 'path=', path, 'folder+hash=', folder+hash
    # beware: shutil.copy causes problems here!
    os.rename(path, folder + hash)

    # store the path in the Berkeley DB
    db[key] = hash
    db.close()
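A hypothetical call, assuming the module-level folder variable points at the pool root; note the source file is moved with os.rename, not copied:

    # absorb /tmp/upload.tmp into the pool as 'report.pdf' owned by user 'alice'
    create('alice', '/tmp/upload.tmp', 'report.pdf')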
Example #28
def __access_buckets(filename,clear,new_key=None,new_value=None):
    """
    Access data in forkbomb cache, potentially clearing or
    modifying it as required.
    """

    internal_db = bsddb.btopen(filename, 'c', 0644)
    handle = open(filename,"r")
    fcntl.flock(handle.fileno(), fcntl.LOCK_EX)
    storage = shelve.BsdDbShelf(internal_db)

    if clear:
        storage.clear()
        storage.close()
        fcntl.flock(handle.fileno(), fcntl.LOCK_UN)
        return {}

    if not storage.has_key("data"):
        storage["data"] = {}

    if new_key is not None:
        # bsddb is a bit weird about this: mutate a copy, then reassign
        newish = storage["data"].copy()
        newish[new_key] = new_value
        storage["data"] = newish

    rc = storage["data"].copy()
    storage.close()
    fcntl.flock(handle.fileno(), fcntl.LOCK_UN)

    return rc
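A minimal driver sketch for the helper above, assuming the cache path is writable; the filename and payloads are illustrative:

    CACHE = '/tmp/forkbomb.cache'
    __access_buckets(CACHE, True)                       # clear: start from an empty cache
    __access_buckets(CACHE, False, 'host1', {'rc': 0})  # record one result
    print __access_buckets(CACHE, False)                # {'host1': {'rc': 0}}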
Example #29
    def kbabel_tm_import(self):
        """Attempt to import the Translation Memory used in KBabel."""
        if bsddb is None or not path.exists(self.kbabel_dir):
            return
        for tm_filename in os.listdir(self.kbabel_dir):
            if not tm_filename.startswith(
                    "translations.") or not tm_filename.endswith(".db"):
                continue
            tm_file = path.join(self.kbabel_dir, tm_filename)
            lang = tm_filename.replace("translations.", "").replace(".db", "")
            translations = bsddb.btopen(tm_file, 'r')

            for source, target in translations.iteritems():
                unit = {"context": ""}
                source = source[:-1]  # null-terminated
                target = target[16:-1]  # 16 bytes of padding, null-terminated
                unit["source"] = _prepare_db_string(source)
                unit["target"] = _prepare_db_string(target)
                self.tmdb.add_dict(unit, "en", lang, commit=False)
            self.tmdb.connection.commit()

            logging.debug('%d units migrated from KBabel %s TM.' %
                          (len(translations), lang))
            translations.close()
            self.migrated.append(_("KBabel's Translation Memory: %(database_language_code)s") % \
                      {"database_language_code": lang})
Example #30
def open_bsddb(filename, flag='r', useHash=False, mode=0666):
    """open bsddb index instead of hash by default.
    useHash=True forces it to use anydbm default (i.e. hash) instead.
    Also gives more meaningful error messages."""
    try: # 1ST OPEN AS BTREE
        if useHash: # FORCE IT TO USE HASH INSTEAD OF BTREE
            return open_anydbm(filename, flag)
        else:
            return bsddb.btopen(filename, flag, mode)
    except bsddb.db.DBAccessError: # HMM, BLOCKED BY PERMISSIONS
        if flag=='c' or flag=='w': # TRY OPENING READ-ONLY
            try:
                ifile = file(filename)
            except IOError:
                # Hmm, not even readable. Raise a generic permission error.
                raise PermissionsError('insufficient permissions \
to open file: ' + filename)
            ifile.close()
            # We can read the file, so raise a ReadOnlyError.
            raise ReadOnlyError('file is read-only: '+ filename)
        else: # r OR n FLAG: JUST RAISE EXCEPTION
            raise PermissionsError('insufficient permissions to open file: '
                                   + filename)
    except bsddb.db.DBNoSuchFileError:
        raise NoSuchFileError('no file named: ' + filename)
    except bsddb.db.DBInvalidArgError: # NOT A BTREE FILE...
        try:
            if useHash: # NO POINT IN TRYING HASH YET AGAIN...
                raise bsddb.db.DBInvalidArgError
            # fallback to using default: hash file
            return open_anydbm(filename, flag)
        except bsddb.db.DBInvalidArgError:
            raise WrongFormatError('file does not match expected \
shelve format: ' + filename)
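A short usage sketch, assuming NoSuchFileError and the other custom exceptions raised above are importable from the same module; the filename is illustrative:

    try:
        idx = open_bsddb('people.idx', 'r')    # btree index by default
    except NoSuchFileError:
        idx = open_bsddb('people.idx', 'c')    # create it on first use
    idx['smith'] = 'record 42'
    idx.close()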
Example #31
    def getTablesWithForm(self):
        reports = db.btopen(self._fileName)
        tableList = reports.keys()
        reports.close()

        return tableList
Example #32
def readDb(inFile):
    if not os.path.exists(inFile):
        print "error: db file %s not found" % inFile
        return False, ""
    db = bsddb.btopen(inFile, "r")
    # read-only
    return True, db
Example #33
 def __init__(self, path="bsd.db"):
   self.path = path
   try:
     os.makedirs(self.path)
   except OSError:
     # the directory may already exist
     pass
   self.db = bsddb.btopen(os.path.join(self.path, "datastore.db"), "c")
Example #34
def make_bsddb(dbfile, dump_file):
    import bsddb
    db = bsddb.btopen(dbfile, 'w', cachesize=1024*1024*1024)

    from infogami.infobase.utils import flatten_dict

    indexable_keys = set([
        "authors.key",  "works.key", # edition
        "authors.author.key", "subjects", "subject_places", "subject_people", "subject_times" # work
    ])
    for type, key, revision, timestamp, json in read_tsv(dump_file):
        db[key] = json
        d = simplejson.loads(json)
        index = [(k, v) for k, v in flatten_dict(d) if k in indexable_keys]
        for k, v in index:
            k = web.rstrips(k, ".key")
            if k.startswith("subject"):
                v = '/' + v.lower().replace(" ", "_")

            dbkey = web.safestr('by_%s%s' % (k, v))
            if dbkey in db:
                db[dbkey] = db[dbkey] + " " + key
            else:
                db[dbkey] = key
    db.close()
    log("done")
Example #35
 def __init__(self, name, shelved=False, queue=False):
     global _config
     self.name = name
     self.shelved = shelved
     self.queue = queue
     self.condition = Condition()
     self.keys = {}  # key index
     self.indexs = {}  # index on tuple[2]
     self.count = 0
     self.scheme = None
     if shelved:
         # open db   
         path = '%s/%s_%s.bsd' % (_config['root_dir'], _config['server_key'], name)
         if self.queue:
             bsd = bsddb.rnopen(path, 'c')
         else:                
             bsd = bsddb.btopen(path, 'c')
             
         self.db = shelve.BsdDbShelf(bsd)
         # self.db['中文']='test'
         # print self.db.keys()
         # del self.db['中文']
        
     else:
         if self.queue:
             self.db = OrderedDict()  # memery db
         else:
             self.db = {}  # memery db
Example #36
def process_redirect_dump(writer, redirects_dump):
    import bsddb

    db = bsddb.btopen('solrdump/redirects.db',
                      'w',
                      cachesize=1024 * 1024 * 1024)

    for type, key, revision, timestamp, json_data in read_tsv(redirects_dump):
        d = json.loads(json_data)
        if not key.startswith(("/authors/", "/works/")):
            continue

        location = d.get('location')
        if location:
            # Old redirects still start with /a/ instead of /authors/.
            location = location.replace("/a/", "/authors/")
            db[key] = location

    for key in db:
        if key.startswith("/works/"):
            redirect = find_redirect(db, key)
            if redirect:
                writer.write([(redirect, "redirect", key)])

    return db
Example #37
def create(user, path, name):
    """
    insere un fichier 'name' dans le FilesPool
    se trouvant dans 'path' ce fichier appartien a l'utilisateur 'user'
    """
    db = bsddb.btopen(folder+'index.db', 'c')
    if not os.path.exists(path):
        raise("Le fichier n'existe pas")
    
    basename = name
    basename = basename.replace('_', '')
    # strip the '_' characters from the user name
    user = user.replace('_', '')
    
    # generate the Berkeley DB key
    key = user + '_' + str(random.getrandbits(30)) + '_' + basename
    while db.get(key):
        key = user + '_' + str(random.getrandbits(30)) + '_' + basename

    # generate the path within the directory tree
    hash = md5.new(key).hexdigest()
    hash = hash[:4] + '/' + hash[4:8] + '/' + hash[8:]
    while os.path.exists(folder + hash):
        hash = md5.new(key + str(random.getrandbits(30))).hexdigest()
        hash = hash[:4] + '/' + hash[4:8] + '/' + hash[8:]

    # create the file
    os.makedirs(folder + hash[:10])
    #     print 'path=', path, 'folder+hash=', folder+hash
    # beware: shutil.copy causes problems here!
    os.rename(path, folder + hash)

    # store the path in the Berkeley DB
    db[key] = hash
    db.close()
Example #38
    def __init__(self, name, path="", unique=True):
        #
        #  Constructs new DiskQueueManager
        #
        #  @param name : name of the queue
        #  @param path : path where databases will be stored
        #  @param unique : if set to False, disables the unique capability;
        #                  also add() then behaves exactly like set()
        #

        #  @param ext_tree : file extension of tree db
        #  @param ext_hash : file extension of hash db

        ext_tree = ".tree.db"
        ext_hash = ".hash.db"

        self.name = name

        file_md5 = self._hash(name)

        self.file_tree = path + file_md5 + ext_tree
        self.file_hash = path + file_md5 + ext_hash

        self.db_tree = bsddb.btopen(self.file_tree, "c")
        self.db_hash = bsddb.hashopen(self.file_hash, "c")

        self.new_key_id = 0L

        self.unique = unique
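A brief construction sketch; the queue name and path are illustrative. Both on-disk filenames are derived from the md5 of the queue name, so the same name always maps back to the same pair of databases:

    dqm = DiskQueueManager('crawl_queue', path='/tmp/')
    print dqm.file_tree   # /tmp/<md5-of-name>.tree.db -- the btree keeps keys ordered
    print dqm.file_hash   # /tmp/<md5-of-name>.hash.db -- the hash gives fast lookups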
Example #39
    def __init__(self, filename, call_class, key=lambda x: x.id,
                 allow_multiple=False):
        self.filename = filename
        self.allow_multiple = allow_multiple
        self.fh = open(self.filename)
        self.call_class = call_class
        self.key = key
        self.idxfile = filename + FileIndex.ext

        if need_update(filename, self.idxfile):
            self.clear()
            self.db = bsddb.btopen(self.idxfile, 'c')
            self.create()
            self.db.close()

        self.db = bsddb.btopen(self.idxfile, 'r')
Example #40
    def _handleUrl(self, irc, channel, nick, url):
        # 1 lookup from the db
        scname = '%s_%s' % (irc.network, channel)

        if not UrlReader.databases.has_key(scname):
            # new db
            dbpath = plugins.makeChannelFilename('%s.db' % scname, 'urldata')
            UrlReader.databases[scname] = bsddb.btopen(dbpath, 'c')

        urldb = UrlReader.databases[scname]

        if urldb.has_key(url):
            poster, title = pickle.loads(urldb[url])
            msg = '%s has already posted it: %s' % (poster, title)
            irc.reply(msg.encode('utf-8'))
        else:

            try:
                title = self._getTitle(url)
                if title is not None:
                    urldb[url] = pickle.dumps([nick.decode('utf-8'), title])
                    urldb.sync()
                    irc.reply(title.encode('utf-8'))
            except:
                traceback.print_exc()
                irc.reply('No Title')
Example #41
def make_bsddb(dbfile, dump_file):
    import bsddb

    db = bsddb.btopen(dbfile, "w", cachesize=1024 * 1024 * 1024)

    indexable_keys = {
        "authors.key",
        "works.key",  # edition
        "authors.author.key",
        "subjects",
        "subject_places",
        "subject_people",
        "subject_times",  # work
    }
    for type, key, revision, timestamp, json_data in read_tsv(dump_file):
        db[key] = json_data
        d = json.loads(json_data)
        index = [(k, v) for k, v in flatten_dict(d) if k in indexable_keys]
        for k, v in index:
            k = web.rstrips(k, ".key")
            if k.startswith("subject"):
                v = "/" + v.lower().replace(" ", "_")

            dbkey = web.safestr(f"by_{k}{v}")
            if dbkey in db:
                db[dbkey] = db[dbkey] + " " + key
            else:
                db[dbkey] = key
    db.close()
    log("done")
Example #42
  def _LoadBdbCacheFile(self, data):
    """Load data from bdb caches into a map.

    Args:
      data: a map.Map subclass

    Returns:
      Nothing.  Cache data is loaded into the 'data' parameter.

    Raises:
      CacheNotFound: if the database file does not exist
    """
    db_file = os.path.join(self.output_dir, self.CACHE_FILENAME)
    if not os.path.exists(db_file):
      self.log.debug('cache file does not exist: %r', db_file)
      raise error.CacheNotFound('cache file does not exist: %r' % db_file)

    db = bsddb.btopen(db_file, 'r')
    for k in db:
      if self.IsMapPrimaryKey(k):
        password_entry = self.ConvertValueToMapEntry(db[k])
        if not data.Add(password_entry):
          self.log.warn('could not add entry built from %r', db[k])

    db.close()
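A hypothetical driver for the loader above; passwd.PasswdMap stands in for any map.Map subclass and is an assumption, as is the cache instance:

    data = passwd.PasswdMap()            # assumed map.Map subclass
    try:
        cache._LoadBdbCacheFile(data)    # fills `data` in place
    except error.CacheNotFound:
        print 'no cache file yet'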
Example #43
def join_bsddb(file1, file1_key_column, file2, file2_key_column, separator):
    """
Join two files with the given key columns and separator. Read the first
file in fully to create a lookup table. Then iterate through the second
file and if the key column from file2 is found in the lookup table output
a line for each match from file1. The columns output will be the join
field, all the columns from file2 followed by all the columns from file1.

An iterator is returned. Each line does not have a trailing newline
"""
    lookup_table = bsddb.btopen(None, "c")
    for line in file1:
        line_parts = line.rstrip("\n").split(separator)
        key = line_parts.pop(file1_key_column)
        lookup_table[key] = separator.join(line_parts)

    for line in file2:
        line_parts = line.rstrip("\n").split(separator)
        key = line_parts.pop(file2_key_column)

        if key in lookup_table:
            (_key, value) = lookup_table.set_location(key)
            while key == _key:
                yield separator.join([key, separator.join(line_parts), value])
                try:
                    (_key, value) = lookup_table.next()
                except bsddb.db.DBNotFoundError:
                    (_key, value) = (None, None)

    lookup_table.close()
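Since file1 and file2 only need to be iterables of lines, plain lists are enough for a quick sketch (illustrative data, joining on column 0 of both inputs):

    left = ['1\talice\n', '2\tbob\n']
    right = ['1\t30\n', '2\t25\n', '3\t99\n']
    for row in join_bsddb(left, 0, right, 0, '\t'):
        print row
    # prints '1\t30\talice' then '2\t25\tbob'; key '3' has no match and is skipped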
Example #44
def make_bsddb(dbfile, dump_file):
    import bsddb
    db = bsddb.btopen(dbfile, 'w', cachesize=1024 * 1024 * 1024)

    from infogami.infobase.utils import flatten_dict

    indexable_keys = set([
        "authors.key",
        "works.key",  # edition
        "authors.author.key",
        "subjects",
        "subject_places",
        "subject_people",
        "subject_times"  # work
    ])
    for type, key, revision, timestamp, json in read_tsv(dump_file):
        db[key] = json
        d = simplejson.loads(json)
        index = [(k, v) for k, v in flatten_dict(d) if k in indexable_keys]
        for k, v in index:
            k = web.rstrips(k, ".key")
            if k.startswith("subject"):
                v = '/' + v.lower().replace(" ", "_")

            dbkey = web.safestr('by_%s%s' % (k, v))
            if dbkey in db:
                db[dbkey] = db[dbkey] + " " + key
            else:
                db[dbkey] = key
    db.close()
    log("done")
Example #45
def dbread(filename):
    import bsddb
    db = bsddb.btopen(filename,'r')
    dump = ''
    for key in sorted(db.keys()):
        dump += "%s = %s\n" % (e(key), e(db[key]))
    return dump
Example #46
    def kbabel_tm_import(self):
        """Attempt to import the Translation Memory used in KBabel."""
        if not path.exists(self.kbabel_dir):
            return
        for tm_filename in os.listdir(self.kbabel_dir):
            if not tm_filename.startswith("translations.") or not tm_filename.endswith(".db"):
                continue
            tm_file = path.join(self.kbabel_dir, tm_filename)
            lang = tm_filename.replace("translations.", "").replace(".db", "")
            translations = bsddb.btopen(tm_file, "r")

            for source, target in translations.iteritems():
                unit = {"context": ""}
                source = source[:-1]  # null-terminated
                target = target[16:-1]  # 16 bytes of padding, null-terminated
                unit["source"] = _prepare_db_string(source)
                unit["target"] = _prepare_db_string(target)
                self.tmdb.add_dict(unit, "en", lang, commit=False)
            self.tmdb.connection.commit()

            logging.debug("%d units migrated from KBabel %s TM." % (len(translations), lang))
            translations.close()
            self.migrated.append(
                _("KBabel's Translation Memory: %(database_language_code)s") % {"database_language_code": lang}
            )
Example #47
 def updateDicc(self, tabla_nombre, diccHeader, diccRows):
     datos = diccHeader
     datos.append(diccRows)
     dicc = db.btopen(self._fileName)
     dicc[tabla_nombre] = pickle.dumps(datos)
     dicc.close()
     return 0
Example #48
  def testVerifyFailure(self):
    # Hide the warning that we expect to get
    class TestFilter(logging.Filter):
      def filter(self, record):
        return not record.msg.startswith('verify failed: %d keys missing')

    fltr = TestFilter()
    logging.getLogger('NssDbShadowHandler').addFilter(fltr)

    # create a map
    m = shadow.ShadowMap()
    s = shadow.ShadowMapEntry()
    s.name = 'foo'
    self.failUnless(m.Add(s))

    updater = nssdb.NssDbShadowHandler({'dir': self.workdir,
                                        'makedb': '/usr/bin/makedb'})
    written = updater.Write(m)

    self.failUnless(os.path.exists(updater.temp_cache_filename),
                    'updater.Write() did not create a file')

    # change the cache
    db = bsddb.btopen(updater.temp_cache_filename)
    del db[db.first()[0]]
    db.sync()
    db.close()

    retval = updater.Verify(written)

    self.failUnlessEqual(False, retval)
    self.failIf(os.path.exists(os.path.join(updater.temp_cache_filename)))
    # no longer hide this message
    logging.getLogger('NssDbShadowHandler').removeFilter(fltr)
Example #49
def readDb(inFile):
    if not os.path.exists(inFile):
        print "error:db file:%s not found" % inFile
        return False , ""
    db=bsddb.btopen(inFile,"r")
    #只读(read only)
    return True, db
Example #50
    def _handleUrl(self, irc, channel, nick, url):
        # 1 lookup from the db
        scname = '%s_%s' % (irc.network, channel)

        if not UrlReader.databases.has_key(scname):
            # new db
            dbpath = plugins.makeChannelFilename('%s.db' % scname, 'urldata')
            UrlReader.databases[scname] = bsddb.btopen(dbpath, 'c')

        urldb = UrlReader.databases[scname]

        if urldb.has_key(url):
            poster, title = pickle.loads(urldb[url])
            msg = '%s has already posted it: %s' % (poster, title)
            irc.reply(msg.encode('utf-8'))
        else:
            try:
                title = self._getTitle(url)
                if title is not None:
                    urldb[url] = pickle.dumps([nick.decode('utf-8'), title])
                    urldb.sync()
                    irc.reply(title.encode('utf-8'))
            except:
                traceback.print_exc()
                irc.reply('No Title')
	def __init__(self, name, path = "", unique = True):
		#
		#  Constructs new DiskQueueManager
		#
		#  @param name : name of the queue
		#  @param path : path where databases will be stored
		#  @param unique : if set to False, disables the unique capability;
		#                  also add() then behaves exactly like set()
		#
		

		#  @param ext_tree : file extension of tree db
		#  @param ext_hash : file extension of hash db
		
		ext_tree = ".tree.db"
		ext_hash = ".hash.db"
		
		self.name = name
		
		file_md5 = self._hash(name)

		self.file_tree = path + file_md5 + ext_tree
		self.file_hash = path + file_md5 + ext_hash
			
		self.db_tree = bsddb.btopen(self.file_tree, "c")
		self.db_hash = bsddb.hashopen(self.file_hash, "c")

		self.new_key_id = 0L
		
		self.unique = unique
Example #52
def open_bsddb(filename, flag='r', useHash=False, mode=0666):
    """open bsddb index instead of hash by default.
    useHash=True forces it to use anydbm default (i.e. hash) instead.
    Also gives more meaningful error messages."""
    try: # 1ST OPEN AS BTREE
        if useHash: # FORCE IT TO USE HASH INSTEAD OF BTREE
            return open_anydbm(filename, flag)
        else:
            return bsddb.btopen(filename, flag, mode)
    except bsddb.db.DBAccessError: # HMM, BLOCKED BY PERMISSIONS
        if flag=='c' or flag=='w': # TRY OPENING READ-ONLY
            try:
                ifile = file(filename)
            except IOError: # HMM, NOT EVEN READABLE. RAISE GENERIC PERMISSIONS ERROR
                raise PermissionsError('insufficient permissions to open file: '
                                       +filename)
            ifile.close() # OK, WE CAN READ THE FILE, SO RAISE EXCEPTION WITH
            raise ReadOnlyError('file is read-only: '+filename) # VERY SPECIFIC MEANING!
        else: # r OR n FLAG: JUST RAISE EXCEPTION
            raise PermissionsError('insufficient permissions to open file: '
                                   +filename)
    except bsddb.db.DBNoSuchFileError:
        raise NoSuchFileError('no file named: '+filename)
    except bsddb.db.DBInvalidArgError: # NOT A BTREE FILE...
        try:
            if useHash: # NO POINT IN TRYING HASH YET AGAIN...
                raise bsddb.db.DBInvalidArgError
            # fallback to using default: hash file
            return open_anydbm(filename, flag)
        except bsddb.db.DBInvalidArgError:
            raise WrongFormatError('file does not match expected shelve format: '+filename)
Example #53
def main():
    try:
        db = bsddb.btopen(DA_FILE, "w")
    except bsddb.error:
        print "DB doesn't exist, creating a new one"

        db = bsddb.btopen(DA_FILE, "c")

    lib.set_seed(SEED)
    for index in range(DB_SIZE):
        krng = 64 + lib.get_random() % 64
        key = str(index)
        #for i in range(krng):
         #   key += str(unichr(lib.get_random_char()))
        vrng = 64 + lib.get_random() % 64
        value = ""
        for i in range(vrng):
            value += str(unichr(lib.get_random_char()))
        #print key
        #print value
        #print ""
        db[key] = value
        
    #try:
        ##retrieve_key(db)
        ##retrieve_data(db)
        ##retrieve_key_range(db)
        #'''
        #time1 = time.time()*1000
        #if db.has_key('999'):
            #print db['999']
        #time3 = time.time()*1000 - time1
        #print time3
        #'''
        #time1 = time.time()*1000
        #for i in range(30):
            #if db.has_key('999'):
                #print db['999']
        #time2 = time.time()*1000 - time1   
        #print time2, 'millisecaoiands'
        #'''
        #print "difference" ,time2/time3
        #db.close()
        #'''
    #except Exception as e:
        #print e
    return db
Example #54
 def deleteDicc(self, tabla_nombre):
     dicc = db.btopen(self._fileName)
     if (not tabla_nombre in dicc):
         dicc.close()
         return -1
     del dicc[tabla_nombre]
     dicc.close()
     return 0
Example #55
    def __connect(self):
        if self.__db is not None:
            self.close()

        if self.__option == 'bt':
            self.__db = bsddb.btopen(self.__filename, self.__flag, self.__mode)
        if self.__option == 'hash':
            self.__db = bsddb.hashopen(self.__filename, self.__flag, self.__mode)
Example #56
 def getDiccHeader(self, tabla_nombre):
     dicc = db.btopen(self._fileName)
     if (not tabla_nombre in dicc):
         dicc.close()
         return []
     datos = pickle.loads(dicc[tabla_nombre])
     dicc.close()
     return datos[:-1]
Example #57
def create_bdb_from_dict(bdb_path, bdb_dict):
    bdb_path = pathlib2.Path(bdb_path)
    if bdb_path.exists():
        raise nog.exc.MinterError('Path already exists: {}'.format(bdb_path.as_posix()))
    impl.nog.filesystem.create_missing_directories_for_file(bdb_path)
    bdb = bsddb.btopen(bdb_path.as_posix(), 'c')
    bdb.update({bytes(k): bytes(v) for k, v in bdb_dict.items()})
    bdb.close()
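A quick usage sketch; the path and key/value pairs are illustrative (byte strings, since bsddb stores raw bytes):

    create_bdb_from_dict('/tmp/minters/demo.bdb', {b'counter': b'0'})
    bdb = bsddb.btopen('/tmp/minters/demo.bdb', 'r')
    print bdb['counter']   # '0'
    bdb.close()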
Example #58
 def __init__bsddb__(self):
     print "[%s] [INFO] Create the bsddb ..."%(self.__time())
     print "[%s] [WARNING] Checking if the bsddb is exist"%(self.__time())
     if os.path.exists("pages.db"):
         # print "[%s] [INFO] Delete the exist  db ..."%(self.__time())
         os.remove("pages.db")
     if os.path.exists("visited.db"):
         # print "[%s] [INFO] Delete the visited.db ..."%(self.__time())
         os.remove("visited.db")
     try:
         self.pages = bsddb.btopen(file="pages.db", flag='c')
         self.visited = bsddb.btopen(file="visited.db", flag='c')
         print "[%s] [INFO] Create db success ..." % (self.__time())
     except bsddb.error:
         print "[%s] [ERROR] Create db error ..." % (self.__time())