Example #1
def show_public_timeline(since_ids=None, hashtag_list=None, hashtag_db_name=None, tweet_db_name=None):
    '''
    since_ids - twitter.Status.id (the timeline will consist of all the tweets after the tweet with id since_ids)
    hashtag_list - a list of hashtags, if you want to search for particular hashtags
    hashtag_db_name - provide a string name if you want to save the hashtags and their counts; they are stored in a gdbm file
    tweet_db_name - provide a string name if you want to save the tweets' hashtags and the tweet ids; they are stored in a gdbm file
    
    Returns the tweet id of the latest tweet.
    '''
    timeline = api.GetPublicTimeline(since_id=since_ids)
    if not timeline:
        return since_ids
    hashtag_public_db = None
    tweet_db = None
    if hashtag_db_name:
        hashtag_public_db = gdbm.open(hashtag_db_name, 'c')
    if tweet_db_name:
        tweet_db = gdbm.open(tweet_db_name, 'c')
    since_ids = show_timeline(timeline, hashtag_list=hashtag_list, hashtag_db=hashtag_public_db, tweet_db=tweet_db)
    if hashtag_db_name:
        hashtag_public_db.close()
    if tweet_db_name:
        tweet_db.close()
    return since_ids
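For context, a minimal sketch of reading back one of the gdbm files this function can produce; the filename and the hashtag-to-count layout are assumptions, since save_hashtag() is not shown here.

# Hedged sketch: dump a hashtag-count database written via hashtag_db_name.
# 'public_hashtags' and the hashtag -> count layout are assumptions.
import gdbm

db = gdbm.open('public_hashtags', 'r')
k = db.firstkey()
while k is not None:
    print k, '->', db[k]
    k = db.nextkey(k)
db.close()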
Example #2
def main():
    """Main function"""

    # WRITE #######

    db = gdbm.open('foo_gdbm', 'c')

    db['one'] = 'un'
    db['two'] = 'dos'
    db['three'] = 'tres'

    db.close()

    # WHICH DBM ###

    print "whichdb:", whichdb.whichdb('foo_gdbm')
    print

    # READ ########

    db = gdbm.open('foo_gdbm', 'r')

    # Iterate loop: first method (common to any dbm module)
    for k in db.keys():
        print k, ':', db[k]

    # Iterate loop: second method (more efficient)
    # The following code prints every key in the database db, without having to create a list in memory that contains them all.
    k = db.firstkey()
    while k is not None:
        print k, ':', db[k]
        k = db.nextkey(k)

    db.close()
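The firstkey()/nextkey() walk above is worth wrapping once so callers can simply iterate; a minimal sketch of the same traversal as a generator:

# Hedged sketch: the memory-efficient key walk from the example above,
# wrapped in a reusable generator (iter_gdbm_keys is a name introduced here).
def iter_gdbm_keys(db):
    k = db.firstkey()
    while k is not None:
        yield k
        k = db.nextkey(k)

# Usage:
# for k in iter_gdbm_keys(db):
#     print k, ':', db[k]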
Example #3
def setproxy(command):
    # str.strip() removes a set of characters, not a prefix, so use
    # replace() to drop the leading "--proxy " flag, then trim the newline.
    proxystr = command.replace("--proxy ", "", 1).strip("\r\n")
    print proxystr,
    if proxystr.startswith("http:"):
        f = gdbm.open("proxyfile", "c")
        f["http"] = proxystr
        f.close()
    elif proxystr.startswith("https:"):
        f = gdbm.open("proxyfile", "c")
        f["https"] = proxystr
        f.close()
    elif proxystr.lower() == "none":
        # string equality, not 'is' identity, so "None" and "none" both match
        f = gdbm.open("proxyfile", "c")
        f["https"] = ""
        f["http"] = ""
        print f["https"]
        print f["http"]
        f.close()
        print "proxy set to none"
    else:
        f = gdbm.open("proxyfile", "c")
        f["https"] = ""
        f["http"] = ""
        print f["https"]
        print f["http"]
        f.close()

    return proxystr
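A complementary sketch for reading the saved values back out of "proxyfile"; falling back to an empty string for missing keys is an assumption.

# Hedged sketch: read back the proxy values stored by setproxy().
import gdbm

f = gdbm.open("proxyfile", "r")
http_proxy = f["http"] if "http" in f else ""
https_proxy = f["https"] if "https" in f else ""
f.close()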
Example #4
 def __init__(self, dbname):
     try:
         import gdbm
         gdbm.open(dbname).close()  # verify the file really is a gdbm database
         self.__db = shelve.DbfilenameShelf(dbname)
     except:
         self.__db = shelve.open(dbname)
Example #6
	def __init__(self, settings, datadir, log, mempool, netmagic,
		     readonly=False, fast_dbm=False):
		self.settings = settings
		self.log = log
		self.mempool = mempool
		self.readonly = readonly
		self.netmagic = netmagic
		self.fast_dbm = fast_dbm
		self.blk_cache = Cache(750)
		self.orphans = {}
		self.orphan_deps = {}
		if readonly:
			mode_str = 'r'
		else:
			mode_str = 'c'
			if fast_dbm:
				self.log.write("Opening database in fast mode")
				mode_str += 'f'
		self.misc = gdbm.open(datadir + '/misc.dat', mode_str)
		self.blocks = gdbm.open(datadir + '/blocks.dat', mode_str)
		self.height = gdbm.open(datadir + '/height.dat', mode_str)
		self.blkmeta = gdbm.open(datadir + '/blkmeta.dat', mode_str)
		self.tx = gdbm.open(datadir + '/tx.dat', mode_str)

		if 'height' not in self.misc:
			self.log.write("INITIALIZING EMPTY BLOCKCHAIN DATABASE")
			self.misc['height'] = str(-1)
			self.misc['msg_start'] = self.netmagic.msg_start
			self.misc['tophash'] = ser_uint256(0L)
			self.misc['total_work'] = hex(0L)

		if 'msg_start' not in self.misc or (self.misc['msg_start'] != self.netmagic.msg_start):
			self.log.write("Database magic number mismatch. Data corruption or incorrect network?")
			raise RuntimeError("database magic number mismatch")
Example #7
 def __init__(self):
     self.id_idx_db = gdbm.open('blockid_idx.db', 'cf')
     self.num_idx_db = gdbm.open('blocknum_idx.db', 'cf')
     self.blockdata_db = gdbm.open('blockdata.db', 'cf')
     self.party_idxa_db = gdbm.open('party_idx_a.db', 'cf')
     self.party_idxb_db = gdbm.open('party_idx_b.db', 'cf')
     self.mc = memcache.Client(config.mc_endpoint)
Example #8
	def saveuserdata(self, mainclass):
		iter = mainclass.elementy.get_iter(0)
		i = False

		if mainclass.combo1.child.get_text() != "":
			while iter:
				if mainclass.combo1.child.get_text() != mainclass.elementy.get_value(iter, 0):
					i = True
				else:
					i = False
					break

				iter = mainclass.elementy.iter_next(iter)
			if i:
				d = gdbm.open(userdata, 'c')
				l = len(d.keys())
				d[str(l)] = str(mainclass.combo1.child.get_text())
				d.close()
		i = False
		iter = mainclass.elementy2.get_iter(0)
		if mainclass.combo2.child.get_text() != "":
			while iter:
				if mainclass.combo2.child.get_text() != mainclass.elementy2.get_value(iter, 0):
					i = True
				else:
					i = False
					break

				iter = mainclass.elementy2.iter_next(iter)
			if i:
				d = gdbm.open(userdata2, 'c')
				l = len(d.keys())
				d[str(l)] = str(mainclass.combo2.child.get_text())
				d.close()
Example #9
    def get(self):
        session_store = sessions.get_store(request=self.request)
        session = session_store.get_session()
        username = session.get('username')
        key = session.get('key')
        try:
            database = gdbm.open('database.gdbm', 'cf')
        except gdbm.error:
            time.sleep(1.0)
            database = gdbm.open('database.gdbm', 'cf')

        if username:
            if ('usr'+username).encode('ascii','ignore') in database:
                auth_info = json.loads(
                    database[('usr'+username).encode('ascii','ignore')]
                )
    
                if auth_info['key'] == key:
                    template_values = {'auth':auth_info}    
                    template = JINJA_ENVIRONMENT.get_template('ko.html')
                    database.close()
                    self.response.write(template.render(template_values))
                    
    
                else:
                    database.close()
                    self.redirect('/jj70/login')
                    
            else:
                database.close()
                self.redirect('/jj70/login')
    
        else:
            database.close()
            self.redirect('/jj70/login')
Example #10
    def post(self):
        username = self.request.get('username')
        password = self.request.get('password')
        try:
            database = gdbm.open('database.gdbm', 'cf')
        except gdbm.error:
            time.sleep(0.1)
            database = gdbm.open('database.gdbm', 'cf')

        if ('usr'+username).encode('ascii','ignore') in database:
            auth_info = json.loads(
                database[('usr'+username).encode('ascii','ignore')]
            )
            if auth_info['password'] == password:
                session_store = sessions.get_store(request=self.request)
                session = session_store.get_session()
                session['key'] = auth_info['key']
                session['username'] = username
                session_store.save_sessions(self.response)
                database.close()
                self.redirect('/jj70/abstracts')

            else:
                database.close()
                self.redirect('/jj70/login')
            
        else:
            database.close()
            self.redirect('/jj70/login')
Example #11
def show_user_timeline(user, since_ids=None, hashtag_list=None, hashtag_db_name=None, tweet_db_name=None):
    '''
    user - twitter.User object
    since_ids - twitter.Status.id (the timeline will consist of all the tweets after the tweet with id since_ids)
    hashtag_list - a list of hashtags, if you want to search for particular hashtags
    hashtag_db_name - provide a string name if you want to save the hashtags and their counts; they are stored in a gdbm file
    tweet_db_name - provide a string name if you want to save the tweets' hashtags and the tweet ids; they are stored in a gdbm file

    Returns the tweet id of the latest tweet.
    '''
    if not user:
        return since_ids
    if not user.protected:
        try:
            timeline = api.GetUserTimeline(user.id, since_id=since_ids)
        except ValueError:
            print 'ValueError'
            return since_ids
    else:
        return since_ids
    if not timeline:
        return since_ids
    hashtag_user_db = None
    tweet_user_db = None
    if hashtag_db_name:
        hashtag_user_db = gdbm.open(hashtag_db_name + '_hashtag', 'c')
    if tweet_db_name:
        tweet_user_db = gdbm.open(tweet_db_name + '_tweets', 'c')

    since_ids = show_timeline(timeline, hashtag_db=hashtag_user_db, tweet_db=tweet_user_db, hashtag_list=hashtag_list)
    if hashtag_db_name:
        hashtag_user_db.close()
    if tweet_db_name:
        tweet_user_db.close()
    return since_ids
Example #12
    def __init__(self, dbname):
        try:
            import gdbm

            gdbm.open(dbname).close()  # verify the file really is a gdbm database
            self.__db = shelve.DbfilenameShelf(dbname)
        except:
            self.__db = shelve.open(dbname)
Example #13
    def scan_log(self, statedb_path, maildb_path, maintainer, email, logfile):
        self._state['maintainer'] = str(maintainer)
        self._state['email'] = email
        self._state['statedb'] = str(statedb_path)
        self._state['maildb'] = str(maildb_path)
        if not os.path.isfile(self._path['log']):
            raise RuntimeError("log file (" + self._path['log'] +
                               ") does not exist")
        log = open(self._path['log'], "r")
        num = re.compile(r".*?([0-9.]+).*?")
        for line in log:
            line = line.rstrip()
            if line.startswith("Regression PASSED"):
                self._state['result'] = 'pass'
            elif line == 'status = pass # execution status':
                # from publish_summary
                self._state['result'] = 'pass'
            elif line == 'status = fail # execution status':
                # from publish_summary
                self._state['result'] = 'fail'
            elif line.startswith('time = '):
                # from publish_summary
                self._state['time:wall'] = re.match(num, line).group(1)
            elif line.startswith("Total wall clock time was"):
                self._state['time:wall'] = re.match(num, line).group(1)
            elif line.startswith("Total CPU        time was"):
                self._state['time:cpu'] = re.match(num, line).group(1)
            elif line.startswith("[result:exit status]"):
                self._state['status'] = re.match(num, line).group(1)
            elif line.startswith("[result:exit condition]"):
                self._state['condition'] = re.match(
                    r"\[result:exit condition\]\s+(.*)", line).group(1)
        log.close()

        for k, v in self._state.iteritems():
            print str(k) + " -> " + str(v)

        divider = ";;"
        self._state['stamp'] = str(
            self._state['script']).split('.')[0] + ":" + self._state['time']
        statedb = gdbm.open(self._state['statedb'], "c")
        statedb[self._state['stamp']] = (str(self._state['result']) if self._state.has_key('result') else "malfunction") + divider + \
                                        (str(self._state['time:wall']) if self._state.has_key('time:wall') else "0") + divider + \
                                        (str(self._state['time:cpu']) if self._state.has_key('time:cpu') else "0")
        statedb.close()
        if not self._state.has_key('result'):
            maildb = gdbm.open(self._state['maildb'], "c")
            maildb[self._state['stamp']] = "malfunction" + divider + str(
                self._state['script']
            ) + divider + self._state['master'] + divider + self._state[
                'master-email'] + divider + logfile
            maildb.close()
        elif self._state['result'] != 'pass':
            maildb = gdbm.open(self._state['maildb'], "c")
            maildb[self._state['stamp']] = "fail" + divider + str(
                self._state['script']) + divider + str(
                    maintainer) + divider + str(email) + divider + logfile
            maildb.close()
Example #14
 def __init__(self, filename, readonly):
     if readonly:
         # opens the database in readonly mode so that we don't get
         # conflicts between different processes accessing at the same
         # time. only the dbqueue can modify the database.
         self._dbm = gdbm.open(filename, 'ru')
     else:
         self._dbm = gdbm.open(filename, 'cf')
     feedlib.WordDatabase.__init__(self, self._dbm)
Example #15
 def kanwaout(self, out):
     try:
         unicode  # NameError on Python 3; Python 2 needs the .db extension
         dic = dbm.open(out + '.db', 'c')
     except NameError:  # Python 3 adds the .db extension automatically
         dic = dbm.open(out, 'c')
     for (k, v) in self.records.items():
         dic[k] = compress(dumps(v))
     dic.close()
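For completeness, a minimal sketch of reading such a dictionary back; it assumes compress/dumps above come from zlib and pickle, and the filename is hypothetical.

# Hedged sketch: read back records written by kanwaout(), assuming values
# were stored as compress(dumps(v)) with zlib and pickle.
import dbm
from zlib import decompress
from pickle import loads

dic = dbm.open('kanwadict.db', 'r')  # hypothetical output name
for k in dic.keys():
    print k, loads(decompress(dic[k]))
dic.close()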
Example #17
    def scan_log( self, statedb_path, maildb_path, maintainer, email, logfile ):
        self._state['maintainer'] = str(maintainer)
        self._state['email'] = email
        self._state['statedb'] = str(statedb_path)
        self._state['maildb'] = str(maildb_path)
        if not os.path.isfile(self._path['log']):
            raise RuntimeError("log file (" + self._path['log'] + ") does not exist")
        log = open( self._path['log'], "r" )
        num = re.compile(r".*?([0-9.]+).*?")
        for line in log:
            line = line.rstrip( )
            if line.startswith("Regression PASSED"):
                self._state['result'] = 'pass'
	    elif line == 'OK':
		# from UNIT TEST
		self._state['result'] = 'pass'
	    elif line.startswith("OK (SKIP="):
		#from UNIT TEST (special cases with skipped tests)
		self._state['result'] = 'pass'
            elif line == 'status = pass # execution status':
                # from publish_summary
                self._state['result'] = 'pass'
            elif line == 'status = fail # execution status':
                # from publish_summary
                self._state['result'] = 'fail'
            elif line.startswith('time = '):
                # from publish_summary
                self._state['time:wall'] = re.match(num,line).group(1)
            elif line.startswith("Total wall clock time was"):
                self._state['time:wall'] = re.match(num,line).group(1)
            elif line.startswith("Total CPU        time was"):
                self._state['time:cpu'] = re.match(num,line).group(1)
            elif line.startswith("[result:exit status]"):
                self._state['status'] = re.match(num,line).group(1)
            elif line.startswith("[result:exit condition]"):
                self._state['condition'] = re.match(r"\[result:exit condition\]\s+(.*)",line).group(1)
        log.close( )

        for k, v in self._state.iteritems( ):
            print str(k) + " -> " + str(v)

        divider = ";;"
        self._state['stamp'] = str(self._state['script']).split('.')[0] + ":" + self._state['time']
        statedb = gdbm.open(self._state['statedb'],"c")
        statedb[self._state['stamp']] = (str(self._state['result']) if self._state.has_key('result') else "malfunction") + divider + \
                                        (str(self._state['time:wall']) if self._state.has_key('time:wall') else "0") + divider + \
                                        (str(self._state['time:cpu']) if self._state.has_key('time:cpu') else "0")
        statedb.close( )
        if not self._state.has_key('result'):
            maildb = gdbm.open(self._state['maildb'],"c")
            maildb[self._state['stamp']] = "malfunction" + divider + str(self._state['script']) + divider + self._state['master'] + divider + self._state['master-email'] + divider + logfile
            maildb.close( )
        elif self._state['result'] != 'pass':
            maildb = gdbm.open(self._state['maildb'],"c")
            maildb[self._state['stamp']] = "fail" + divider + str(self._state['script']) + divider + str(maintainer) + divider + str(email) + divider + logfile
            maildb.close( )
Example #18
 def opendb(ro=False):
   global jobs
   while jobs is None:
     try:
       if ro and exists(jobsfn):
         jobs = gdbm.open(jobsfn,'r')
       else:
         jobs = gdbm.open(jobsfn,'cs')
     except gdbm.error:
       time.sleep(1)
Example #19
 def test_error_conditions(self):
     # Try to open a non-existent database.
     unlink(filename)
     self.assertRaises(gdbm.error, gdbm.open, filename, 'r')
     # Try to access a closed database.
     self.g = gdbm.open(filename, 'c')
     self.g.close()
     self.assertRaises(gdbm.error, lambda: self.g['a'])
     # try to pass an invalid open flag
     self.assertRaises(gdbm.error, lambda: gdbm.open(filename, 'rx').close())
Example #20
 def opendb(ro=False):
     global jobs
     while jobs is None:
         try:
             if ro and exists(jobsfn):
                 jobs = gdbm.open(jobsfn, 'r')
             else:
                 jobs = gdbm.open(jobsfn, 'cs')
         except gdbm.error:
             time.sleep(1)
Example #21
def combine_databases(srcdb, destdb, cutoff=20, start=0):
    try:
        import gdbm
    except ImportError:
        print('combining databases requires the gdbm module. :(')
        return
    print('adding tweets from %s to %s' % (srcdb, destdb))

    db1 = gdbm.open(destdb, 'wf')
    db2 = gdbm.open(srcdb, 'w')

    k = db2.firstkey()
    temp_k = None
    seen = 0

    if start:
        while seen < start:
            k = db2.nextkey(k)
            sys.stdout.write('skipping: %i/%i \r' % (seen, start))
            sys.stdout.flush()
            seen += 1
    
    try:
        while k is not None:
            stats.tweets_seen()
            if (anagramfunctions.length_from_hash(k) < cutoff):
                k = db2.nextkey(k)
                continue                
            stats.passed_filter()
            tweet = _tweet_from_dbm(db2[k])
            if k in db1:
                tweet2 = _tweet_from_dbm(db1[k])
                if anagramfunctions.test_anagram(
                    tweet['tweet_text'],
                    tweet2['tweet_text'] 
                    ):
                    temp_k = db2.nextkey(k)
                    del db2[k]
                    hitmanager.new_hit(tweet, tweet2)
                else:
                    pass
            else:
                db1[k] = _dbm_from_tweet(tweet)
            stats.update_console()
            k = db2.nextkey(k)
            if not k and temp_k:
                k = temp_k
                temp_k = None
    finally:
        db1.sync()
        db1.close()
        db2.close()
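A note on the temp_k bookkeeping above: deleting the key you are standing on can break a gdbm firstkey()/nextkey() traversal, so the successor is fetched before the delete. The same pattern in isolation, as a minimal sketch (the filename and the should_delete() predicate are stand-ins):

# Hedged sketch: deleting entries while walking a gdbm database.
import gdbm

db = gdbm.open('example.db', 'w')  # hypothetical database
k = db.firstkey()
while k is not None:
    next_k = db.nextkey(k)  # fetch the successor before deleting
    if should_delete(k):    # should_delete() is a stand-in predicate
        del db[k]
    k = next_k
db.close()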
Example #22
    def run(self, yes):
        global force, verbose, HAVE_GDBM, HAVE_DBM

        if HAVE_GDBM:
            self.archive.cacheDb = gdbm.open(self.archive.cacheFile, 'nf')
        elif HAVE_DBM:
            self.archive.cacheDb = gdbm.open(self.archive.cacheFile, 'n')

        for indexFile in self.archive.list():
            self.processIndexFile(indexFile)

        if HAVE_GDBM:
            self.archive.cacheDb.sync()

        if self.errors > 0 and not force:
            print
            sys.stderr.write("There were %d errors. Bailing out, no harm has been done to archive! Use '--force' to override.\n" % (self.errors))
        else:
            for index0 in range(0, 256):
                level0Name = "%02x" % (index0)
                level0Path = os.path.join(self.archive.filesDir, level0Name)

                if verbose:
                    print (" scanning " + level0Name)

                for index1 in range(0, 256):
                    level1Name = "%02x" % (index1)
                    level1Path = os.path.join(level0Path, level1Name)
                    for filename in os.listdir(level1Path):
                        self.filesInArchive += 1
                        remainder = filename[0:36]
                        fullname = level0Name + level1Name + remainder
                        if fullname not in self.archive.cacheDb:
                            self.filesDeleted += 1
                            path = os.path.join(level1Path, filename)
                            if verbose:
                                print (" deleting " + fullname)
                            if yes:
                                os.remove(path)
                        else:
                            self.filesToKeep += 1


            print
            print ("Files to keep:          %12d" % self.filesToKeep)
            print ("Files deleted:          %12d" % self.filesDeleted)
            print ("Files found in archive: %12d" % self.filesInArchive)
            if not yes:
                print
                print ("This was a dry run. Nothing has been deleted for real. Use '--yes' for a live run.")

        self.archive.cacheDb.close()
Example #23
def combine_databases(srcdb, destdb, cutoff=20, start=0):
    try:
        import gdbm
    except ImportError:
        print('combining databases requires the gdbm module. :(')
        return
    print('adding tweets from %s to %s' % (srcdb, destdb))

    db1 = gdbm.open(destdb, 'wf')
    db2 = gdbm.open(srcdb, 'w')

    k = db2.firstkey()
    temp_k = None
    seen = 0

    if start:
        while seen < start:
            k = db2.nextkey(k)
            sys.stdout.write('skipping: %i/%i \r' % (seen, start))
            sys.stdout.flush()
            seen += 1

    try:
        while k is not None:
            stats.tweets_seen()
            if (anagramfunctions.length_from_hash(k) < cutoff):
                k = db2.nextkey(k)
                continue
            stats.passed_filter()
            tweet = _tweet_from_dbm(db2[k])
            if k in db1:
                tweet2 = _tweet_from_dbm(db1[k])
                if anagramfunctions.test_anagram(tweet['tweet_text'],
                                                 tweet2['tweet_text']):
                    temp_k = db2.nextkey(k)
                    del db2[k]
                    hitmanager.new_hit(tweet, tweet2)
                else:
                    pass
            else:
                db1[k] = _dbm_from_tweet(tweet)
            stats.update_console()
            k = db2.nextkey(k)
            if not k and temp_k:
                k = temp_k
                temp_k = None
    finally:
        db1.sync()
        db1.close()
        db2.close()
Example #24
    def post(self):
        try:
            database = gdbm.open('database.gdbm', 'cf')
        except gdbm.error:
            time.sleep(1.0)
            database = gdbm.open('database.gdbm', 'cf')

        session_store = sessions.get_store(request=self.request)
        session = session_store.get_session()
        username = session.get('username')
        key = session.get('key')
        database['abstract'+key.encode('ascii','ignore')] = self.request.body
        database.close()
Example #25
 def reset(self):
     """Resets the database - clears the on-disk stores and wipes the cache"""
     self.id_idx_db.close()
     self.num_idx_db.close()
     self.blockdata_db.close()
     self.party_idxa_db.close()
     self.party_idxb_db.close()
     self.mc.flush_all()
     self.id_idx_db = gdbm.open('blockid_idx.db', 'nf')
     self.num_idx_db = gdbm.open('blocknum_idx.db', 'nf')
     self.blockdata_db = gdbm.open('blockdata.db', 'nf')
     self.party_idxa_db = gdbm.open('party_idx_a.db', 'nf')
     self.party_idxb_db = gdbm.open('party_idx_b.db', 'nf')
Example #26
    def test_flags(self):
        # Test the flag parameter open() by trying all supported flag modes.
        all = set(gdbm.open_flags)
        # Test standard flags (presumably "crwn").
        modes = all - set('fsu')
        for mode in modes:
            self.g = gdbm.open(filename, mode)
            self.g.close()

        # Test additional flags (presumably "fsu").
        flags = all - set('crwn')
        for mode in modes:
            for flag in flags:
                self.g = gdbm.open(filename, mode + flag)
                self.g.close()
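For reference, gdbm.open_flags (probed above) is the string of flag characters the underlying gdbm build supports, so a quick inspection might look like:

# Hedged sketch: list the open() flags this gdbm build understands.
import gdbm
print gdbm.open_flags  # e.g. 'rwcnfsu' on builds with fast/sync/no-lock support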
Example #27
    def Open(self, indexfile=None):
        if not indexfile:
            indexfile = os.path.join(os.environ['PYPHY'], 'nr.dat.indexed')

        self.db = gdbm.open(indexfile)
        self.datafile = self.db['datafile']
        self.fid = open(self.datafile)
Example #28
    def Create(self, infile, outfile):
        db = gdbm.open(outfile, 'n')
        fid = open(infile)

        db['datafile'] = os.path.abspath(infile)

        while True:
            line = fid.readline()
            if not line:
                break

            if line[:3] == 'ID ':
                id = string.split(line)[1]
                start = fid.tell() - len(line)

            elif line[:3] == 'AC ':
                acc = string.split(line)[1]
                if acc[-1] == ';':
                    acc = acc[:-1]

            elif line[:2] == '//':
                stop = fid.tell()
                try:
                    value = '%d %d' % (start, stop)
                    db[id] = value
                    db[acc] = value
                    id, acc, start, stop = None, None, None, None
                except:
                    print("AARRGGGG %d %d %s %s" %
                          (start, stop, type(start), type(stop)))
                    print("%s %s" % (id, acc))

        db.close()
        fid.close()
Example #29
def getBookmarkDict(filename):
	'''
	Input: Absolute path of browser bookmark backup file
	Creates/Updates the Bookmark-Manager database
	Returns a dictionary object with bookmark url as key and (title, tag, add_date) tuple as value.
	'''
	f = gdbm.open('bookmarkDB','c')
	bookmark_dict = fetchBookmarks(filename)
	
	if bookmark_dict:
		for key in bookmark_dict:
			f[key] = pickle.dumps(bookmark_dict[key])
	
	if f.keys():		
		bookmark_dict = {}
	for key in f.keys():
		bookmark_dict[key] = pickle.loads(f[key])
	if not f.has_key('@author@'):
		name = 'Jay Rambhia'
		email = '*****@*****.**'
		add_date = time.time()
		f['@author@'] = pickle.dumps((name, email, add_date))
	print 'bookmarks saved'
	f.close()	
	return bookmark_dict
Example #31
def get_repo_data(repo_ids):
    """
    Find, open, and return the gdbm database file associated with each repo
    plus that repo's publish protocol

    :param repo_ids: list of repository IDs.
    :type  repo_ids: list

    :return:    dictionary where keys are repo IDs, and values are dicts that
                contain an open gdbm database under key "db", and a protocol
                under key "protocol".
    :rtype:     dict
    """
    ret = {}
    for distributor in RepoDistributorManager.find_by_repo_list(repo_ids):
        publish_protocol = _get_protocol_from_distributor(distributor)
        protocol_key, protocol_default_value = PROTOCOL_CONFIG_KEYS[
            publish_protocol]
        repo_path = distributor['config'].get(protocol_key,
                                              protocol_default_value)
        repo_id = distributor['repo_id']
        db_path = os.path.join(repo_path, repo_id,
                               constants.REPO_DEPDATA_FILENAME)
        try:
            ret[repo_id] = {
                'db': gdbm.open(db_path, 'r'),
                'protocol': publish_protocol
            }
        except gdbm.error:
            _LOGGER.error(
                'failed to find dependency database for repo %s. re-publish to fix.'
                % repo_id)
    return ret
Example #32
def get_repo_data(repo_ids):
    """
    Find, open, and return the gdbm database file associated with each repo
    plus that repo's publish protocol

    :param repo_ids: list of repository IDs.
    :type  repo_ids: list

    :return:    dictionary where keys are repo IDs, and values are dicts that
                contain an open gdbm database under key "db", and a protocol
                under key "protocol".
    :rtype:     dict
    """
    ret = {}
    for distributor in RepoDistributorManager.find_by_repo_list(repo_ids):
        publish_protocol = _get_protocol_from_distributor(distributor)
        protocol_key, protocol_default_value = PROTOCOL_CONFIG_KEYS[publish_protocol]
        repo_path = distributor['config'].get(protocol_key, protocol_default_value)
        repo_id = distributor['repo_id']
        db_path = os.path.join(repo_path, repo_id, constants.REPO_DEPDATA_FILENAME)
        try:
            ret[repo_id] = {'db': gdbm.open(db_path, 'r'), 'protocol': publish_protocol}
        except gdbm.error:
            _LOGGER.error('failed to find dependency database for repo %s. re-publish to fix.' %
                          repo_id)
    return ret
Example #33
def main():
    n = 42
    f = gdbm.open('contacts', 'c')

    while n != 5:
        print '1. Add contact\n2. Search contact\n3. Show All contacts\n4. List according to char\n5. Quit'
        while True:
            try:
                n = int(raw_input('Enter: '))
                break
            except ValueError:
                print 'Please choose a valid option'
        if n == 1:
            f = add_contact(f)
        elif n == 2:
            search_contact(f)
        elif n == 5:
            print 'Quit.\n'
            return
        elif n == 3:
            show_list(f)
        elif n == 4:
            c = raw_input('Enter the character.')
            show_alpha_list(f, c)
        else:
            print 'Please enter a valid option'

    return
Example #34
    def add_repodata(self, model):
        """
        Given a model, add the "repodata" attribute to it (which includes raw
        XML used for publishing), and add the "files" and "changelog" attributes
        based on data obtained in the raw XML snippets.

        :param model:   model instance to manipulate
        :type  model:   pulp_rpm.plugins.db.models.RpmBase
        """
        db_key = self.generate_db_key(model.unit_key)
        for filename, metadata_key, process_func in (
            (filelists.METADATA_FILE_NAME, 'files', filelists.process_package_element),
            (other.METADATA_FILE_NAME, 'changelog', other.process_package_element)
        ):
            db_file = gdbm.open(self.dbs[filename], 'r')
            try:
                raw_xml = db_file[db_key]
            finally:
                db_file.close()
            model.set_repodata(filename, raw_xml)
            element = ElementTree.fromstring(raw_xml)
            unit_key, items = process_func(element)
            setattr(model, metadata_key, items)

        primary_raw_xml = model.raw_xml.decode('utf-8', 'replace')
        primary_raw_xml = change_location_tag(primary_raw_xml, model.filename)
        model.set_repodata('primary', primary_raw_xml.encode('utf-8'))
Example #35
    def generate_dbs(self):
        """
        For repo data files that contain data we need to access later for each
        unit in the repo, generate a local db file that gives us quick read
        access to each unit's data.

        :raises PulpCodedException: if there is some inconsistency in metadata
        """
        package_count = {}
        for filename, tag, process_func in (
            (filelists.METADATA_FILE_NAME,
             filelists.PACKAGE_TAG, filelists.process_package_element),
            (other.METADATA_FILE_NAME, other.PACKAGE_TAG, other.process_package_element),
        ):
            with contextlib.closing(self.get_metadata_file_handle(filename)) as xml_file_handle:
                generator = package_list_generator(xml_file_handle, tag)
                db_filename = os.path.join(self.dst_dir, '%s.db' % filename)
                # always a New file, and open with Fast writing mode.
                with contextlib.closing(gdbm.open(db_filename, 'nf')) as db_file_handle:
                    for element in generator:
                        utils.strip_ns(element)
                        element.attrib['pkgid'] = models.RpmBase.PKGID_TEMPLATE
                        raw_xml = utils.element_to_raw_xml(element)
                        unit_key, _ = process_func(element)
                        db_key = self.generate_db_key(unit_key)
                        db_file_handle[db_key] = raw_xml
                    db_file_handle.sync()
                    package_count[filename] = len(db_file_handle)
            self.dbs[filename] = db_filename
        if package_count[filelists.METADATA_FILE_NAME] != package_count[other.METADATA_FILE_NAME]:
            reason = ('metadata is specified for different set of packages in filelists.xml'
                      ' and in other.xml')
            raise PulpCodedException(error_code=error_codes.RPM1015, reason=reason)
        self.rpm_count = package_count[filelists.METADATA_FILE_NAME]
Example #36
 def __init__(self, filename):
     self.db = None
     if filename.endswith(".db"):
         try:
             self.db = gdbm.open(filename, "r")
         except gdbm.error, err:
             print >> sys.stderr, "Unable to open binary database %s: %s" % (filename, err)
Example #37
    def _generate_dependency_data(self, modules):
        """
        generate the dependency metadata that is required to provide the API
        that the "puppet module" tool uses. Store the metadata in a gdbm
        database at the root of the repo. Generating and storing it at publish
        time means the API requests will always return results that are in-sync
        with the most recent publish and are not influenced by more recent
        changes to the repo or its contents.

        :type modules: list of pulp.plugins.model.AssociatedUnit
        """
        filename = os.path.join(self._build_dir(), constants.REPO_DEPDATA_FILENAME)
        _LOG.debug('generating dependency metadata in file %s' % filename)
        # opens a new file for writing and overwrites any existing file
        db = gdbm.open(filename, 'n')
        try:
            for module in modules:
                version = module.unit_key['version']
                deps = module.metadata.get('dependencies', [])
                path = os.path.join(self._repo_path, self._build_relative_path(module))
                value = {'file': path, 'version': version, 'dependencies': deps}

                name = module.unit_key['name']
                author = module.unit_key['author']
                key = '%s/%s' % (author, name)

                if db.has_key(key):
                    module_list = json.loads(db[key])
                else:
                    module_list = []
                module_list.append(value)
                db[key] = json.dumps(module_list)
        finally:
            db.close()
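A minimal sketch of consuming the database written above; the key scheme and JSON list layout follow this example, but the path and module name are hypothetical.

# Hedged sketch: read the dependency metadata written by
# _generate_dependency_data(); the filename and key are hypothetical.
import gdbm
import json

db = gdbm.open('dependency_db', 'r')
for release in json.loads(db['someauthor/somemodule']):
    print release['version'], release['dependencies']
db.close()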
Example #38
def check_uids():
    if len(sys.argv) < 2:
        print_help()
        return (3)

    domainname = sys.argv[1]
    if domainname == "":
        return (2)

    map_file = "/var/yp/%s/passwd.byuid" % domainname
    try:
        fh = gdbm.open(map_file, 'r')
        if not fh:
            return (2)
    except gdbm.error as e:
        sys.stderr.write("Cannot open the %s NIS map file for reading: %s" %
                         (map_file, e))
        return (2)

    uid = fh.firstkey()
    while uid:
        if uid_is_dangerous(uid):
            fh.close()
            return 0
        uid = fh.nextkey(uid)

    fh.close()
    return 1
Example #39
    def __init__(self, filename="/usr/lib64/avahi/service-types.db"):

        self.db = gdbm.open(filename, "r")

        l = locale.getlocale(locale.LC_MESSAGES)

        self.suffixes = ()

        if not l[0] is None:

            if not l[1] is None:
                self.suffixes += (l[0] + "@" + l[1], )

            self.suffixes += (l[0], )

            i = l[0].find("_")

            if i >= 0:

                k = l[0][:i]

                if not l[1] is None:
                    self.suffixes += (k + "@" + l[1], )

                self.suffixes += (k, )


        self.suffixes = tuple(map(lambda x: "[" + x + "]", self.suffixes)) + ("", )
Example #40
 def __init__(self, fn, mode, max_age=None):
     self.max_age = max_age
     self.db = gdbm.open(fn, mode)
     self.reorganize_timer = None
     self.sync_timer = None
     self.start_reorganizing()
     self.start_syncing()
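The timers above suggest periodic maintenance on the open handle; since start_syncing() is not shown, here is a minimal sketch of one way to schedule it (the interval and the use of threading.Timer are assumptions).

# Hedged sketch: flush a gdbm handle to disk on a fixed interval.
import threading

class SyncingStore(object):
    def __init__(self, db, interval=60):
        self.db = db
        self.interval = interval
        self.sync_timer = None

    def start_syncing(self):
        self.db.sync()  # flush pending writes to disk
        self.sync_timer = threading.Timer(self.interval, self.start_syncing)
        self.sync_timer.daemon = True
        self.sync_timer.start()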
Example #41
    def __init__(self,
                 filename="/usr/lib/x86_64-linux-gnu/avahi/service-types.db"):

        self.db = gdbm.open(filename, "r")

        l = locale.getlocale(locale.LC_MESSAGES)

        self.suffixes = ()

        if not l[0] is None:

            if not l[1] is None:
                self.suffixes += (l[0] + "@" + l[1], )

            self.suffixes += (l[0], )

            i = l[0].find("_")

            if i >= 0:

                k = l[0][:i]

                if not l[1] is None:
                    self.suffixes += (k + "@" + l[1], )

                self.suffixes += (k, )

        self.suffixes = tuple(map(lambda x: "[" + x + "]",
                                  self.suffixes)) + ("", )
Example #42
def clean_database(databasePathname):
    """function to remove dead nodes from the hash db"""
    print '# loading database ' + databasePathname
    try:
        db = gdbm.open(databasePathname, 'w')
    except gdbm.error:
        print "# " + databasePathname + " could not be loaded"
        sys.exit(-1)

    # even though gdbm supports memory efficient iteration over
    # all keys, I want to order my traversal across similar
    # paths to leverage caching of directory files:
    allKeys = db.keys()
    print '# finished loading keys from ' + databasePathname
    allKeys.sort()
    print '# finished sorting keys from ' + databasePathname
    print '# deleting dead nodes'
    count = 0
    for currKey in allKeys:
        try:
            os.stat(currKey)
            sys.stdout.write('.')
        except OSError:
            del db[currKey]
            sys.stdout.write('*')
            count += 1
        sys.stdout.flush()
        sys.stdout.flush()
    print "\n# reorganizing " + databasePathname
    db.reorganize()
    db.sync()
    db.close()
    print '# done cleaning ' + databasePathname + ', removed ' + str(count) + ' dead nodes!'
Example #43
def open_gdbm(*args):
    """Open a gdbm database."""
    import gdbm

    return gdbm.open(*args)
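A possible usage of this wrapper; the filename is hypothetical and 'c' creates the file if it does not exist.

db = open_gdbm('cache.db', 'c')
db['key'] = 'value'
db.close()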
Example #45
 def generate_dbs(self):
     """
     For repo data files that contain data we need to access later for each
     unit in the repo, generate a local db file that gives us quick read
     access to each unit's data.
     """
     for filename, tag, process_func in (
         (filelists.METADATA_FILE_NAME, filelists.PACKAGE_TAG,
          filelists.process_package_element),
         (other.METADATA_FILE_NAME, other.PACKAGE_TAG,
          other.process_package_element),
     ):
         xml_file_handle = self.get_metadata_file_handle(filename)
         try:
             generator = package_list_generator(xml_file_handle, tag)
             db_filename = os.path.join(self.dst_dir, '%s.db' % filename)
             # always a New file, and open with Fast writing mode.
             db_file_handle = gdbm.open(db_filename, 'nf')
             try:
                 for element in generator:
                     utils.strip_ns(element)
                     raw_xml = utils.element_to_raw_xml(element)
                     unit_key, _ = process_func(element)
                     db_key = self.generate_db_key(unit_key)
                     db_file_handle[db_key] = raw_xml
                 db_file_handle.sync()
             finally:
                 db_file_handle.close()
         finally:
             xml_file_handle.close()
         self.dbs[filename] = db_filename
Example #46
def getBookmarkDict(filename):
    '''
    Input: Absolute path of browser bookmark backup file
    Creates/Updates the Bookmark-Manager database
    Returns a dictionary object with bookmark url as key and (title, tag, add_date) tuple as value.
    '''
    f = gdbm.open('bookmarkDB', 'c')
    bookmark_dict = fetchBookmarks(filename)

    if bookmark_dict:
        for key in bookmark_dict:
            if not f.has_key(key):
                f[key] = pickle.dumps(bookmark_dict[key])

    if f.keys():
        bookmark_dict = {}
    for key in f.keys():
        bookmark_dict[key] = pickle.loads(f[key])
    if not f.has_key('@author@'):
        name = 'Jay Rambhia'
        email = '*****@*****.**'
        add_date = time.time()
        f['@author@'] = pickle.dumps((name, email, add_date))
    print 'bookmarks saved'
    f.close()
    return bookmark_dict
Example #47
    def add_repodata(self, model):
        """
        Given a model, add the "repodata" attribute to it (which includes raw
        XML used for publishing), and add the "files" and "changelog" attributes
        based on data obtained in the raw XML snippets.

        :param model:   model instance to manipulate
        :type  model:   pulp_rpm.plugins.db.models.RPM
        """
        repodata = model.metadata.setdefault('repodata', {})
        db_key = self.generate_db_key(model.unit_key)
        for filename, metadata_key, process_func in (
            (filelists.METADATA_FILE_NAME, 'files',
             filelists.process_package_element),
            (other.METADATA_FILE_NAME, 'changelog',
             other.process_package_element)):
            db_file = gdbm.open(self.dbs[filename], 'r')
            try:
                raw_xml = db_file[db_key]
            finally:
                db_file.close()
            repodata[filename] = raw_xml
            element = ElementTree.fromstring(raw_xml)
            unit_key, items = process_func(element)
            model.metadata[metadata_key] = items

        raw_xml = model.raw_xml
        repodata['primary'] = change_location_tag(raw_xml, model.relative_path)
Example #48
    def Create(self, infile, outfile):
        db = gdbm.open(outfile, 'n')
        fid = open(infile)

        db['datafile'] = os.path.abspath(infile)

        while True:
            line = fid.readline()
            if not line:
                break

            if line[:3] == 'ID ':
                id = string.split(line)[1]
                start = fid.tell() - len(line)

            elif line[:3] == 'AC ':
                acc = string.split(line)[1]
                if acc[-1] == ';':
                    acc = acc[:-1]

            elif line[:2] == '//':
                stop = fid.tell()
                try:
                    value = '%d %d' % (start, stop)
                    db[id] = value
                    db[acc] = value
                    id, acc, start, stop = None, None, None, None
                except:
                    print("AARRGGGG %d %d %s %s" % (start, stop, type(start), type(stop)))
                    print("%s %s" % (id, acc))

        db.close()
        fid.close()
Example #49
def show_timeline(timeline, hashtag_db=None, tweet_db=None, hashtag_list=None):
    for i in range(len(timeline) - 1, -1, -1):
        ids = timeline[i].id
        screen_name = '@' + timeline[i].user.screen_name
        user_name = timeline[i].user.name
        text = timeline[i].text
        tweet = screen_name + ' (' + user_name + ') ' + ': ' + text
        print tweet
        res = get_hashtag(text)
        if hashtag_list:
            for j in range(len(hashtag_list)):
                if not hashtag_list[j].startswith('#'):
                    hashtag_list[j] = '#' + hashtag_list[j]
                if hashtag_list[j] in res:
                    py_db = gdbm.open(hashtag_list[j] + '_hashtag', 'c')
                    py_db[str(timeline[i].id)] = repr(tweet)
                    py_db.close()
        if res:
            if hashtag_db is not None:
                hashtag_db = save_hashtag(res, hashtag_db)
            if tweet_db is not None:
                tweet_db = save_tweet(ids, tweet, tweet_db)
    return timeline[0].id