def main():
    """One-off setup script: (re)build the jbims band-entry database.

    Imports are function-local because this runs as a standalone script;
    the buzhug package lives under <basedir>/lib.
    """
    basedir = '/Users/yhino/UHD/dev/jacla/src'
    import sys
    from datetime import datetime
    import traceback
    sys.path.append(basedir + '/lib')
    from buzhug import Base
    try:
        db = Base(basedir + '/db/jbims.db')
        # mode="override" wipes any existing database of the same name
        db.create(
            ('id'           , str),
            ('band_name'    , str),
            ('genre'        , str),
            ('leader_name'  , str),
            ('leader_mail'  , str),
            ('passwd'       , str),
            ('member_num'   , int),
            ('member'       , str),
            ('part'         , str),
            ('music_name'   , str),
            ('music_time'   , str),
            ('music_genre'  , str),
            ('music_comp'   , str),
            ('comment'      , str),
            ('live_entry'   , bool),
            ('stage_setting', str),
            ('stage_info'   , str),
            ('add_dt'       , datetime),
            ('upd_dt'       , datetime),
            mode="override")
    except Exception:
        # FIX: was a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt; Exception keeps the original
        # report-and-continue behavior for real errors.
        print("buzhug error.")
        traceback.print_exc()
def __new__(mcls, name, bases, dic):
    """Metaclass constructor: classes that declare a 'fields' attribute
    are backed by a buzhug table named obm_<classname>."""
    # classes without a field declaration are built unchanged
    if not 'fields' in dic:
        return type.__new__(ObmMeta, name, bases, dic)
    base = Base("obm_%s" % name.lower())
    # If a declared field type carries a 'bhbase' attribute (i.e. it is
    # presumably another Obm-mapped class), store a link to its underlying
    # table instead of the class itself — TODO confirm against ObmMeta users.
    fields = [ (field[0], getattr(field[1], 'bhbase', field[1]))
               for field in dic['fields'] ]
    # mode='open' reuses an existing table rather than overwriting it
    dic['bhbase'] = base.create(*fields, **{'mode':'open'})
    return type.__new__(ObmMeta, name, bases, dic)
def __init__(self, path):
    """Open a handle on the stored-blocks table at *path*.

    The (key, version) schema is created on first use; when the table
    already exists on disk, buzhug's create() raises IOError, which is
    deliberately ignored.
    """
    self.path = path
    table = Base(path)
    self.dbh_stored_blocks = table
    try:
        table.create(('key', str), ('version', str))
    except IOError:
        # table was created on a previous run — nothing to do
        pass
class DataBase():
    """Lookup helper over a buzhug word table, keyed by Hindi labels."""

    def __init__(self):
        # `mypath` is expected to be a module-level constant naming the db
        self.db = Base(mypath)
        # Hindi display label -> buzhug column name
        self.sfield = {'शब्द': 'Words', 'पद': 'Speech', 'अर्थ': 'Meaning',
                       'पर्यायवाचि': 'Synonym', 'विपरीतार्थक': 'Antonym',
                       'अंग्रेजी': 'English'}
        self.db.open()

    def _get_attribute(self, args):
        """Map one record to {label: value}, dropping None-valued fields."""
        ret_val = {}
        for k, f in self.sfield.items():
            value = getattr(args, f)
            # FIX: build-and-filter in one pass (was add-then-pop) and use
            # the `is not None` identity test instead of `== None`.
            if value is not None:
                ret_val[k] = value
        return ret_val

    def select(self, value):
        """Return the attribute dict for records whose Words equals *value*.

        NOTE(review): when several records match, only the last one wins
        (the loop overwrites self.g each iteration) — behavior preserved.
        """
        # list(...values()) replaces the py2-only itervalues() comprehension
        self.record = self.db.select(list(self.sfield.values()), Words=value)
        for v in self.record:
            self.g = self._get_attribute(v)
        return self.g

    def fields(self):
        """Column names of the underlying table."""
        return self.db.field_names

    def get_field_length(self, value):
        """Number of non-empty fields for the entry matching *value*."""
        t = self.select(value)
        # FIX: use the value we just fetched instead of reaching back
        # into self.g (same object, clearer data flow)
        return len(t)
class Dao:
    """Data-access wrapper around the buzhug admins table."""

    def __init__(self, dbname):
        # open an existing database; store/read strings as utf-8 unicode
        self.db = Base(dbname).open()
        self.db.set_string_format(unicode, 'utf-8')

    def regist_admin(self, data):
        """Insert one admin record from dict *data*; returns the record id.

        NOTE(review): keys missing from `data` are stored as None.
        """
        return self.db.insert(
            generation = data.get('generation'),
            dept = data.get('dept'),
            name_sei = data.get('name_sei'),
            name_mei = data.get('name_mei'),
            mail = data.get('mail'),
            account = data.get('account'),
            passwd = data.get('crypt_passwd'),
            add_dt = datetime.now()
        )

    def get_admins(self):
        # all records, no filter
        result = self.db.select()
        return result

    def close(self):
        self.db.close()

    def __del__(self):
        # NOTE(review): runs even after an explicit close(), so the table
        # is closed twice — confirm buzhug tolerates a second close()
        self.db.close()
def __init__(self):
    # the current working directory is assumed to contain ./db/bookDB
    self.userdir = os.getcwd()
    self.bookDB = Base(self.userdir + '/db/bookDB').open()
    #this selects the whole db, for now
    self.resultset = self.bookDB.select()
    #for loading the db in memory
    self.membooks = []
def __init__(self):
    # `mypath` is expected to be a module-level constant naming the db
    self.db = Base(mypath)
    # Hindi display label -> buzhug column name
    self.sfield = {'शब्द': 'Words', 'पद': 'Speech', 'अर्थ': 'Meaning',
                   'पर्यायवाचि': 'Synonym', 'विपरीतार्थक': 'Antonym',
                   'अंग्रेजी': 'English'}
    self.db.open()
def open(self):
    """Open the transactions table, creating its schema on first run.

    buzhug's open() raises IOError when the table does not exist yet;
    in that case the schema is created instead.
    """
    self.db = Base(TransactionsDB.BASE)
    try:
        self.db.open()
    except IOError:
        self.db.create(('amount'      , float),
                       ('amount_local', float),
                       ('date'        , date),
                       ('account'     , str),
                       ('label'       , str),
                       ('currency'    , str))
def __init__(self, storage_path):
    """Create a buzhug handle for every filesystem table under *storage_path*.

    Each table <name> becomes the attribute self.dbh_<name>.
    """
    for table in ('objects', 'blocks', 'replicas', 'tree',
                  'paths', 'id', 'tags'):
        setattr(self, 'dbh_' + table,
                Base(os.path.join(storage_path, table)))
    self.storage_path = storage_path
def nested_join(db1, db2, alias = None):
    """Cross-join two buzhug bases into a new table under tables/.

    Columns present in both inputs (same name and type) are renamed to
    "<table>.<column>" for each side. Returns (newDB, name) where *name*
    is the new table's basename (random 10-letter string when *alias* is
    None).
    """
    # short basename of each input, e.g. "tables/foo" -> "foo"
    base1 = db1.name[db1.name.rfind("/") + 1:]
    base2 = db2.name[db2.name.rfind("/") + 1:]

    fields = []
    for a in db1.fields:
        if a != '__version__' and a != '__id__':
            fields.append((a, db1.fields[a]))
    for a in db2.fields:
        if a != '__version__' and a != '__id__':
            if (a, db2.fields[a]) not in fields:
                fields.append((a, db2.fields[a]))
            else:
                # duplicate column: qualify both sides with their table name
                fields.remove((a, db2.fields[a]))
                fields.append((base1 + "." + a, db1.fields[a]))
                # BUG FIX: the second qualified copy was also prefixed with
                # db1's name, creating two identical column names.
                fields.append((base2 + "." + a, db2.fields[a]))

    if alias is None:
        # anonymous join: invent a random 10-letter table name
        name = ''.join(random.choice(string.ascii_lowercase)
                       for _ in range(10))
    else:
        name = alias
    # (both branches previously created the same Base — collapsed)
    newDB = Base("tables/" + name)
    newDB.create(*tuple(fields))

    # BUG FIX: rows were previously appended in db1-then-db2 column order,
    # which misaligned with the schema whenever a duplicate column was
    # renamed; build each row in the created-field order instead.
    for record1 in db1:
        for record2 in db2:
            rec = []
            for fname, _ftype in fields:
                if fname.startswith(base1 + "."):
                    rec.append(getattr(record1, fname[len(base1) + 1:]))
                elif fname.startswith(base2 + "."):
                    rec.append(getattr(record2, fname[len(base2) + 1:]))
                elif fname in db1.field_names:
                    rec.append(getattr(record1, fname))
                else:
                    rec.append(getattr(record2, fname))
            newDB.insert(*tuple(rec))
    return (newDB, name)
def main():
    """One-off setup script: (re)build the admins database.

    Imports are function-local because this runs as a standalone script;
    the buzhug package lives under <basedir>/lib.
    """
    basedir = '/Users/yhino/UHD/dev/jacla/src'
    import sys
    from datetime import datetime
    import traceback
    sys.path.append(basedir + '/lib')
    from buzhug import Base
    try:
        db = Base(basedir + '/db/admins.db')
        # mode="override" wipes any existing database of the same name
        db.create(
            ('generation', str),
            ('dept'      , str),
            ('name_sei'  , str),
            ('name_mei'  , str),
            ('mail'      , str),
            ('account'   , str),
            ('passwd'    , str),
            ('add_dt'    , datetime),
            ('upd_dt'    , datetime),
            mode="override")
    except Exception:
        # FIX: was a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt; Exception keeps the original
        # report-and-continue behavior for real errors.
        print("buzhug error.")
        traceback.print_exc()
def sync_database(self):
    """Create the Posts table on first run (no-op when it already exists)."""
    # make sure the database directory exists
    if not os.path.isdir(Settings.DB_DIR):
        os.mkdir(Settings.DB_DIR)
    if not os.path.isdir(os.path.join(Settings.DB_DIR, 'Posts')):
        from datetime import datetime
        try:
            self.db = Base(os.path.join(Settings.DB_DIR, 'Posts')).create(
                ("messageid", str),
                ("postnumber", float),
                ("poster", unicode),
                ("title", unicode),
                ("description", unicode),
                ("tag", unicode),
                ("posted", datetime),
                ("category", int),
                ("subcategory_codes", list),
                ("image", dict),
                ("website", unicode),
                ("size", float),
                ("nzb", list))
        except:
            # best-effort: creation failures are silently ignored
            # NOTE(review): the bare except also hides real errors — confirm
            pass
def main():
    # open the progress-tracking database created elsewhere
    from buzhug import Base
    try:
        db = Base('oadb').open()
    except IOError:
        # NOTE(review): execution continues with `db` unbound here — confirm
        print('Error opening oadb')
    import os
    from qsequence import QSequence
    #seq = QSequence(filename='welcome.txt')
    #seq = QSequence(filename='sky.txt')
    #seq = QSequence(filename='http://openallure.wikia.com/wiki/Evaluation_of_Software_Architecture_Structural_Quality')
    #seq = QSequence(filename='Structural Quality of Software Architectures.txt')
    # load the question sequence from a remote script
    seq = QSequence(
        filename=
        'http://openallureds.ning.com/profiles/blogs/open-allure-script-for-the'
    )
def sync_database(self):
    """Create the Filters table on first run and seed the default filters."""
    # make sure the database directory exists
    if not os.path.isdir(Settings.DB_DIR):
        os.mkdir(Settings.DB_DIR)
    if not os.path.isdir(os.path.join(Settings.DB_DIR, 'Filters')):
        try:
            self.db = Base(os.path.join(
                Settings.DB_DIR, 'Filters')).create(
                ("name", unicode),
                ("description", unicode),
                ("query", unicode),
                ("poster", unicode),
                ("tag", unicode),
                ("category_code", int),
                ("subcategory_codes", dict),
                ("max_age", int),
                ("p**n", bool))
        except:
            # best-effort: creation failures are silently ignored
            pass
        else:
            # table was freshly created: load the shipped default filters
            try:
                for filter in Settings.DEFAULT_FILTERS:
                    self.add_filter_to_database(Filter(filter))
            except:
                # NOTE(review): bare except hides seeding errors — confirm
                pass
def main():
    """Initialization and event loop.

    Sets up pygame, configuration, translations, voice and the progress
    database, then runs the Open Allure question/answer loop until the
    user quits.
    """
    # provide instructions and other useful information
    # initialize pyGame screen
    textRect = pygame.rect.Rect(0, 0, 640, 480)
    screenRect = pygame.rect.Rect(0, 0, 640, 480)
    pygame.init()
    pygame.display.set_caption(u"Open Allure")
    screen = pygame.display.set_mode(screenRect.size)
    if sys.platform != 'darwin':
        pygame.scrap.init()
    config = ConfigObj('openallure.cfg')
    # determine what language to use for string translations
    # this can be overridden in scripts
    gettext.install(domain='openallure', localedir='locale', unicode=True)
    try:
        language = config['Options']['language']
    except KeyError:
        language = 'en'
    if len(language) > 0 and language != 'en':
        mytrans = gettext.translation(u"openallure",
                                      localedir='locale',
                                      languages=[language],
                                      fallback=True)
        mytrans.install(unicode=True)  # must set explicitly here for Mac
    # language also determines which default systemVoice to use (Mac only)
    openallure.systemVoice = ''
    try:
        openallure.systemVoice = config['Voice'][language]
    except KeyError:
        pass
    # load initial question sequence from url specified in openallure.cfg file
    url = unicode(config['Source']['url'])
    if len(sys.argv) > 1 and 0 != len(sys.argv[1]):
        # a command-line argument overrides the configured source
        url = unicode(sys.argv[1])
    backgroundColor = eval(config['Colors']['background'])
    seq = QSequence(filename=url)
    try:
        openallure.systemVoice = config['Voice'][seq.language]
    except KeyError:
        pass
    # open database to track progress
    oadb = config['Data']['oadb']
    try:
        openallure.db = Base(oadb).open()
    except IOError:
        # first run: create the progress-tracking schema
        openallure.db = Base(oadb)
        openallure.db.create(('time', float), ('url', unicode),
                             ('q', int), ('a', int), ('cmd', unicode))
    # read configuration options
    delayTime = int(config['Options']['delayTime'])
    openallure.allowNext = int(config['Options']['allowNext'])
    # initialize chatbot
    openallure_chatbot = Chat(responses, reflections)
    chatHistory = []
    onChatHistory = -1
    # track when Open Allure has gained mouse focus
    openallure.gain = 1
    # mouse focus only matters when stickyBrowser is true (see openallure.cfg)
    openallure.stickBrowser = eval(config['Options']['stickyBrowser'])
    voice = Voice()
    margins = eval(config['Font']['margins'])
    text = OpenAllureText(margins)
    # start on first question of sequence
    # TODO: have parameter file track position in sequence at quit
    # and resume there on restart
    openallure.onQuestion = 0
    # initialize mode flags
    # Has new question from sequence been prepared?
    openallure.ready = False
    # Has question been openallure.stated (read aloud)?
    openallure.stated = False
    # Which question in sequence has been read aloud (to avoid re-reading it)?
    # Note: -1 indicates no question as valid questions start with 0
    openallure.statedq = -1
    # What choice (if any) has been highlighted by gesture or keyboard?
    highlight = 0
    # When was the statement of the question complete?
    delayStartTime = 0
    # Do we have an answer? what number is it (with 0 being first answer)?
    answer = -1
    # What questions have been shown (list)?
    # Note: This list is also checked to avoid re-stating a question
    openallure.questions = []
    # What has been typed in so far
    openallure.currentString = u""
    # Subprocesses
    # graphViz = None
    # openallure.showResponses = eval(config['GraphViz']['showResponses'])
    # openallure.showText = eval(config['GraphViz']['showText'])
    # openallure.showLabels = eval(config['GraphViz']['showLabels'])
    # graphVizPath = config['GraphViz']['path']
    #if eval(config['GraphViz']['autoStart']):
    #    oagraph(seq,openallure.db,url,openallure.showText,openallure.showResponses,openallure.showLabels)
    #    graphViz = subprocess.Popen([graphVizPath,'oagraph.dot'])
    # Greetings
    #voice.speak('Hello')
    WELCOME_TEXT = [
        "",
        _(u" Welcome to the Open Allure Dialog System."),
        "",
        _(u" Keys:"),
        _(u" Escape quits"),
        _(u" Ctrl+I force input"),
        _(u" Ctrl+R refresh"),
        _(u" Ctrl+V paste"),
        "",
        _(u" Commands:"),
        _(u" exit"),
        _(u" open <filename or url>"),
        _(u" quit"),
        _(u" return (resumes at last question)"),
        _(u" show source (Mac only)"),
        ""]
    for line in WELCOME_TEXT:
        print line

    runFlag = True
    while runFlag:
        if not openallure.ready:
            # prepare for question display
            openallure.question = seq.sequence[openallure.onQuestion]
            choiceCount, \
            questionText, \
            justQuestionText = text.buildQuestionText(openallure.question)
            # if graphViz:
            #     # Create .dot file for new sequence
            #     graphViz.kill()
            #     oagraph(seq,openallure.db,url,openallure.showText,openallure.showResponses,openallure.showLabels)
            #     graphViz = subprocess.Popen([graphVizPath, 'oagraph.dot'])
            textRegions = text.writewrap(None, text.font,
                                         text.boundingRectangle,
                                         text.unreadColor,
                                         questionText[-1])
            # initialize pointers - no part of the question text
            # and none of the answers have been read aloud.
            # Note that question text is numbered from 0
            # while answers are numbered from 1.
            action = 0
            onAnswer = 0
            onText = 0
            openallure.stated = False
            if openallure.onQuestion in openallure.questions:
                # already shown once — don't read it aloud again
                openallure.stated = True
            # initialize selections - nothing has been highlighted
            # or previously selected as an answer
            answer = -1
            choice = (-1, 0)
            colorLevel = colorLevels = 12
            highlight = 0
            # initialize typed input
            openallure.currentString = u''
            # clear screen of last question
            screen.fill(backgroundColor, rect=textRect)
            # wait for prior speaking to finish
            if voice.pid_status > 0:
                try:
                    os.waitpid(voice.pid_status, 0)[1]
                except:
                    pass
                voice.pid_status = 0
            openallure.ready = True
            # clear any previous response
            nltkResponse = ''
            # start with gain
            openallure.gain = 1
            # arrival record for new question
            record_id = openallure.db.insert(time = time.time(), \
                url = unicode(url), q = openallure.onQuestion)

        # make sure currentString has been added to questionText
        if openallure.currentString:
            questionText[choiceCount] = questionText[choiceCount - 1] + \
                "\n" + openallure.currentString

        # get keyboard and mouse input
        mouseButtonDownEvent = False
        mouseButtonDownEventY = 0
        for event in pygame.event.get():
            if event.type == pygame.MOUSEBUTTONDOWN:
                mouseButtonDownEvent = True
                YCOORDINATE = 1
                mouseButtonDownEventY = event.pos[YCOORDINATE]
            if event.type == pygame.QUIT \
               or (event.type == pygame.KEYDOWN and \
                   event.key == pygame.K_ESCAPE):
                # if graphViz:
                #     graphViz.kill()
                runFlag = False
            # Trap and quit on Ctrl + C
            elif (event.type == pygame.KEYDOWN and
                  event.key == pygame.K_c and
                  pygame.key.get_mods() & pygame.KMOD_CTRL):
                return
            # Trap Ctrl + I to force input
            elif (event.type == pygame.KEYDOWN and
                  event.key == pygame.K_i and
                  pygame.key.get_mods() & pygame.KMOD_CTRL):
                # Note: This kills the entire current sequence.
                # The return command gives a way to back to it.
                seq.inputs = [_(u"Input"), _(u"[input];")]
                seq.sequence = seq.regroup(seq.inputs, \
                    seq.classify(seq.inputs))
                openallure.onQuestion = 0
                url = _(u'[input]')
                # record call to input
                record_id = openallure.db.insert(time = time.time(), \
                    url = unicode(url), q = 0)
                openallure.ready = False
            # Trap and paste clipboard on Ctrl + V for Mac
            elif (event.type == pygame.KEYDOWN and
                  event.key == pygame.K_v and
                  pygame.key.get_mods() & pygame.KMOD_CTRL):
                if sys.platform == 'darwin':
                    os.system('pbpaste > clipboard')
                    clipboard = open('clipboard').readlines()
                    if clipboard[0].startswith(u"http://") or \
                       clipboard[0].find(u"http://"):
                        openallure.currentString += clipboard[0]
                else:
                    clipboard = pygame.scrap.get(pygame.SCRAP_TEXT)
                    if clipboard.startswith(u"http://") or \
                       clipboard.find(u"http://"):
                        openallure.currentString += clipboard
            # Trap Ctrl + - (minus) to decrease font size
            elif (event.type == pygame.KEYDOWN and
                  event.key == pygame.K_MINUS and
                  pygame.key.get_mods() & pygame.KMOD_CTRL):
                text.fontSize -= 5
                text.font = pygame.font.SysFont( text.fontName, \
                    text.fontSize )
            # Trap Ctrl + + (plus) to increase font size
            elif (event.type == pygame.KEYDOWN and
                  event.key == pygame.K_EQUALS and
                  pygame.key.get_mods() & pygame.KMOD_CTRL):
                text.fontSize += 5
                text.font = pygame.font.SysFont( text.fontName, \
                    text.fontSize )
            # Trap Ctrl + R to refresh from url without changing question number
            elif (event.type == pygame.KEYDOWN and
                  event.key == pygame.K_r and
                  pygame.key.get_mods() & pygame.KMOD_CTRL):
                # if url is nltkRespose.txt, look back for something else
                # worth refreshing
                if url == u'nltkResponse.txt':
                    for id in range(record_id - 1, -1, -1):
                        record = openallure.db[id]
                        if not record.url in (url, \
                            u'nltkResponse.txt', \
                            _(u'[input]')):
                            url = record.url
                            break
                seq = QSequence(filename=url)
                try:
                    openallure.systemVoice = config['Voice'][seq.language]
                except KeyError:
                    pass
                openallure.ready = False
            # Define toggle keys and capture string inputs
            elif event.type == pygame.KEYDOWN:
                # Keys 1 through 6 select choices 1 through 6
                if event.key in range(pygame.K_1, pygame.K_6) and \
                   (not openallure.question[INPUTFLAG][choiceCount - 1] == 1 or
                    (openallure.question[INPUTFLAG][choiceCount - 1] == 1 and
                     openallure.currentString == u'')):
                    answer = event.key - pygame.K_1
                    if answer < choiceCount:
                        # Record choice along with destination, if any
                        record_id = openallure.db.insert(time = time.time(), \
                            url = unicode(url), q = openallure.onQuestion, \
                            a = answer,
                            cmd = unicode(openallure.question[DESTINATION][answer]))
                        # if graphViz:
                        #     graphViz.kill()
                        #     oagraph(seq,openallure.db,url,openallure.showText,openallure.showResponses,openallure.showLabels)
                        #     graphViz = subprocess.Popen([graphVizPath, 'oagraph.dot'])
                        choice = (answer + 1, 0)
                        colorLevel = 0
                        # Update screen to reflect choice
                        text.paintText(screen, justQuestionText, onText,
                                       questionText, onAnswer, highlight,
                                       openallure.stated, choice,
                                       colorLevel, colorLevels)
                        pygame.display.flip()
                    else:
                        answer = -1
                elif event.key == pygame.K_F6:
                    # reveal all the attributes of openallure
                    print "\nCurrent values of openallure object variables:\n"
                    for item in openallure.__dict__:
                        print item + ":", openallure.__dict__[item]
                    # drop into interpreter for debugging
                    print "\n Press Ctrl+D to close console and resume. " + \
                          "Enter exit() to exit.\n"
                    import code
                    code.interact(local=locals())
                # Allow space to silence reading of question
                # unless there is an input (which might require a space)
                elif event.key == pygame.K_SPACE and \
                     not openallure.question[INPUTFLAG][choiceCount - 1] == 1:
                    # Silence reading of question
                    openallure.stated = True
                elif event.key == pygame.K_RIGHT and openallure.allowNext:
                    # Choice is first non-zero entry
                    # in openallure.question[ACTION]
                    onChoice = 0
                    for i in openallure.question[ACTION]:
                        onChoice += 1
                        if not i == 0:
                            answer = onChoice - 1
                            record_id = openallure.db.insert(time = time.time(), \
                                url = unicode(url), q = openallure.onQuestion, \
                                a = answer,
                                cmd = unicode(openallure.question[DESTINATION][answer]))
                            # if graphViz:
                            #     graphViz.kill()
                            #     oagraph(seq,openallure.db,url,openallure.showText,openallure.showResponses,openallure.showLabels)
                            #     graphViz = subprocess.Popen([graphVizPath, 'oagraph.dot'])
                            choice = (onChoice, 0)
                            break
                    del onChoice
                elif event.key == pygame.K_LEFT:
                    # go back to the previously shown question (if any)
                    if len(openallure.questions) > 0:
                        openallure.onQuestion = openallure.questions.pop()
                        openallure.ready = False
                    else:
                        openallure.onQuestion = 0
                elif event.key == pygame.K_UP:
                    # walk back through typed-input history
                    if len(chatHistory) > 0 and onChatHistory > 0:
                        onChatHistory -= 1
                        openallure.currentString = chatHistory[onChatHistory]
                elif event.key == pygame.K_DOWN:
                    # walk forward through typed-input history
                    if len(chatHistory) > 0 and \
                       onChatHistory < len(chatHistory) - 1:
                        onChatHistory += 1
                        openallure.currentString = chatHistory[onChatHistory]
                elif event.key == pygame.K_RETURN:
                    if openallure.currentString:
                        # add to history
                        chatHistory.append(openallure.currentString)
                        onChatHistory = len(chatHistory)
                        # record input string
                        record_id = openallure.db.insert(time = time.time(), \
                            url = unicode(url), q = openallure.onQuestion, \
                            a = answer, cmd = openallure.currentString)
                        # Check for rules from script at end of question 0
                        if len(seq.sequence[0]) > 9:
                            scriptRules = seq.sequence[0][RULE]
                        else:
                            scriptRules = None
                        nltkResponse, \
                        nltkType, \
                        nltkName = \
                            openallure_chatbot.respond(openallure.currentString, \
                                scriptRules)
                        # Act on commands
                        if nltkType == 'goto' or \
                           (nltkType == 'text' and nltkName == 'what now'):
                            # find question with goto tag = ruleName or
                            # currentString (if it didn't match anything else)
                            if openallure.question[QUESTION] == [_(u"Input")]:
                                # Back up to first non-Input, non-Sorry question
                                for id in range(record_id - 1, -1, -1):
                                    try:
                                        record = openallure.db[id]
                                        if not record.url in (
                                            url,
                                            u'nltkResponse.txt',
                                            _(u'[input]')):
                                            seq = QSequence(filename = record.url, \
                                                path = seq.path, \
                                                nltkResponse = nltkResponse)
                                            try:
                                                openallure.systemVoice = config[
                                                    'Voice'][seq.language]
                                            except KeyError:
                                                pass
                                            url = record.url
                                            openallure.onQuestion = record.q
                                            openallure.ready = False
                                            break
                                    except:
                                        pass
                            tags = [question[TAG] for question in seq.sequence]
                            if nltkName in tags:
                                openallure.onQuestion = tags.index(nltkName)
                                openallure.ready = False
                            if nltkName== 'what now' and \
                               openallure.currentString.lower() in tags:
                                if openallure.onQuestion != \
                                   tags.index(openallure.currentString):
                                    openallure.questions.append(
                                        openallure.onQuestion)
                                openallure.onQuestion = \
                                    tags.index(openallure.currentString)
                                openallure.ready = False
                            # If still no luck finding a match, use currentString
                            # to search all through the text of all the questions
                            if openallure.ready:
                                for qnum, question in enumerate(seq.sequence):
                                    # search in question text and non-Input answer text
                                    nonInputAnswerText = [answer for answer,input in \
                                        itertools.izip(question[ANSWER],
                                                       question[INPUTFLAG])
                                        if not input]
                                    qtext = " ".join(question[QUESTION]) + " " + \
                                            " ".join(nonInputAnswerText)
                                    if qtext.lower().find(
                                        openallure.currentString.lower()) > -1:
                                        if openallure.onQuestion != qnum:
                                            openallure.questions.append(
                                                openallure.onQuestion)
                                        openallure.onQuestion = qnum
                                        openallure.ready = False
                                        break
                        if nltkType == 'quit':
                            #TODO: Make this more polite
                            # if graphViz:
                            #     graphViz.kill()
                            raise SystemExit
                        if nltkType == 'return':
                            # Find first different sequence in db, walking back
                            for id in range(record_id - 1, -1, -1):
                                try:
                                    record = openallure.db[id]
                                    if not record.url in (url,
                                        u'nltkResponse.txt',
                                        _(u'[input]')):
                                        seq = QSequence(filename = record.url, \
                                            path = seq.path, \
                                            nltkResponse = nltkResponse)
                                        try:
                                            openallure.systemVoice = config[
                                                'Voice'][seq.language]
                                        except KeyError:
                                            pass
                                        url = record.url
                                        openallure.onQuestion = record.q
                                        openallure.ready = False
                                        # if graphViz:
                                        #     # Fall through into graphing
                                        #     nltkType = 'graph'
                                        #     nltkName = 'show'
                                        break
                                except:
                                    pass
                            nltkResponse = u''
                            openallure.currentString = u''
                        if nltkType == 'open':
                            # Reset stated question pointer for new sequence
                            openallure.statedq = -1
                            path = seq.path
                            # extract [bracketed] link target from the response
                            linkStart = nltkResponse.find(u'[')
                            linkEnd = nltkResponse.find(u']', linkStart)
                            url = nltkResponse[linkStart + 1:linkEnd]
                            seq = QSequence(filename=url,
                                            path=path,
                                            nltkResponse=nltkResponse)
                            try:
                                openallure.systemVoice = config['Voice'][
                                    seq.language]
                            except KeyError:
                                pass
                            openallure.questions = []
                            openallure.onQuestion = 0
                            openallure.ready = False
                            # if graphViz:
                            #     # Fall through into graphing
                            #     nltkType = 'graph'
                            #     nltkName = 'show'
                            nltkResponse = u''
                            openallure.currentString = u''
                        if nltkType == 'show':
                            # use open (Mac only) to view source
                            if sys.platform == 'darwin':
                                # Find first non-[input] sequence in db, walking back
                                for id in range(record_id - 1, -1, -1):
                                    record = openallure.db[id]
                                    if record.url.find('.txt') > 0 or \
                                       record.url.find('http:') == 0 :
                                        if not record.url == 'nltkResponse.txt':
                                            url = record.url
                                            break
                                os.system("open " + url)
                        # if nltkResponse is one line containing a semicolon,
                        # replace the semicolon with \n
                        if nltkResponse.find('\n') == -1:
                            nltkResponse = nltkResponse.replace(';', '\n')
                        if nltkResponse:
                            answer = choiceCount - 1
                            choice = (choiceCount, 0)
                    else:
                        # This takes last response
                        answer = choiceCount - 1
                        choice = (choiceCount, 0)
                elif event.key == pygame.K_BACKSPACE and \
                     openallure.question[INPUTFLAG][choiceCount - 1] == 1:
                    # delete the last typed character and redraw the input
                    openallure.currentString = openallure.currentString[0:-1]
                    openallure.question[ANSWER][choiceCount - 1] = \
                        openallure.currentString
                    questionText[choiceCount] = \
                        questionText[choiceCount - 1] + \
                        u"\n" + openallure.currentString
                    screen.fill(backgroundColor, rect=textRect)
                elif event.key <= 127 and \
                     openallure.question[INPUTFLAG][-1] == 1:
                    # p rint event.key
                    mods = pygame.key.get_mods()
                    if mods & pygame.KMOD_SHIFT:
                        # map shifted number-row keys to their symbols
                        if event.key in range(47, 60):
                            openallure.currentString += \
                                (u'?', u')', u'!', u'@', u'#', u'$', u'%', u'^', \
                                 u'&', u'*', u'(', u'', u':')[range(47, 60).index(event.key)]
                        elif event.key == 45:
                            openallure.currentString += u"_"
                        elif event.key == 61:
                            openallure.currentString += u"+"
                        elif event.key == 96:
                            openallure.currentString += u"~"
                        else:
                            openallure.currentString += \
                                unicode(chr(event.key).upper())
                    else:
                        openallure.currentString += unicode(chr(event.key))
                    # openallure.question[ANSWER][choiceCount - 1] = \
                    #     openallure.currentString
                    # Add currentString to text being displayed
                    questionText[choiceCount] = \
                        questionText[choiceCount - 1] + \
                        u"\n" + openallure.currentString
                    screen.fill(backgroundColor, rect=textRect)

        # check for automatic page turn
        if openallure.ready and \
           openallure.stated == True and \
           not openallure.currentString and \
           openallure.question[ANSWER][-1] == _(u'[next]') and \
           pygame.time.get_ticks() - delayStartTime > delayTime:
            # This takes last response
            answer = choiceCount - 1
            choice = (choiceCount, 0)

        if openallure.statedq == openallure.onQuestion:
            openallure.stated = True

        if openallure.ready and not openallure.stated:
            # work through statement of question
            # speaking each part of the question and each of the answers
            # (unless the process is cut short by other events)
            if onText == 0:
                screen.fill(backgroundColor, rect=textRect)
                pygame.display.flip()
            # Stop when onAnswer pointer is beyond length of answer list
            if onAnswer > len(openallure.question[ANSWER]):
                openallure.stated = True
                openallure.statedq = openallure.onQuestion
            else:
                # Speak each answer
                #(but only after speaking the full question below)
                if onAnswer > 0 and onAnswer < len(
                    openallure.question[ANSWER]) + 1:
                    answerText = openallure.question[ANSWER][onAnswer - 1]
                    if not (answerText.startswith(_('[input]')) or
                            answerText.startswith(_('[next]')) or
                            answerText.endswith('...]') or
                            answerText.endswith('...')):
                        if len(answerText) > 0:
                            # Check for answer with "A. "
                            if answerText[1:3] == '. ':
                                voice.speak(answerText[3:].strip(),
                                            openallure.systemVoice)
                            else:
                                voice.speak(answerText.strip(),
                                            openallure.systemVoice)
                    del answerText
                # Speak each part of the question using onText pointer
                # to step through parts of question list
                if onText < len(openallure.question[QUESTION]):
                    if not (openallure.question[QUESTION][onText].endswith(
                        '...')):
                        if len(openallure.question[QUESTION][onText]) > 0:
                            # speak the current part of the question
                            voice.speak(openallure.question[QUESTION][onText],
                                        openallure.systemVoice)
                if answer < 0 and openallure.ready:
                    # Trap mouse click on text region
                    textRegions = text.writewrap(None, \
                        text.font, \
                        text.boundingRectangle, \
                        text.unreadColor, \
                        questionText[-1])
                    # Create list where each element indicates with 1 or 0
                    # whether Y coordinate is in the region
                    regions = [ inRegion(region, mouseButtonDownEventY)
                                for region in textRegions ]
                    # Find which region has a 1, if any
                    if 1 in regions:
                        onRegion = regions.index(1)
                    else:
                        onRegion = 0
                    if onRegion > 0:
                        if mouseButtonDownEvent:
                            answer = onRegion - 1
                            if answer < choiceCount:
                                # record selection of answer
                                record_id = openallure.db.insert(time = time.time(), \
                                    url = unicode(url), q = openallure.onQuestion, \
                                    a = answer)
                                # if graphViz and openallure.question[ACTION][answer] == 0:
                                #     # Create .dot file for one sequence in response to answer in place
                                #     graphViz.kill()
                                #     oagraph(seq,openallure.db,url,openallure.showText,openallure.showResponses,openallure.showLabels)
                                #     graphViz = subprocess.Popen([graphVizPath, 'oagraph.dot'])
                                choice = (answer + 1, 0)
                                colorLevel = 0
                                # Update screen to reflect choice
                                # text.paintText(screen,
                                #                justQuestionText, onText,
                                #                questionText, onAnswer,
                                #                highlight,
                                #                openallure.stated,
                                #                choice,
                                #                colorLevel, colorLevels)
                                # pygame.display.flip()
                            else:
                                answer = -1
                        else:
                            highlight = onRegion
                            # Update screen to reflect highlight
                            # text.paintText(screen,
                            #                justQuestionText, onText,
                            #                questionText, onAnswer,
                            #                highlight,
                            #                openallure.stated,
                            #                choice,
                            #                colorLevel, colorLevels)
                            # pygame.display.flip()
                            colorLevel -= 1
                            colorLevel = max(colorLevel, 0)
                    else:
                        highlight = 0
                        colorLevel = colorLevels
                # block non-choices
                if choice[0] < 0 or choice[0] > len(questionText) - 1:
                    choice = (-1, 0)
                screen.fill(backgroundColor, rect=textRect)
                text.paintText(screen, justQuestionText, onText,
                               questionText, onAnswer, highlight,
                               openallure.stated, choice,
                               colorLevel, colorLevels)
                # and move on to the next part
                # (which needs to be displayed while being spoken)
                onText += 1
                # once all the parts of the question are done,
                # start working through answers
                if onAnswer > 0:
                    onAnswer += 1
                if onText == len(openallure.question[QUESTION]):
                    onAnswer = 1
                    # Take note of time for automatic page turns
                    delayStartTime = pygame.time.get_ticks()
                pygame.display.flip()
        elif not choice == (-1, 0) and openallure.ready:
            openallure.stated = True
            # respond to choice when something has been typed and entered
            if openallure.currentString:
                if len(nltkResponse) == 0:
                    choice = (-1, 0)
                    answer = -1
                    voice.speak(_("Try again"), openallure.systemVoice)
                else:
                    voice.speak(_(u"You entered ") + openallure.currentString,
                                openallure.systemVoice)
                # Reset string
                openallure.currentString = u''
            # check whether a link is associated with this answer and,
            # if so, follow it
            if len(openallure.question[LINK] ) and openallure.question[LINK][answer]:
                webbrowser.open_new_tab(openallure.question[LINK][answer])
                # wait in loop until window (re)gains focus
                if openallure.stickBrowser:
                    openallure.gain = 0
                    while not openallure.gain:
                        for event in pygame.event.get():
                            if event.type == pygame.ACTIVEEVENT:
                                openallure.gain = event.gain
            #check that response exists for answer
            if len(openallure.question[RESPONSE]) and \
               answer < len(openallure.question[RESPONSE]) and \
               (isinstance(openallure.question[RESPONSE][answer], str) or \
                isinstance(openallure.question[RESPONSE][answer], unicode)):
                #speak response to answer
                voice.speak(openallure.question[RESPONSE][answer].strip(),
                            openallure.systemVoice)
            #check that next sequence exists as integer for answer
            if len(openallure.question[ACTION]) and \
               answer < len(openallure.question[ACTION]) and \
               isinstance(openallure.question[ACTION][answer], int):
                #get new sequence or advance in sequence
                action = openallure.question[ACTION][answer]
                if len(openallure.question[DESTINATION][answer]) > 0 and \
                   not openallure.question[ANSWER][answer] == _(u'[next]'):
                    # speak("New source of questions")
                    # Reset stated question pointer for new sequence
                    openallure.statedq = -1
                    path = seq.path
                    url = openallure.question[DESTINATION][answer]
                    seq = QSequence(filename=url, path=path,
                                    nltkResponse=nltkResponse)
                    try:
                        openallure.systemVoice = config['Voice'][seq.language]
                    except KeyError:
                        pass
                    openallure.onQuestion = 0
                    openallure.questions = []
                else:
                    # Add last question to stack (if not duplicate) and move on
                    if action > 0:
                        openallure.questions.append(openallure.onQuestion)
                        openallure.onQuestion = openallure.onQuestion + action
                    elif action < 0:
                        openallure.onQuestion = max( 0,
                            openallure.onQuestion + action)
                    # Quit if advance goes beyond end of sequence
                    if openallure.onQuestion >= len(seq.sequence):
                        voice.speak(_("You have reached the end. Goodbye."),
                                    openallure.systemVoice)
                        return
                openallure.ready = False
# Test-script setup: build a throwaway buzhug base to exercise float storage.
try:
    from buzhug import ProxyBase, buzhug_files
except:
    # fall back to the client/server implementation
    from buzhug_client import ProxyBase
    import buzhug_files

# sample data for populating the test table
names = ['pierre','claire','simon','camille','jean',
         'florence','marie-anne']
fr_names = [ 'andr\x82','fran\x87ois','h\x82l\x8ane' ] # latin-1 encoding
# flip to True to exercise ProxyBase instead of the local Base
remote = False
if not remote:
    db = Base(r'dummy')
else:
    db = ProxyBase('dummy')
# NOTE(review): Base, date, dtime and unicode must be in scope from earlier
# in this (Python 2) script — confirm against the full file
db.create(('name',str), ('fr_name',unicode), ('age',int),
          ('size',int,300), ('birth',date,date(1994,1,14)),
          ('afloat',float,1.0),
          ('birth_hour', dtime,dtime(10,10,10)),
          mode='override')
# test float conversions
f = db._file["afloat"]
def all(v):
    # byte values of an encoded float string
    # NOTE(review): shadows the builtin all() for the rest of this script
    return [ord(c) for c in v]
# Test-script setup: create a dummy buzhug base and fill it with random rows.
from buzhug import Base, Record
try:
    from buzhug import ProxyBase, buzhug_files
except:
    # fall back to the client/server implementation
    from buzhug_client import ProxyBase
    import buzhug_files

# sample data for populating the test table
names = ['pierre','claire','simon','camille','jean',
         'florence','marie-anne']
fr_names = [ 'andr\x82','fran\x87ois','h\x82l\x8ane' ] # latin-1 encoding
# flip to True to exercise ProxyBase instead of the local Base
remote = False
if not remote:
    db = Base(r'dummy')
else:
    db = ProxyBase('dummy')
db.create(('name',str), ('fr_name',unicode), ('age',int),
          ('size',int), ('birth',date), ('afloat',float),
          mode='override')
# fill with 100 random records
# NOTE(review): random, date and unicode must be in scope from earlier in
# this (Python 2) script — confirm against the full file
for i in range(100):
    db.insert(name=random.choice(names),
              fr_name = unicode(random.choice(fr_names),'latin-1'),
              age=random.randint(7,47), size=random.randint(110,175),
              birth=date(random.randint(1958,1999), random.randint(1,12), 10),
              afloat = random.uniform(-10**random.randint(-307,307),
                                      10**random.randint(-307,307)))
class RepDB:
    """Tracks which (oid, block_id) pairs are stored locally and at which
    version, persisted as a buzhug table of (key, version) string rows."""

    def __init__(self, path):
        self.path = path
        self.dbh_stored_blocks = Base(self.path)
        try:
            self.dbh_stored_blocks.create(('key', str), ('version', str))
        except IOError:
            # table already exists on disk
            pass

    def open(self):
        self.dbh_stored_blocks.open()

    def add(self, oid, block_id, version):
        """Insert or update the stored version for (oid, block_id)."""
        key = str((oid, block_id))
        # lets see if we already have a key stored
        rows = self.dbh_stored_blocks.select_for_update(['key', 'version'],
                                                        key=key)
        if rows == []:
            self.dbh_stored_blocks.insert(key, str(version))
        else:
            # BUG FIX: update() was previously called with no arguments,
            # so an existing record never had its version field changed.
            rows[0].update(version=str(version))

    def get(self, oid, block_id):
        """Return the stored version string; raises IndexError if absent."""
        key = str((oid, block_id))
        result = self.dbh_stored_blocks.select(['key', 'version'], key=key)
        return result[0].version

    def update(self, oid, block_id, version):
        # alias for add(), which already upserts
        self.add(oid, block_id, version)

    def delete(self, oid, block_id):
        """Remove the record for (oid, block_id); raises IndexError if absent."""
        key = str((oid, block_id))
        rows = self.dbh_stored_blocks.select_for_update(['key', 'version'],
                                                        key=key)
        self.dbh_stored_blocks.delete(rows[0])

    def close(self):
        self.dbh_stored_blocks.close()

    def getIterator(self):
        # materialize the records to decouple the iterator from the table
        return RepDBIterator([record for record in self.dbh_stored_blocks])
class DB: def __init__(self, storage_path): self.dbh_objects = Base(os.path.join(storage_path, 'objects')) self.dbh_blocks = Base(os.path.join(storage_path, 'blocks')) self.dbh_replicas = Base(os.path.join(storage_path, 'replicas')) self.dbh_tree = Base(os.path.join(storage_path, 'tree')) self.dbh_paths = Base(os.path.join(storage_path, 'paths')) self.dbh_id = Base(os.path.join(storage_path, 'id')) self.dbh_tags = Base(os.path.join(storage_path, 'tags')) self.storage_path = storage_path def __create_root(self): """ Check if the filesystem has a / and if not create it""" print "Initializing filesystem..." if self.get_file(path='/'): return print "Creating root..." f = FSObject(1,1,'/',0,0,0,0) # lets see if we already have a key stored set = self.dbh_objects.select(['oid'],oid=str(f.oid)) if set == []: # we have create tree and paths first self.dbh_tree.insert(str(f.oid), str(f.parent)) self.dbh_paths.insert(str((f.parent, f.path))) self.dbh_objects.insert(str(f.oid), dumps(f), self.dbh_tree[len(self.dbh_tree)-1], self.dbh_paths[len(self.dbh_paths)-1]) #set the current oid for the id increment sequence set = self.dbh_id.select(['curr_oid']) if set == []: self.dbh_id.insert('1') def setup_fs_db(self): try: self.dbh_blocks.create(('key', str), ('blocks', str)) except IOError: self.dbh_blocks.open() try: self.dbh_replicas.create(('key', str), ('replicas', str)) except IOError: self.dbh_replicas.open() try: self.dbh_tree.create(('oid', str), ('parent', str)) except IOError: self.dbh_tree.open() try: self.dbh_tags.create(('oid', str), ('tag', str)) except IOError: self.dbh_tags.open() try: self.dbh_paths.create(('key', str)) except IOError: self.dbh_paths.open() try: self.dbh_id.create(('curr_oid', str)) except IOError: self.dbh_id.open() try: self.dbh_objects.create(('oid', str), ('fsobj', str), ('tree', self.dbh_tree), ('paths', self.dbh_paths)) except IOError: self.dbh_objects.open() self.__create_root() def get_path_oid(self, path): """Gets the parent filenode for 
path""" nodes = [] parent_path = path while 1: (parent_path,node) = os.path.split(parent_path) if node == '': nodes.insert(0,'/') break nodes.insert(0,node) parent_oid = 0 for node_name in nodes: key = str((parent_oid, node_name)) print "looking up: %s" % key # search for a match f = None for record in [record for record in self.dbh_objects]: if record.paths.key == key: f = loads(record.fsobj) break print "found it!" if not f: return 0 parent_oid = f.oid return parent_oid def insert_file(self, path, fsobj): #check first if there is a parent directory to store this file f = self.get_file(path=path) print "inserting file with path: "+path print fsobj if not f: print "ERR: [%s]" % os.path.split(fsobj.path)[0] raise FileSystemError('No parent directory to store: %s' % fsobj.path) #the parent of this object is the path fsobj.parent = f.oid set = self.dbh_id.select_for_update(['curr_oid']) curr_oid = int(set[0].curr_oid) + 1 fsobj.oid = curr_oid print "Inserting OID: %s" % fsobj # lets see if we already have a key stored result = self.dbh_objects.select(['oid','fsobj'],oid=str(fsobj.oid)) if result != []: raise FileSystemError('File already exists') else: # we have create tree and paths first self.dbh_tree.insert(str(fsobj.oid), str(fsobj.parent)) self.dbh_paths.insert(str((fsobj.parent, fsobj.path))) self.dbh_objects.insert(str(fsobj.oid), dumps(fsobj), self.dbh_tree[len(self.dbh_tree)-1], self.dbh_paths[len(self.dbh_paths)-1]) set[0].update(curr_oid=str(curr_oid)) return curr_oid def get_file(self, oid='', path=''): if oid: set = self.dbh_objects.select(['oid', 'fsobj'], oid=str(oid)) if set == []: f = None else: f = set[0].fsobj elif path: if path == '/': key = str((0,'/')) else: parent_oid = self.get_path_oid(os.path.split(path)[0]) node_name = os.path.split(path)[1] key = str((parent_oid, node_name)) # search for a match f = None for record in [record for record in self.dbh_objects]: print record.paths.key if record.paths.key == key: f = record.fsobj break else: f = 
None if f: f = loads(f) return f def get_children(self, oid): # lookup FSOBJECT with given oid set = self.dbh_objects.select(['oid', 'fsobj'], oid=str(oid)) if set == []: return [] file_array = [] # lookup objects with parent oid set = self.dbh_tree.select(['oid', 'parent'], parent=str(oid)) for i in set: obj = self.dbh_objects.select(['oid', 'fsobj'], oid=str(i.oid)) if obj != []: file_array.append(loads(obj[0].fsobj)) return file_array def debug_print_db(self, db): pass def print_object_db(self): self.debug_print_db(self.dbh_objects) def delete_dir(self,oid): pass def delete_file(self, oid): pass def rename_file(self,src,dest): pass def update_file(self, fsobj): set = self.dbh_objects.select_for_update(['oid', 'fsobj'], oid=str(fsobj.oid)) if set != []: set[0].update(fsobj=dumps(fsobj)) def add_block(self, block, serverid): f = self.get_file(oid=str(block.oid)) if not f: raise FileSystemError('add_block: Object %s does not exist' % block.oid) key = str((long(block.oid),long(block.block_id))) #the key is both the oid and the block_id set1 = self.dbh_replicas.select_for_update(['key', 'replicas'], key=key) if set1 == []: replicas = FSReplicas(block.oid, block.block_id) else: replicas = loads(set1[0].replicas) f.blocks[block.block_id] = block.version set2 = self.dbh_blocks.select_for_update(['key', 'blocks'], key=key) if set2 == []: b = None else: b = set2[0].block if b: b = loads(b) diff = block.size - b.size else: diff = block.size f.size += diff # update or insert? if set1 == []: self.dbh_blocks.insert(key, dumps(block)) else: set1[0].update(blocks=dumps(block)) self.update_file(f) replicas.add(serverid, block.version) # update or insert? 
if set2 == []: self.dbh_replicas.insert(key,dumps(replicas)) else: set2[0].update(replicas=dumps(replicas)) def add_block_replica(self, block, serverid): f = self.get_file(str(block.oid)) if not f: raise FileSystemError('add_block_replica: Object %s does not exist' % block.oid) key = str((block.oid, block.block_id)) set = self.dbh_replicas.select_for_update(['key', 'replicas'], key=key) if set == []: replicas = FSReplicas(block.oid, block.block_id) else: replicas = loads(set[0].replicas) replicas.add(serverid, block.version) # update or insert? if set == []: self.dbh_replicas.insert(key,dumps(replicas)) else: set[0].update(replicas=dumps(replicas)) def get_block_replicas(self, oid, block_id): key = str((long(oid), long(block_id))) set = self.dbh_replicas.select(['key', 'replicas'], key=key) if set == []: return None return loads(set[0].replicas) def get_block(self, oid, block_id): key = str((long(oid), long(block_id))) set = self.dbh_blocks.select(['key', 'blocks'], key=key) if set == []: return None return loads(set[0].blocks) def print_replicas_db(self): self.debug_print_db(self.dbh_replicas) def close_fs_db(self): self.dbh_blocks.close() self.dbh_replicas.close() self.dbh_tree.close() self.dbh_id.close() self.dbh_paths.close() self.dbh_objects.close()
def __init__(self):
    # Load the persisted application settings, bring the database up to
    # date, then open the buzhug table that holds the filters.
    Settings.load()
    self.sync_database()
    self.db = Base(os.path.join(Settings.DB_DIR, 'Filters'))
from buzhug import Base import urllib2 as urllib import json import sys import pickle import os #now ge those books unfound = [] fil = open('badguys', 'r+') unfound = pickle.load(fil) print 'pickle loaded' userdir = os.getcwd() bookDB = Base(userdir + '/db/bookDB').open() host = 'http://gbserver3a.cs.unc.edu' bookAsJson = '/book-as-json/?id=%s' for un in unfound: try: url = host + bookAsJson % un print un print 'loading ', url fp = urllib.urlopen(host + bookAsJson % un) bytes = fp.read().decode('utf-8') book = json.loads(bytes) print 'url loaded', fp try: #make sure record doesnt exist? Implelement later #check the stock reviewed status and put it in the #field I will play with #2 is unsure spam, 1 is HAM, 0 is spam.
class DB: def __init__(self, storage_path): self.dbh_objects = Base(os.path.join(storage_path, 'objects')) self.dbh_blocks = Base(os.path.join(storage_path, 'blocks')) self.dbh_replicas = Base(os.path.join(storage_path, 'replicas')) self.dbh_tree = Base(os.path.join(storage_path, 'tree')) self.dbh_paths = Base(os.path.join(storage_path, 'paths')) self.dbh_id = Base(os.path.join(storage_path, 'id')) self.dbh_tags = Base(os.path.join(storage_path, 'tags')) self.storage_path = storage_path def __create_root(self): """ Check if the filesystem has a / and if not create it""" print "Initializing filesystem..." if self.get_file(path='/'): return print "Creating root..." f = FSObject(1, 1, '/', 0, 0, 0, 0) # lets see if we already have a key stored set = self.dbh_objects.select(['oid'], oid=str(f.oid)) if set == []: # we have create tree and paths first self.dbh_tree.insert(str(f.oid), str(f.parent)) self.dbh_paths.insert(str((f.parent, f.path))) self.dbh_objects.insert(str(f.oid), dumps(f), self.dbh_tree[len(self.dbh_tree) - 1], self.dbh_paths[len(self.dbh_paths) - 1]) #set the current oid for the id increment sequence set = self.dbh_id.select(['curr_oid']) if set == []: self.dbh_id.insert('1') def setup_fs_db(self): try: self.dbh_blocks.create(('key', str), ('blocks', str)) except IOError: self.dbh_blocks.open() try: self.dbh_replicas.create(('key', str), ('replicas', str)) except IOError: self.dbh_replicas.open() try: self.dbh_tree.create(('oid', str), ('parent', str)) except IOError: self.dbh_tree.open() try: self.dbh_tags.create(('oid', str), ('tag', str)) except IOError: self.dbh_tags.open() try: self.dbh_paths.create(('key', str)) except IOError: self.dbh_paths.open() try: self.dbh_id.create(('curr_oid', str)) except IOError: self.dbh_id.open() try: self.dbh_objects.create(('oid', str), ('fsobj', str), ('tree', self.dbh_tree), ('paths', self.dbh_paths)) except IOError: self.dbh_objects.open() self.__create_root() def get_path_oid(self, path): """Gets the parent 
filenode for path""" nodes = [] parent_path = path while 1: (parent_path, node) = os.path.split(parent_path) if node == '': nodes.insert(0, '/') break nodes.insert(0, node) parent_oid = 0 for node_name in nodes: key = str((parent_oid, node_name)) print "looking up: %s" % key # search for a match f = None for record in [record for record in self.dbh_objects]: if record.paths.key == key: f = loads(record.fsobj) break print "found it!" if not f: return 0 parent_oid = f.oid return parent_oid def insert_file(self, path, fsobj): #check first if there is a parent directory to store this file f = self.get_file(path=path) print "inserting file with path: " + path print fsobj if not f: print "ERR: [%s]" % os.path.split(fsobj.path)[0] raise FileSystemError('No parent directory to store: %s' % fsobj.path) #the parent of this object is the path fsobj.parent = f.oid set = self.dbh_id.select_for_update(['curr_oid']) curr_oid = int(set[0].curr_oid) + 1 fsobj.oid = curr_oid print "Inserting OID: %s" % fsobj # lets see if we already have a key stored result = self.dbh_objects.select(['oid', 'fsobj'], oid=str(fsobj.oid)) if result != []: raise FileSystemError('File already exists') else: # we have create tree and paths first self.dbh_tree.insert(str(fsobj.oid), str(fsobj.parent)) self.dbh_paths.insert(str((fsobj.parent, fsobj.path))) self.dbh_objects.insert(str(fsobj.oid), dumps(fsobj), self.dbh_tree[len(self.dbh_tree) - 1], self.dbh_paths[len(self.dbh_paths) - 1]) set[0].update(curr_oid=str(curr_oid)) return curr_oid def get_file(self, oid='', path=''): if oid: set = self.dbh_objects.select(['oid', 'fsobj'], oid=str(oid)) if set == []: f = None else: f = set[0].fsobj elif path: if path == '/': key = str((0, '/')) else: parent_oid = self.get_path_oid(os.path.split(path)[0]) node_name = os.path.split(path)[1] key = str((parent_oid, node_name)) # search for a match f = None for record in [record for record in self.dbh_objects]: print record.paths.key if record.paths.key == key: f = 
record.fsobj break else: f = None if f: f = loads(f) return f def get_children(self, oid): # lookup FSOBJECT with given oid set = self.dbh_objects.select(['oid', 'fsobj'], oid=str(oid)) if set == []: return [] file_array = [] # lookup objects with parent oid set = self.dbh_tree.select(['oid', 'parent'], parent=str(oid)) for i in set: obj = self.dbh_objects.select(['oid', 'fsobj'], oid=str(i.oid)) if obj != []: file_array.append(loads(obj[0].fsobj)) return file_array def debug_print_db(self, db): pass def print_object_db(self): self.debug_print_db(self.dbh_objects) def delete_dir(self, oid): pass def delete_file(self, oid): pass def rename_file(self, src, dest): pass def update_file(self, fsobj): set = self.dbh_objects.select_for_update(['oid', 'fsobj'], oid=str(fsobj.oid)) if set != []: set[0].update(fsobj=dumps(fsobj)) def add_block(self, block, serverid): f = self.get_file(oid=str(block.oid)) if not f: raise FileSystemError('add_block: Object %s does not exist' % block.oid) key = str( (long(block.oid), long(block.block_id))) #the key is both the oid and the block_id set1 = self.dbh_replicas.select_for_update(['key', 'replicas'], key=key) if set1 == []: replicas = FSReplicas(block.oid, block.block_id) else: replicas = loads(set1[0].replicas) f.blocks[block.block_id] = block.version set2 = self.dbh_blocks.select_for_update(['key', 'blocks'], key=key) if set2 == []: b = None else: b = set2[0].block if b: b = loads(b) diff = block.size - b.size else: diff = block.size f.size += diff # update or insert? if set1 == []: self.dbh_blocks.insert(key, dumps(block)) else: set1[0].update(blocks=dumps(block)) self.update_file(f) replicas.add(serverid, block.version) # update or insert? 
if set2 == []: self.dbh_replicas.insert(key, dumps(replicas)) else: set2[0].update(replicas=dumps(replicas)) def add_block_replica(self, block, serverid): f = self.get_file(str(block.oid)) if not f: raise FileSystemError( 'add_block_replica: Object %s does not exist' % block.oid) key = str((block.oid, block.block_id)) set = self.dbh_replicas.select_for_update(['key', 'replicas'], key=key) if set == []: replicas = FSReplicas(block.oid, block.block_id) else: replicas = loads(set[0].replicas) replicas.add(serverid, block.version) # update or insert? if set == []: self.dbh_replicas.insert(key, dumps(replicas)) else: set[0].update(replicas=dumps(replicas)) def get_block_replicas(self, oid, block_id): key = str((long(oid), long(block_id))) set = self.dbh_replicas.select(['key', 'replicas'], key=key) if set == []: return None return loads(set[0].replicas) def get_block(self, oid, block_id): key = str((long(oid), long(block_id))) set = self.dbh_blocks.select(['key', 'blocks'], key=key) if set == []: return None return loads(set[0].blocks) def print_replicas_db(self): self.debug_print_db(self.dbh_replicas) def close_fs_db(self): self.dbh_blocks.close() self.dbh_replicas.close() self.dbh_tree.close() self.dbh_id.close() self.dbh_paths.close() self.dbh_objects.close()
#! /usr/bin/env python # -*- coding: iso-8859-1 -*- from buzhug import Base import time # Open database db = Base('oadb').open() # pull records print(' time __id__ url q a cmd') for record in (record for record in db): print(time.strftime("%a, %d %b %Y %H:%M:%S ", time.localtime(record.time)), record.__id__, str(record.url), record.q, record.a, str(record.cmd)) # ('Sat, 01 Jan 2011 02:44:13 ', 31, 'cases.txt', 0, None, 'None') print(' time __id__ url q a cmd') raise SystemExit # insert recrod record_id = db.insert(localtime=time.time(),filename='test',question=0,answer=1) record_id = db.insert(localtime=time.time(),filename='test',question=1,answer=2) # Close database db.close() # Get list of questions touched [record.question for record in records] # Get list of answers touched [(record.question,record.answer) for record in records] # Create database db = Base(path)
def run_test(thread_safe=False):
    """End-to-end exercise of the buzhug Base API.

    Creates/overrides the 'dummy' base (TS_Base when thread_safe is true),
    then checks defaults, inserts, string-format inserts, selections,
    updates, deletions, cleanup/commit bookkeeping and external links
    (the 'houses' base).  Relies on module-level names, fr_names and the
    date/dtime aliases.  Raises AssertionError on any failed check.
    """
    if not thread_safe:
        db = Base(r'dummy')
    else:
        db = TS_Base('dummy')
    db.create(('name',str), ('fr_name',unicode), ('age',int),
              ('size',int,300), ('birth',date,date(1994,1,14)),
              ('afloat',float,1.0),
              ('birth_hour', dtime,dtime(10,10,10)), mode='override')
    # test float conversions
    if thread_safe is False:
        f = db._file["afloat"]
        def all(v):
            # byte values of an encoded block (shadows builtin all())
            return [ord(c) for c in v]
        for i in range(10):
            afloat = random.uniform(-10**random.randint(-307,307),
                                    10**random.randint(-307,307))
            try:
                # block encoding must preserve the sign/ordering of floats
                assert cmp(afloat,0.0) == cmp(f.to_block(afloat),
                                              f.to_block(0.0))
            except:
                print afloat
                print "afloat > 0.0 ?",afloat>0.0
                print "blocks ?",f.to_block(afloat)>f.to_block(0.0)
                print all(f.to_block(afloat)),all(f.to_block(0.0))
                raise
    # defaults declared in create() must be visible
    assert db.defaults["age"] == None
    assert db.defaults["size"] == 300
    assert db.defaults["afloat"] == 1.0
    assert db.defaults["birth_hour"] == dtime(10,10,10)
    assert db.defaults["birth"] == date(1994,1,14)
    for i in range(100):
        db.insert(name=random.choice(names),
                  fr_name = unicode(random.choice(fr_names),'latin-1'),
                  age=random.randint(7,47),size=random.randint(110,175),
                  birth=date(random.randint(1858,1999),
                             random.randint(1,12),10),
                  afloat = random.uniform(-10**random.randint(-307,307),
                                          10**random.randint(-307,307)),
                  birth_hour = dtime(random.randint(0, 23),
                                     random.randint(0, 59),
                                     random.randint(0, 59)))
    assert len(db)==100
    assert isinstance(db[50].fr_name,unicode)
    print db[50].fr_name.encode('latin-1')
    db.open()
    # test if default values have not been modified after open()
    assert db.defaults["age"] == None
    assert db.defaults["size"] == 300
    assert db.defaults["afloat"] == 1.0
    assert db.defaults["birth_hour"] == dtime(10,10,10)
    assert db.defaults["birth"] == date(1994,1,14)
    for i in range(5):
        # insert a list
        db.insert(random.choice(names),
                  unicode(random.choice(fr_names),'latin-1'),
                  random.randint(7,47),random.randint(110,175),
                  date(random.randint(1958,1999),random.randint(1,12),10),
                  random.uniform(-10**random.randint(-307,307),
                                 10**random.randint(-307,307)),
                  dtime(random.randint(0, 23), random.randint(0, 59),
                        random.randint(0, 59)))
    db.insert(name=random.choice(names)) # missing fields
    # every omitted field must have been set to its default
    for field in db.field_names[2:]:
        if field == "name":
            continue
        try:
            assert getattr(db[-1],field) == db.defaults[field]
        except:
            print "attribute %s not set to default value %s" %(field,db[-1])
            raise
    # insert as string
    db.set_string_format(unicode,'latin-1')
    db.set_string_format(date,'%d-%m-%y')
    db.set_string_format(dtime,'%H-%M-%S')
    db.insert_as_strings(name="testname",fr_name=random.choice(fr_names),
                         age=10,size=123,birth="07-10-95",
                         birth_hour="20-53-3")
    assert db[-1].birth == date(1995,10,7)
    assert db[-1].name == "testname"
    assert db[-1].age == 10
    assert db[-1].afloat == db.defaults["afloat"]
    # positional insert_as_strings
    db.insert_as_strings("testname",random.choice(fr_names),
                         11,134,"09-12-94",1.0,
                         "5-6-13")
    assert db[len(db)-1].birth == date(1994,12,9)
    assert db[-1].name == "testname"
    assert db[-1].age == 11
    assert db[-1].size == 134
    assert db[-1].afloat == 1.0
    # search between 2 dates
    print '\nBirth between 1960 and 1970'
    for r in db.select(None,birth=[date(1960,1,1),date(1970,12,13)]):
        print r.name,r.birth
    print "sorted"
    for r in db.select(None,
                       birth=[date(1960,1,1),
                              date(1970,12,13)]).sort_by('+name-birth'):
        print r.name,r.birth
    f = buzhug_files.FloatFile().to_block
    def all(v):
        # byte values of the block encoding of float v
        return [ord(c) for c in f(v)]
    # search between 2 floats
    # selection by list comprehension
    s1 = [ r for r in db if 0.0 <= r.afloat <= 1e50 ]
    # selection by select
    s2 = db.select(None,'x<=afloat<=y',x=0.0,y=1e50)
    # selction by select with interval
    s3 = db.select(None,afloat=[0.0,1e50])
    try:
        assert len(s1) == len(s2) == len(s3)
    except:
        print "%s records by list comprehension, " %len(s1)
        print "%s by select by formula," %len(s2)
        print "%s by select by interval" %len(s3)
    for r in s1:
        try:
            assert r in s2
        except:
            print all(r.afloat)
    for r in s2:
        try:
            assert r in s1
        except:
            print "in select but not in list comprehension",r
            raise
    r = db[0]
    # records carry a reference to their base
    assert r.__class__.db is db
    fr=random.choice(fr_names)
    s1 = [ r for r in db
           if r.age == 30 and r.fr_name == unicode(fr,'latin-1')]
    s2 = db.select(['name','fr_name'],age=30,
                   fr_name = unicode(fr,'latin-1'))
    assert len(s1)==len(s2)
    # different ways to count the number of items
    assert len(db) == sum([1 for r in db]) == len(db.select(['name']))
    # check if version number is correctly incremented
    for i in range(5):
        recs = db.select_for_update(['name'],'True')
        version = recs[0].__version__
        recs[0].update()
        assert db[0].__version__ == version + 1
    # check if cleanup doesn't change db length
    length_before = len(db)
    db.cleanup()
    assert len(db) == length_before
    # check if selection by select on __id__ returns the same as direct
    # access by id
    recs = db.select([],'__id__ == c',c=20)
    assert recs[0] == db[20]
    # check that has_key returns False for invalid hey
    assert not db.has_key(1000)
    # drop field
    db.drop_field('name')
    # check if field was actually removed from base definition and rows
    assert not "name" in db.fields
    assert not hasattr(db[20],"name")
    # add field
    db.add_field('name',str,default="marcel")
    # check if field was added with the correct default value
    assert "name" in db.fields
    assert hasattr(db[20],"name")
    assert db[20].name == "marcel"
    # change default value
    db.set_default("name","julie")
    db.insert(age=20)
    assert db[-1].name == "julie"
    # delete a record
    db.delete([db[10]])
    # check if record has been deleted
    try:
        print db[10]
        raise Exception,"Row 10 should have been deleted"
    except IndexError:
        pass
    assert 10 not in db
    assert len(db) == length_before
    # selections
    # selection by generator expression
    # age between 30 et 32
    d_ids = []
    for r in [r for r in db if 33> r.age >= 30]:
        d_ids.append(r.__id__)
    length = len(db)
    # remove these items
    db.delete([r for r in db if 33> r.age >= 30])
    # check if correct number of records removed
    assert len(db) == length - len(d_ids)
    # check if all records have been removed
    assert not [r for r in db if 33> r.age >= 30]
    # updates
    # select name = pierre
    s1 = db.select(['__id__','name','age','birth'],name='pierre')
    # make 'pierre' uppercase
    for record in db.select_for_update(None,'name == x',x='pierre'):
        db.update(record,name = record.name.upper())
    # check if attribute was correctly updated
    # NOTE(review): this compares a whole record to the string "Pierre";
    # it looks like it should be db[rec.__id__].name == "PIERRE" --
    # confirm against the upstream buzhug test suite
    for rec in s1:
        assert db[rec.__id__] == "Pierre"
    # increment ages
    for record in db.select_for_update([],'True'):
        age = record.age
        if not record.age is None:
            db.update(record,age = record.age+1)
            # check
            assert db[record.__id__].age == age + 1
    for record in [r for r in db]:
        age = record.age
        if not record.age is None:
            db.update(record,age = record.age+1)
            # check
            assert db[record.__id__].age == age + 1
    # change dates
    for record in db.select_for_update([],'age>v',v=35):
        db.update(record,birth = date(random.randint(1958,1999),
                                      random.randint(1,12),10))
    db.commit()
    # check length after commit
    assert sum([1 for r in db]) == len(db)
    # insert new records
    for i in range(50):
        db.insert(name=random.choice(names),
                  age=random.randint(7,47),size=random.randint(110,175))
    # check that record 10 is still deleted
    try:
        print db[10]
        raise Exception,"Row 10 should have been deleted"
    except IndexError:
        pass
    print db.keys()
    print "has key 10 ?",db.has_key(10)
    assert 10 not in db
    #raw_input()
    # check that deleted_lines was cleared by commit()
    assert not db._pos.deleted_lines
    print db._del_rows.deleted_rows
    length = len(db) # before cleanup
    # physically remove the deleted items
    db.cleanup()
    # check that deleted_lines and deleted_rows are clean
    assert not db._pos.deleted_lines
    assert not db._del_rows.deleted_rows
    # check that record 10 is still deleted
    try:
        print db[10]
        raise Exception,"Row 10 should have been deleted"
    except IndexError:
        pass
    assert 10 not in db
    # check that length was not changed by cleanup
    assert len(db) == length
    assert len([ r for r in db]) == length
    # age > 30
    for r in db.select(['__id__','name','age'],
                       'name == c1 and age > c2',
                       c1 = 'pierre',c2 = 30):
        assert r.name == "pierre"
        assert r.age > 30
    # name =="PIERRE" and age > 30
    for r in db.select(['__id__','name','age','birth'],
                       'name == c1 and age > c2',
                       c1 = 'PIERRE',c2 = 30):
        assert r.name == 'PIERRE'
        assert r.age > 30
    # test with !=
    for r in db.select(['__id__'],'name != c1',c1='claire'):
        assert r.name != 'claire'
    # age > id
    # with select
    s1 = db.select(['name','__id__','age'],'age > __id__')
    for r in s1:
        assert r.age > r.__id__
    # with iter
    s2 = [ r for r in db if r.age > r.__id__ ]
    for r in s2:
        assert r.age > r.__id__
    assert len(s1) == len(s2)
    # birth > date(1978,1,1)
    # with select
    s1 = db.select(['name','__id__','age'],'birth > v',v=date(1978,1,1))
    for r in s1:
        assert r.birth > date(1978,1,1)
    # with iter
    s2 = [ r for r in db if r.birth and r.birth > date(1978,1,1) ]
    for r in s2:
        assert r.birth > date(1978,1,1)
    assert len(s1) == len(s2)
    # test with floats
    for i in range(10):
        x = random.uniform(-10**random.randint(-307,307),
                           10**random.randint(-307,307))
        s1 = [ r for r in db if r.afloat > x ]
        s2 = db.select(['name'],'afloat > v',v=x)
        assert len(s1)==len(s2)
    # base with external link
    houses = Base('houses')
    houses.create(('address',str),('flag',bool),
                  ('resident',db,db[0]),mode="override")
    addresses = ['Giono','Proust','Mauriac','Gide','Bernanos','Racine',
                 'La Fontaine']
    ks = db.keys()
    for i in range(50):
        x = random.choice(ks)
        address = random.choice(addresses)
        houses.insert(address=address,flag = address[0]>"H",
                      resident=db[x])
    # houses with jean
    s1 = []
    for h in houses:
        if h.resident.name == 'jean':
            s1.append(h)
    # by select : ???
    #s2 = houses.select([],'resident.name == v',v='jean')
    # assert len(s1) == len(s2)
    h1 = Base('houses')
    h1.open()
    l1 = len(h1.select([],flag=True))
    l2 = len(h1.select([],flag=False))
    assert l1 + l2 == len(h1)
    class DictRecord(Record):
        # record subclass allowing dotted-path item access,
        # e.g. rec["resident.name"]
        def __getitem__(self, k):
            item = self
            names = k.split('.')
            for name in names:
                item = getattr(item, name)
            return item
    h1.set_record_class(DictRecord)
    print '\nrecord_class = DictRecord, h1[0]'
    print h1[0]
    print "\nResident name: %(resident.name)s\nAddress: %(address)s" % h1[0]
def main(): X =[] Y=[] featuresDB = Base(os.getcwd()+"\\Databases\\features.db") featuresDB.open() print "features open" for rec in featuresDB: vec = [] vec.append(rec.f1) vec.append(rec.f3) vec.append(rec.f4) vec.append(rec.f5) vec.append(rec.f6) vec.append(rec.f7) vec.append(rec.f10) vec.append(rec.f11) vec.append(rec.f12) vec.append(rec.f13) vec.append(rec.f14) vec.append(rec.f15) vec.append(rec.f16) vec.append(rec.f17) vec.append(rec.f18) vec.append(rec.f19) vec.append(rec.f20) vec.append(rec.f21) vec.append(rec.f22) vec.append(rec.f23) X.append(vec) Y.append(rec.score) print "building classifier" Y = np.array(Y) ybar = Y.mean() for i in range(len(Y)): if Y[i]<ybar: Y[i]=1 else: Y[i]=2 scaler = Scaler().fit(X) X = scaler.transform(X) X= np.array(X) Y=np.array(Y) skf = cross_validation.StratifiedKFold(Y,k=2) for train, test in skf: X_train, X_test = X[train], X[test] y_train, y_test = Y[train], Y[test] clf = ExtraTreesClassifier(n_estimators=8,max_depth=None,min_split=1,random_state=0,compute_importances=True) scores = cross_validation.cross_val_score(clf,X_train,y_train,cv=5) clf.fit_transform(X_train,y_train) print "Accuracy: %0.4f (+/- %0.2f)" % (scores.mean(), scores.std() / 2) print clf.feature_importances_ y_pred =clf.predict(X_test) print classification_report(y_test,y_pred) model=(scaler,clf) joblib.dump(model,'AestheticModel\\aestheticModel.pkl') print "Done"
def run_test(thread_safe=False): if not thread_safe: db = Base(r'dummy') else: db = TS_Base('dummy') db.create(('name', str), ('fr_name', unicode), ('age', int), ('size', int, 300), ('birth', date, date(1994, 1, 14)), ('afloat', float, 1.0), ('birth_hour', dtime, dtime(10, 10, 10)), mode='override') # test float conversions if thread_safe is False: f = db._file["afloat"] def all(v): return [ord(c) for c in v] for i in range(10): afloat = random.uniform(-10**random.randint(-307, 307), 10**random.randint(-307, 307)) try: assert cmp(afloat, 0.0) == cmp(f.to_block(afloat), f.to_block(0.0)) except: print afloat print "afloat > 0.0 ?", afloat > 0.0 print "blocks ?", f.to_block(afloat) > f.to_block(0.0) print all(f.to_block(afloat)), all(f.to_block(0.0)) raise assert db.defaults["age"] == None assert db.defaults["size"] == 300 assert db.defaults["afloat"] == 1.0 assert db.defaults["birth_hour"] == dtime(10, 10, 10) assert db.defaults["birth"] == date(1994, 1, 14) for i in range(100): db.insert(name=random.choice(names), fr_name=unicode(random.choice(fr_names), 'latin-1'), age=random.randint(7, 47), size=random.randint(110, 175), birth=date(random.randint(1858, 1999), random.randint(1, 12), 10), afloat=random.uniform(-10**random.randint(-307, 307), 10**random.randint(-307, 307)), birth_hour=dtime(random.randint(0, 23), random.randint(0, 59), random.randint(0, 59))) assert len(db) == 100 assert isinstance(db[50].fr_name, unicode) print db[50].fr_name.encode('latin-1') db.open() # test if default values have not been modified after open() assert db.defaults["age"] == None assert db.defaults["size"] == 300 assert db.defaults["afloat"] == 1.0 assert db.defaults["birth_hour"] == dtime(10, 10, 10) assert db.defaults["birth"] == date(1994, 1, 14) for i in range(5): # insert a list db.insert( random.choice(names), unicode(random.choice(fr_names), 'latin-1'), random.randint(7, 47), random.randint(110, 175), date(random.randint(1958, 1999), random.randint(1, 12), 10), 
random.uniform(-10**random.randint(-307, 307), 10**random.randint(-307, 307)), dtime(random.randint(0, 23), random.randint(0, 59), random.randint(0, 59))) db.insert(name=random.choice(names)) # missing fields for field in db.field_names[2:]: if field == "name": continue try: assert getattr(db[-1], field) == db.defaults[field] except: print "attribute %s not set to default value %s" % (field, db[-1]) raise # insert as string db.set_string_format(unicode, 'latin-1') db.set_string_format(date, '%d-%m-%y') db.set_string_format(dtime, '%H-%M-%S') db.insert_as_strings(name="testname", fr_name=random.choice(fr_names), age=10, size=123, birth="07-10-95", birth_hour="20-53-3") assert db[-1].birth == date(1995, 10, 7) assert db[-1].name == "testname" assert db[-1].age == 10 assert db[-1].afloat == db.defaults["afloat"] db.insert_as_strings("testname", random.choice(fr_names), 11, 134, "09-12-94", 1.0, "5-6-13") assert db[len(db) - 1].birth == date(1994, 12, 9) assert db[-1].name == "testname" assert db[-1].age == 11 assert db[-1].size == 134 assert db[-1].afloat == 1.0 # search between 2 dates print '\nBirth between 1960 and 1970' for r in db.select(None, birth=[date(1960, 1, 1), date(1970, 12, 13)]): print r.name, r.birth print "sorted" for r in db.select(None, birth=[date(1960, 1, 1), date(1970, 12, 13)]).sort_by('+name-birth'): print r.name, r.birth f = buzhug_files.FloatFile().to_block def all(v): return [ord(c) for c in f(v)] # search between 2 floats # selection by list comprehension s1 = [r for r in db if 0.0 <= r.afloat <= 1e50] # selection by select s2 = db.select(None, 'x<=afloat<=y', x=0.0, y=1e50) # selction by select with interval s3 = db.select(None, afloat=[0.0, 1e50]) try: assert len(s1) == len(s2) == len(s3) except: print "%s records by list comprehension, " % len(s1) print "%s by select by formula," % len(s2) print "%s by select by interval" % len(s3) for r in s1: try: assert r in s2 except: print all(r.afloat) for r in s2: try: assert r in s1 except: print 
"in select but not in list comprehension", r raise r = db[0] assert r.__class__.db is db fr = random.choice(fr_names) s1 = [r for r in db if r.age == 30 and r.fr_name == unicode(fr, 'latin-1')] s2 = db.select(['name', 'fr_name'], age=30, fr_name=unicode(fr, 'latin-1')) assert len(s1) == len(s2) # different ways to count the number of items assert len(db) == sum([1 for r in db]) == len(db.select(['name'])) # check if version number is correctly incremented for i in range(5): recs = db.select_for_update(['name'], 'True') version = recs[0].__version__ recs[0].update() assert db[0].__version__ == version + 1 # check if cleanup doesn't change db length length_before = len(db) db.cleanup() assert len(db) == length_before # check if selection by select on __id__ returns the same as direct # access by id recs = db.select([], '__id__ == c', c=20) assert recs[0] == db[20] # check that has_key returns False for invalid hey assert not db.has_key(1000) # drop field db.drop_field('name') # check if field was actually removed from base definition and rows assert not "name" in db.fields assert not hasattr(db[20], "name") # add field db.add_field('name', str, default="marcel") # check if field was added with the correct default value assert "name" in db.fields assert hasattr(db[20], "name") assert db[20].name == "marcel" # change default value db.set_default("name", "julie") db.insert(age=20) assert db[-1].name == "julie" # delete a record db.delete([db[10]]) # check if record has been deleted try: print db[10] raise Exception, "Row 10 should have been deleted" except IndexError: pass assert 10 not in db assert len(db) == length_before # selections # selection by generator expression # age between 30 et 32 d_ids = [] for r in [r for r in db if 33 > r.age >= 30]: d_ids.append(r.__id__) length = len(db) # remove these items db.delete([r for r in db if 33 > r.age >= 30]) # check if correct number of records removed assert len(db) == length - len(d_ids) # check if all records have been 
removed assert not [r for r in db if 33 > r.age >= 30] # updates # select name = pierre s1 = db.select(['__id__', 'name', 'age', 'birth'], name='pierre') # make 'pierre' uppercase for record in db.select_for_update(None, 'name == x', x='pierre'): db.update(record, name=record.name.upper()) # check if attribute was correctly updated for rec in s1: assert db[rec.__id__] == "Pierre" # increment ages for record in db.select_for_update([], 'True'): age = record.age if not record.age is None: db.update(record, age=record.age + 1) # check assert db[record.__id__].age == age + 1 for record in [r for r in db]: age = record.age if not record.age is None: db.update(record, age=record.age + 1) # check assert db[record.__id__].age == age + 1 # change dates for record in db.select_for_update([], 'age>v', v=35): db.update(record, birth=date(random.randint(1958, 1999), random.randint(1, 12), 10)) db.commit() # check length after commit assert sum([1 for r in db]) == len(db) # insert new records for i in range(50): db.insert(name=random.choice(names), age=random.randint(7, 47), size=random.randint(110, 175)) # check that record 10 is still deleted try: print db[10] raise Exception, "Row 10 should have been deleted" except IndexError: pass print db.keys() print "has key 10 ?", db.has_key(10) assert 10 not in db #raw_input() # check that deleted_lines was cleared by commit() assert not db._pos.deleted_lines print db._del_rows.deleted_rows length = len(db) # before cleanup # physically remove the deleted items db.cleanup() # check that deleted_lines and deleted_rows are clean assert not db._pos.deleted_lines assert not db._del_rows.deleted_rows # check that record 10 is still deleted try: print db[10] raise Exception, "Row 10 should have been deleted" except IndexError: pass assert 10 not in db # check that length was not changed by cleanup assert len(db) == length assert len([r for r in db]) == length # age > 30 for r in db.select(['__id__', 'name', 'age'], 'name == c1 and age > c2', 
c1='pierre', c2=30): assert r.name == "pierre" assert r.age > 30 # name =="PIERRE" and age > 30 for r in db.select(['__id__', 'name', 'age', 'birth'], 'name == c1 and age > c2', c1='PIERRE', c2=30): assert r.name == 'PIERRE' assert r.age > 30 # test with != for r in db.select(['__id__'], 'name != c1', c1='claire'): assert r.name != 'claire' # age > id # with select s1 = db.select(['name', '__id__', 'age'], 'age > __id__') for r in s1: assert r.age > r.__id__ # with iter s2 = [r for r in db if r.age > r.__id__] for r in s2: assert r.age > r.__id__ assert len(s1) == len(s2) # birth > date(1978,1,1) # with select s1 = db.select(['name', '__id__', 'age'], 'birth > v', v=date(1978, 1, 1)) for r in s1: assert r.birth > date(1978, 1, 1) # with iter s2 = [r for r in db if r.birth and r.birth > date(1978, 1, 1)] for r in s2: assert r.birth > date(1978, 1, 1) assert len(s1) == len(s2) # test with floats for i in range(10): x = random.uniform(-10**random.randint(-307, 307), 10**random.randint(-307, 307)) s1 = [r for r in db if r.afloat > x] s2 = db.select(['name'], 'afloat > v', v=x) assert len(s1) == len(s2) # base with external link houses = Base('houses') houses.create(('address', str), ('flag', bool), ('resident', db, db[0]), mode="override") addresses = [ 'Giono', 'Proust', 'Mauriac', 'Gide', 'Bernanos', 'Racine', 'La Fontaine' ] ks = db.keys() for i in range(50): x = random.choice(ks) address = random.choice(addresses) houses.insert(address=address, flag=address[0] > "H", resident=db[x]) # houses with jean s1 = [] for h in houses: if h.resident.name == 'jean': s1.append(h) # by select : ??? 
#s2 = houses.select([],'resident.name == v',v='jean') # assert len(s1) == len(s2) h1 = Base('houses') h1.open() l1 = len(h1.select([], flag=True)) l2 = len(h1.select([], flag=False)) assert l1 + l2 == len(h1) class DictRecord(Record): def __getitem__(self, k): item = self names = k.split('.') for name in names: item = getattr(item, name) return item h1.set_record_class(DictRecord) print '\nrecord_class = DictRecord, h1[0]' print h1[0] print "\nResident name: %(resident.name)s\nAddress: %(address)s" % h1[0]
def __init__(self, dbname):
    """Open the buzhug base *dbname* and configure its string storage."""
    base = Base(dbname)
    self.db = base.open()
    # Unicode fields are stored on disk as UTF-8.
    self.db.set_string_format(unicode, 'utf-8')
#!/usr/bin/python
# Script to extract played songs from a shoutcast page, identify new songs
# and finally write them to a file which can be used by lastfmsubmitd.
# Jan 2014 by Apie
import datetime
import urllib2
import re
import yaml
import pytz
from buzhug import Base

db = Base('/home/denick/pinguin')
yamlfile = '/var/spool/lastfm/played.yaml'
#db.create(('date',datetime.datetime),('artist',str),('title',str))
url = "http://pr128.pinguinradio.nl/played.html"
headers = {'User-Agent': 'Mozilla/5.0'}

db.open()

# Fetch the head of the "recently played" page; 3000 bytes is enough to
# cover the table holding the most recent entries.
req = urllib2.Request(url, None, headers)
html = urllib2.urlopen(req)
contents = html.read(3000)

# Pull the play time, artist and title of the last completed song out of
# the HTML table (the row just before "Current Song").
m = re.match('.*Song Title</b></td></tr><tr><td>([0-9:]{8})</td><td>(.*)-(.*)<td><b>Current Song</b>.*', contents)
time = m.group(1)
yamlfile = yamlfile + time
artist = m.group(2).strip()
title = m.group(3).strip()

# Combine today's date with the scraped HH:MM:SS into a single timestamp.
dateformat = "%Y-%m-%d"
date = datetime.datetime.strftime(datetime.date.today(), dateformat)
timestamp = datetime.datetime.strptime(date + ' ' + time, dateformat + " %H:%M:%S")
#lastfmsubmitd doesnt accept timezones so convert to UTC
class TransactionsDB(object):
    """Buzhug-backed store of bank transactions, plus the query helpers
    used by the front end (with a small per-session result cache)."""

    BASE = 'banan/transactions'

    def __init__(self, conf):
        self.config = conf
        self._sessions = {}  # per-session cache of assembled query results
        self.open()

    # Management

    def open(self):
        """Open the base, creating the schema on first use."""
        self.db = Base(TransactionsDB.BASE)
        try:
            self.db.open()
        except IOError:
            self.db.create(('amount', float),
                           ('amount_local', float),
                           ('date', date),
                           ('account', str),
                           ('label', str),
                           ('currency', str))

    def close(self):
        self.db.close()

    def clearall(self):
        """Drop everything and start over with an empty base."""
        self.db.destroy()
        self.open()

    def insert(self, entry):
        """Insert one parsed transaction dict as a record."""
        self.db.insert(amount=entry['amount'],
                       amount_local=entry['amount_local'],
                       date=entry['date'],
                       account=entry['account'],
                       label=entry['label'],
                       currency=entry['currency'])

    def feed(self, fpath, parser, skip_duplicates=True, overwrite=False,
             delete=False, dry_run=False):
        """Parse *fpath* with *parser* and merge its transactions into the base."""
        deleted = added = 0
        for entry in parser.parse(fpath):
            if dry_run:
                # Preview only: show what would be inserted, change nothing.
                print('%s %-40s\t%12.2f %s' % (entry['date'].isoformat(),
                                               entry['account'][:40],
                                               entry['amount'],
                                               entry['currency']))
                continue
            if skip_duplicates or overwrite or delete:
                # An entry with the same date/account/amount counts as a dup.
                _dup = self.db(date=entry['date'], account=entry['account'],
                               amount=entry['amount'])
                if _dup:
                    if overwrite or delete:
                        deleted += len(_dup)
                        self.db.delete(_dup)
                    else:
                        continue
            if delete:
                continue
            entry['label'] = self.config.assign_label(entry)
            self.insert(entry)
            added += 1
        if not dry_run:
            INFO(' added %i transactions' % added)
            INFO(' deleted %i transactions' % deleted)
            parser.post_process(self.db, added)

    def update_labels(self):
        """Re-run label assignment over every stored record."""
        # Load all records into memory. File will get corrupt if using the iterator.
        records = [rec for rec in self.db]
        for record in records:
            as_dict = dict((field, getattr(record, field)) for field in record.fields)
            label = self.config.assign_label(as_dict)
            if label != record.label:
                self.db.update(record, label=label)
        self.db.cleanup()

    # Queries

    get_amount = lambda rec: rec.amount_local

    def results_as_text(self, results):
        """Render a result set as aligned text lines, sorted by date."""
        results.sort_by('date')
        idx = 0
        record = results[idx]
        text_list = []
        while True:
            text_list.append('%s %-40s\t%12.2f %s' % (record.date.isoformat(),
                                                      unicode(record.account[:40], 'utf-8'),
                                                      record.amount,
                                                      record.currency))
            try:
                idx += 1
                record = results[idx]
            except IndexError:
                return text_list

    def assemble_data(self, sid, datatype, foreach, show, select):
        """Aggregate transactions per label / month / year for one session.

        Returns (True, payload) on success — payload is flot plot data or
        text listings depending on *datatype* — or (False, error string).
        """
        try:
            session = self._sessions.get(sid, {})
            if session:
                if session['raw_query'] == (foreach, show, select):
                    # Same query, return cached result
                    return True, \
                        self._sessions[sid]['flot_' + show] if datatype == 'plot' else \
                        self._sessions[sid]['text']
            # Helpers
            get_amount = lambda rec: rec.amount_local
            M = range(1, 13)
            total = strlen = 0
            data = {}
            text = {}
            query = 'date1 <= date < date2 and label == l'
            if foreach == 'label':
                if session:
                    if session['raw_query'][0] == 'label' and session['raw_query'][2] == select:
                        # Same query, but different presentation (sum or average)
                        return True, \
                            self._sessions[sid]['flot_' + show] if datatype == 'plot' else \
                            self._sessions[sid]['text']
                # New query: *select* carries one or two MMYYYY stamps.
                dates = re.findall('[0-9]{6}', unquote_plus(select))
                date1 = date2 = date(int(dates[0][2:]), int(dates[0][:2]), 1)
                if len(dates) == 2:
                    date2 = date(int(dates[1][2:]), int(dates[1][:2]), 1)
                # Push date2 to the first day of the following month.
                date2 = date(date2.year + (date2.month == 12), M[date2.month - 12], 1)
                for label in self.config.labels.iterkeys():
                    results = self.db.select(None, query,
                                             l=label, date1=date1, date2=date2)
                    value = sum(map(get_amount, results))
                    if abs(value) > 1:
                        data[label] = value
                        if label not in self.config.cash_flow_ignore:
                            total += value
                        else:
                            label += '*'
                        text[label] = self.results_as_text(results)
                        strlen = len(text[label][-1])
                        sumstr = '%12.2f %s' % (value, self.config.local_currency)
                        text[label].append('-' * strlen)
                        text[label].append(' ' * (strlen - len(sumstr)) + sumstr)
                # Monthly average over the selected span.
                ydelta = date2.year - date1.year
                mdelta = date2.month - date1.month
                delta = 12 * ydelta + mdelta
                session['flot_average'] = {}
                for key, val in data.iteritems():
                    session['flot_average'][key] = val / delta
            elif foreach in ('month', 'year'):
                # New query: walk backwards one month/year at a time.
                date1 = date2 = first = datetime.now()
                if foreach == 'month':
                    first = date(date1.year - 1, date1.month, 1)
                    date1 = date(date1.year - (date1.month == 1), M[date1.month - 2], 1)
                    date2 = date(date2.year, date2.month, 1)
                else:
                    first = date(date1.year - 9, 1, 1)
                    date1 = date(date1.year, 1, 1)
                    date2 = date(date2.year + 1, 1, 1)
                select = unquote_plus(select)
                while date1 >= first:
                    results = self.db.select(None, query,
                                             l=select, date1=date1, date2=date2)
                    value = sum(map(get_amount, results))
                    date2 = date1
                    if foreach == 'month':
                        key = date1.strftime('%Y.%m')
                        date1 = date(date2.year - (date2.month == 1), M[date2.month - 2], 1)
                    else:
                        key = str(date1.year)
                        date1 = date(date2.year - 1, 1, 1)
                    data[key] = value
                    total += value
                    if results:
                        text[key] = self.results_as_text(results)
                        strlen = len(text[key][-1])
                        sumstr = '%12.2f %s' % (value, self.config.local_currency)
                        text[key].append('-' * strlen)
                        text[key].append(' ' * (strlen - len(sumstr)) + sumstr)
            # All good, set new session attributes
            session['raw_query'] = (foreach, show, select)
            session['flot_sum'] = data
            session['text'] = text
            if session['text']:
                session['text']['***'] = ['-' * strlen,
                                          'SUM: %12.2f %s' % (total, self.config.local_currency),
                                          '-' * strlen]
            self._sessions[sid] = session
            return True, session['flot_' + show] if datatype == 'plot' else session['text']
        except Exception as e:
            return False, str(e)
from buzhug import Base import urllib2 as urllib import json import sys import pickle import os #now ge those books unfound = [] fil = open('badguys', 'r+') unfound = pickle.load(fil) print 'pickle loaded' userdir = os.getcwd() bookDB = Base(userdir + '/db/bookDB').open() host = 'http://gbserver3a.cs.unc.edu' bookAsJson = '/book-as-json/?id=%s' for un in unfound: try: url = host + bookAsJson % un print un print 'loading ', url fp = urllib.urlopen(host + bookAsJson % un) bytes = fp.read().decode('utf-8') book = json.loads(bytes) print 'url loaded', fp try: #make sure record doesnt exist? Implelement later #check the stock reviewed status and put it in the #field I will play with
#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
from buzhug import Base
import time

# Open database
db = Base('oadb').open()

# Dump every record with a human-readable timestamp.
print(' time __id__ url q a cmd')
for record in db:
    print(time.strftime("%a, %d %b %Y %H:%M:%S ", time.localtime(record.time)),
          record.__id__, str(record.url), record.q, record.a, str(record.cmd))
# ('Sat, 01 Jan 2011 02:44:13 ', 31, 'cases.txt', 0, None, 'None')
print(' time __id__ url q a cmd')
raise SystemExit

# Insert record — NOTE(review): unreachable, the SystemExit above stops the
# script first; also these field names differ from those read in the loop.
record_id = db.insert(localtime=time.time(), filename='test', question=0, answer=1)
record_id = db.insert(localtime=time.time(), filename='test', question=1, answer=2)

# Close database
db.close()
# Get list of questions touched
def __init__(self):
    """Load settings, sync, and attach the 'Posts' buzhug base (not opened yet)."""
    Settings.load()
    self.sync_database()
    # NOTE(review): 'open' is used here as a state flag, not a method — confirm callers.
    self.open = False
    posts_path = os.path.join(Settings.DB_DIR, 'Posts')
    self.db = Base(posts_path)
from time import time from buzhug import Base from os import system start = time() db=Base('t1').create(('a',int),('b',int),('c',str)) for i in range(0, 1000): db.insert(i, i, "stuff%s" % i) db.commit() print "Finished: %ss" % (time() - start) system("pause")
#!/usr/bin/python2.7 #this is a commandline utility to quickly get book text for testing purposes from buzhug import Base import os import sys import json db = Base(os.getcwd() + '/db/bookDB') db.open() c=0 l = len(sys.argv) while c < l-1: c+=1 inp = sys.argv[c] dupProtect = True bookID = int(inp) records = db.select(ID = bookID) bookText = '' slug = '' for r in records: j = json.loads(r.json) if j.get('ID') == bookID: text = j.get('pages') for p in text: bookText += p['text'] + '\n' slug = r.slug if dupProtect: break print bookText db.close()
try: from buzhug import ProxyBase, buzhug_files except: from buzhug_client import ProxyBase import buzhug_files names = [ 'pierre', 'claire', 'simon', 'camille', 'jean', 'florence', 'marie-anne' ] fr_names = ['andr\x82', 'fran\x87ois', 'h\x82l\x8ane'] # latin-1 encoding remote = False if not remote: db = Base(r'dummy') else: db = ProxyBase('dummy') db.create(('name', str), ('fr_name', unicode), ('age', int), ('size', int), ('birth', date), ('afloat', float), mode='override') for i in range(100): db.insert(name=random.choice(names), fr_name=unicode(random.choice(fr_names), 'latin-1'), age=random.randint(7, 47), size=random.randint(110, 175), birth=date(random.randint(1958, 1999), random.randint(1, 12), 10), afloat=random.uniform(-10**random.randint(-307, 307),
import sys import os sys.path.append(os.getcwd()+"\\lib") from bs4 import BeautifulSoup, SoupStrainer import urllib2 from urllib2 import Request, urlopen, URLError, HTTPError from buzhug import Base import re import unicodedata num_imgs=0 imagesDB = Base(os.getcwd()+"\\Databases\\training_images.db") try: imagesDB.open() except IOError: print "creaitng imabegase" imagesDB.create(('title',str),("url",str),("score",float),("fileloc",str )) ##imagesDB.create(('title',str),("url",str),("score",float),("fileloc",str ), mode="override") challenges = {(1005,110),(1154,44),(430,82),(296,52), (1412,11),(980,52),(616,70),(536,67),(707,64),(431,61),(565,61)} ##challenges = {(430,1),(1154,1)} for challenge in challenges: print "challenge #",challenge[0] for i in range(1,challenge[1]+1):#pages on the website print "ripping page " + str(i) page = urllib2.urlopen("<http://www.dpchallenge.com/challenge_results.php?CHALLENGE_ID="+str(challenge[0])+"&page="+str(i)+">") rawHtml = page.read() anchors = SoupStrainer("a", {'class':'i'}) soup = BeautifulSoup(rawHtml, parse_only=anchors)
class RepDB:
    """Buzhug-backed map from (oid, block_id) to a stored version string."""

    def __init__(self, path):
        self.path = path
        self.dbh_stored_blocks = Base(self.path)
        try:
            self.dbh_stored_blocks.create(('key', str), ('version', str))
        except IOError:
            # Base already exists on disk; it will be opened via open().
            pass

    def open(self):
        self.dbh_stored_blocks.open()

    def add(self, oid, block_id, version):
        """Insert or overwrite the version stored for (oid, block_id)."""
        key = str((oid, block_id))
        # Let's see if we already have this key stored.
        matches = self.dbh_stored_blocks.select_for_update(['key', 'version'], key=key)
        if not matches:
            self.dbh_stored_blocks.insert(key, str(version))
        else:
            # BUG FIX: previously this called update() with no fields, so the
            # stored version was never changed; write the new value explicitly.
            matches[0].update(version=str(version))

    def get(self, oid, block_id):
        """Return the version string stored for (oid, block_id)."""
        key = str((oid, block_id))
        result = self.dbh_stored_blocks.select(['key', 'version'], key=key)
        return result[0].version

    def update(self, oid, block_id, version):
        # add() already upserts, so update is a plain alias.
        self.add(oid, block_id, version)

    def delete(self, oid, block_id):
        """Remove the record stored for (oid, block_id)."""
        key = str((oid, block_id))
        matches = self.dbh_stored_blocks.select_for_update(['key', 'version'], key=key)
        self.dbh_stored_blocks.delete(matches[0])

    def close(self):
        self.dbh_stored_blocks.close()

    def getIterator(self):
        """Return an iterator over a snapshot of all stored records."""
        return RepDBIterator([record for record in self.dbh_stored_blocks])
def main(): AestheticScorer.LoadModel() #Make the database. We need tables for events, locations, photos, people, and faces events = Base(os.getcwd()+"\\Databases\\events.db") events.create(('name',str),('firsttime',str),('lasttime',str),mode="override") locations = Base(os.getcwd()+"\\Databases\\locations.db") locations.create(('lat',float),('lon',float),('name',str),mode="override") photos = Base(os.getcwd()+"\\Databases\\photos.db") photos.create(('path',str),('timestamp',str),('aestheticscore',int),('locationid',int), ('eventid',int), ("width", int), ("height", int),mode="override") people = Base(os.getcwd()+"\\Databases\\people.db") people.create(('name',str),('age',int),mode="override") faces = Base(os.getcwd()+"\\Databases\\faces.db") faces.create(('photoid',int),('thumbnail',str),('personid',int),('x',int),('y',int),('w',int),('h',int),mode="override") # Walk through all the directories, making a list of photos, geotagging the lowest level subdirectories, and making # a master list of the photos and geotags photolist = [] geotaglist = [] print "geocoding directories" for dirname, dirnames, filenames in os.walk(os.getcwd()+"\\Images\\photos"): #geocode all the subdirectory names for subdirname in dirnames: n,lat,long = geotag.findlocation(subdirname) #if we have a problem geotagging, prompt the user for a different location name while n == "NONE": newname = raw_input("We couldn't find the location '" + subdirname + "'. 
Type in another place name to try.") n, lat, long = geotag.findlocation(subdirname) #once we have a valid location, insert it if it's not already in the database if not locations(name=n): locations.insert(float(lat), float(long), n) #add all the files to a file list, and go ahead and make a parallel geotags for filename in filenames: #print "filename is ",filename if filename[-3:] == "jpg" or filename[-3:] == "JPG": #find the id for that subdirname in the database so we can geotag it locname = dirname[dirname.rfind('\\') + 1:] #location = locations(name=dirname) photolist.append(os.path.join(dirname,filename)) geotaglist.append((os.path.join(dirname,filename),locname)) #make a list to identify which event each photo is in #print photolist print "getting events" eventLabels, uniqueEvents = timecluster.eventCluster(photolist) #print "events: " #print eventLabels print uniqueEvents #insert the events into the event database for label in uniqueEvents: events.insert(label[1],"","") #the events are already sorted according to photo names #now sort the geotags and photolist according to photo names as well, so we'll have parallel lists geotaglist.sort() photolist.sort() #now we can finally insert each photo, with a name, event, and geotag for i in range(len(photolist)): print i, photolist[i] width, height = Image.open(photolist[i]).size photos.insert(photolist[i],eventLabels[i][1], AestheticScorer.getScore(photolist[i]), locations(name=geotaglist[i][1])[0].__id__, eventLabels[i][0], int(width), int(height)) print "finding faces" #for all the images we just gathered, find the people and faces, and insert them into the database facelist = [] for file in photolist: facelist.append(detectfaces.detectFaceInImage(file)) faceimages, projections, imgs, minFaces = detectfaces.faceLBP(facelist)#detectfaces.facePCA(facelist) labels, nclusters = detectfaces.gMeansCluster(projections, minFaces) #detectfaces.visualizeResults(faceimages, labels, nclusters) #add the individuals we found 
in the photos into the people database i = 0 for i in range(0,nclusters): people.insert(str(i),0) #add the faces, linking them to the individuals faceindex = 0 photoindex = 0 for listing in facelist: facerects = listing[1:] for entry in facerects: faces.insert(photoindex,imgs[faceindex],people[labels[faceindex]].__id__,entry[0],entry[1],entry[2],entry[3]) faceindex = faceindex + 1 photoindex = photoindex + 1 print "updating events" #update the events table to include tthe start time eventtimes=[] for event in events.select(None).sort_by("+__id__"): orderedPhotos = photos.select(eventid=event.__id__).sort_by("+timestamp") events.update(event,firsttime=orderedPhotos[0].timestamp) events.update(event,lasttime=orderedPhotos[-1].timestamp) print "Done"
import datetime ###opening remarks print "\n**************************" print "Welcome to Tar Heel Ranker v1.0. The goal of this project is to actively assist in the reviewing process of books that have no home. For more information contact Jesse Osiecki at [email protected]" print "standard usage: python tarheelreview.py -FLAGS" print "Flags are: r for redisbayes checker (Bayesian spam filter. Make sure Redis is running for this" print "g for gibberish detector using markov chains on letter transitions" print "m for markover -- markov chain filter using word transitions" print "a for author check. Outputs number of books a given author has in the Database that are already reviewed" print "s for rating check. Tests books current rating against a set value (4) and returns boolean if higher" print "recommended usage is -masg" print "NOTE: to end the program, simply press CTRL-D and the reviewed books will be noted in a file in the CWD" print "\n\n**************************" db = Base(os.getcwd() + '/db/bookDB').open() #open redis rb = redisbayes.RedisBayes(redis.StrictRedis(host='localhost', port=6379, db=0)) books = db.select(reviewed=False) #select all unreviewed markov = markover.Markov() def author_checker(aut): authored_books = db.select(author_id=aut, reviewed=True) return len(authored_books) def rating_checker(rat, lim): return rat >= lim redis_bayes = False gibb = False markover = False author_check = False
#!/bin/python2.7 from buzhug import Base import os db = Base(os.getcwd() + '/db/bookDB').open() res = db.select(reviewed=True) #select all reviewed for r in res: print r.text.encode('utf-8')
def main(): events = Base(os.getcwd()+"\\Databases\\events.db") locations = Base(os.getcwd()+"\\Databases\\locations.db") photos = Base(os.getcwd()+"\\Databases\\photos.db") people = Base(os.getcwd()+"\\Databases\\people.db") faces = Base(os.getcwd()+"\\Databases\\faces.db") training = Base(os.getcwd()+"\\Databases\\training_images.db") features = Base(os.getcwd()+"\\Databases\\features.db") try: print "============ events ================" events.open() for field in events.fields: print field,events.fields[field] print "len",len(events),"\n\n" for record in events.select().sort_by("+firsttime"): print record elist =[ None for i in range(len(events))] print elist print "============ locations ================" locations.open() for field in locations.fields: print field,locations.fields[field] print "len",len(locations),"\n\n" #for record in locations: # print record print "============ photos ================" photos.open() for field in photos.fields: print field,photos.fields[field] print "len",len(photos),"\n\n" for record in photos: print record print "============ people ================" people.open() for field in people.fields: print field,people.fields[field] print "len",len(people),"\n\n" #for record in people: # print record print "=========== faces ==============" faces.open() for field in faces.fields: print field,faces.fields[field] print "len",len(faces),"\n\n" #for record in faces: # print record print "============ training ========" training.open() for field in training.fields: print field, training.fields[field] print "len",len(training),"\n\n" print "============ features ===========" features.open() for field in features.fields: print field, features.fields[field] print "len",len(features),"\n\n" except IOError as err: print "no database there:",err