def highQuality(self):
        global COUNTER
        
        #Search scraper  
        Search.search()

        #Displaying a message window telling the user to select their background image
        displayBackgroundMessage()
        imagePath = getImagePath()
        tileFolder = "ImageTiles"
        image = Image.open(imagePath)
        width = image.size[0]
        size = getMuilt(width)

        build_mosaic(
            input_path=imagePath,
            output_path="mosaicImage" + str(COUNTER) + ".png",
            collection_path=tileFolder,
            #Enlarge the source image by this factor
            zoom=size,
            #Tile size in pixels
            thumb_size=20,
            fuzz=10,
            new_colormap=False
        )

        COUNTER += 1
        displayFinishMessage()
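
The getMuilt helper called above does not appear anywhere in this listing. A minimal hypothetical sketch (the name suggests "get multiplier"), assuming it picks an integer zoom factor so the mosaic output lands near a fixed target width; the constant is an assumption, not from the original code:

# Hypothetical sketch of the getMuilt helper used above; not shown in this listing.
TARGET_WIDTH = 4000  # assumed target output width, not from the original code

def getMuilt(width):
    # Larger zoom for smaller source images, never less than 1x
    return max(1, TARGET_WIDTH // width)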
Example #2
 def true(self, CE, mapping_list):
     assert len(mapping_list) > 0
     
     nmapping_list = []
     for mapping in mapping_list:
         new_self = deepcopy(self)
         assert new_self == self
         for i in range(len(self.args)):
             new_self.args[i] = self.args[i].unify(mapping)
             
         #print 'Searching for %s' % self
         if len(Search.determination_list(CE, new_self)) == 0:
             continue
         
         if Search.search_true(CE, self) is True:
             nmapping_uf = Search.search_true(CE, self, return_mapping=True)
             nmapping = {}
             for a, b in nmapping_uf.items():
                 #print a, b
                 if isinstance(a, Atom):
                     nmapping[b] = a
                 else:
                     nmapping[a] = b
                     
             # Note: items() concatenation is Python 2 only; Python 3 would
             # need e.g. dict(mapping, **nmapping).
             nmapping_list.append(dict(mapping.items() + nmapping.items()))
     #print self, mapping_list, nmapping_list
     return len(nmapping_list) > 0, nmapping_list
Example #3
def ui(inp):
    # Handle search text

    if Search.text == "":
        if inp == curses.KEY_UP:
            ChannelBox.updateChannelBox(0)
        if inp == curses.KEY_DOWN:
            ChannelBox.updateChannelBox(1)
            
    if (inp == curses.ascii.BS) or (inp == curses.ascii.SP):
        Search.update(inp)
        if (Search.text == ""):
            ChannelBox.updateChannelBox(inp)
        else:
            ChannelBox.fillSearchBox(Search.text, inp)
            
    # Letters or digits (ASCII A-Z, a-z, 0-9)
    if ((inp >= 65 and inp <= 90) or (inp >= 97 and inp <= 122)) or (inp >= 48 and inp <= 57):
        Search.update(inp)
        ChannelBox.fillSearchBox(Search.text, inp)
    elif not(Search.text == ""):
        if inp == curses.KEY_UP:
            ChannelBox.fillSearchBox(Search.text, 0)
        if inp == curses.KEY_DOWN:
            ChannelBox.fillSearchBox(Search.text, 1)
    if inp == curses.ascii.ESC:
        exit()

    if inp == curses.KEY_ENTER:
        row = ChannelBox.getSelectedRow()
        Utils.startPlaying(row[3])
Example #4
def write():
    # First finger scan
    setBaud()
    data = genImg.getHeader()
    ser.write(bytearray(data))
    time.sleep(1)
    s = ser.read(ser.inWaiting())
    print([hex(ord(c)) for c in s])
    genImg.parse(s)
    # generate character file of the finger image.
    setBaud()
    data = Img2Tz.getHeader(0x01)
    ser.write(bytearray(data))
    time.sleep(1)
    s = ser.read(ser.inWaiting())
    print([hex(ord(c)) for c in s])
    Img2Tz.parse(s)

    setBaud()
    data = Search.getHeader(0x01, 0x0000, 0x0064)
    ser.write(bytearray(data))
    time.sleep(1)
    s = ser.read(ser.inWaiting())
    print([hex(ord(c)) for c in s])
    print(Search.parse(s))
Example #5
def search(init_param):
    try:
        Path.Check(init_param['output.path'])
        
        if platform.system() == 'Linux':
            bat_file = os.path.join(init_param['output.path'], 'normal.bash')
            bat_fp = open(bat_file, 'w')
            bat_fp.write('export PATH=%s:$PATH\n' % ClusterSetting.MPIPath) #modified 2012.6.11
            bat_fp.write('export LD_LIBRARY_PATH=%s:$LD_LIBRARY_PATH\n' % ClusterSetting.pLinkBinPath)
        elif platform.system() == 'Windows':
            bat_file = os.path.join(init_param['output.path'], 'normal.bat')
            bat_fp = open(bat_file, 'w')
            bat_fp.write('@echo off\n')
            bat_fp.write('%s\n' % init_param['bin.path'][0:2])  # switch to the drive of bin.path (e.g. 'C:')
        else:
            raise Exception('search_and_filter', 'unknown platform, only support Windows and Linux')
        
        bat_fp.write('cd "%s"\n' % init_param['bin.path'])
        
        search_mode = int(init_param['search_mode'])
        pfind_param = Search._ConstructpFindParam(init_param, search_mode, init_param['output.path'])
        print 'Step : Search by Searcher'
        spectra_list = init_param['spectra_list']
        spectra_title = init_param['spectra.title']
        bin_path = init_param['bin.path']
        
        for i in range(0, len(init_param['spectra_list'])):
            pfind_file = os.path.join(init_param['output.path'], '%s%d.pfind' % (init_param['spectra.title'], i+1))
            pfind_result_file = os.path.join(init_param['output.path'], '%s%d_qry.proteins.txt' % (init_param['spectra.title'], i+1))
    
            if os.path.isfile(pfind_result_file):
                print os.path.split(pfind_result_file)[-1] + ' already exists, skipping this step'
            else:
                print 'Searcher of ' + spectra_list[i]
                spectrum = []
                spectrum.append(('spec_title', spectra_title+'%d' %(i+1)))
                spectrum.append(('spec_type', '%s' % init_param['spectra.format'].upper()))
                spectrum.append(('spec_path', spectra_list[i]))
                pfind_param['spectrum'] = spectrum
                
                Search._WritepFind(pfind_param, pfind_file, search_mode)
                if platform.system() == 'Windows':
                    bat_fp.write('"%s" "%s"\n' % (os.path.join(bin_path,'Searcher'), pfind_file))
                else:
                    bat_fp.write('"%s" "%s"\n' % (os.path.join(bin_path,'Importer'), pfind_file))
                    if 'mpicores' in init_param:
                        mpicores = init_param['mpicores']
                    else:
                        mpicores = ClusterSetting.DefaultCores
                    if 'mpihosts' in init_param:
                        mpihosts = init_param['mpihosts']
                    else:
                        mpihosts = ClusterSetting.DefaultHosts
                    bat_fp.write('mpirun -np %s -host %s "%s" "%s"\n' %(mpicores, mpihosts, os.path.join(bin_path,'MPISearcher'), pfind_file))
        bat_fp.close()

    except Exception, e:
        print 'Exception: ' + str(e)
Example #6
	def main_page(self):
		import Search
		#add the bleats from users this user follows
		for listening in self.listens:
			following = Search.search_user_by_ID_e(listening)
			for bleats in following.bleats:
				self.add_bleats(bleats)
		#add the bleats mentioning this user
		bleat_list = Search.search_bleat_by_content("@"+self.username)
		for bleat in bleat_list:
			self.add_bleats(bleat)
Example #7
 def default(self, line):
     if len(line) == 0:
         return
         
     if line[-1] == '?':
         print Search.search(self.CE, Parser._parse_pred(line[:-1]))
         return
         
     try:
         self.CE.update(Parser._parse(line))
         print 'Accepted'
     except:
         print traceback.format_exc()
Example #8
def menu(host, T, t_host):
	while True:
		print ("Scegli azione PEER:\nlogin\t - Login\nquit\t - Quit\n\n")
		choice = input()

		if (choice == "login" or choice == "l"):
			t_host, sessionID = logi.login(host, t_host)
			if sessionID != bytes(const.ERROR_LOG, "ascii"):
				tfunc.success("Session ID: " + str(sessionID, "ascii"))

				listPartOwned = {}

				daemonThreadP = daemon.PeerDaemon(host, listPartOwned)
				daemonThreadP.setName("DAEMON PEER")
				daemonThreadP.setDaemon(True)
				daemonThreadP.start()

				waitingDownload = []

				while True:
					if len(waitingDownload) == 0:
						print ("\n\nScegli azione PEER LOGGATO:\nadd\t - Add File\nsearch\t - Search and Download\nlogout\t - Logout\n\n")
						choice_after_log = input()

						if (choice_after_log == "add" or choice_after_log == "a"):
							add.add(host, sessionID, t_host, listPartOwned)

						elif (choice_after_log == "search" or choice_after_log == "s"):
							src.search(sessionID, host, t_host, listPartOwned, waitingDownload)

						elif (choice_after_log == "logout" or choice_after_log == "l"):
							if (logo.logout(host, t_host, sessionID) > 0):
								break

						else:
							tfunc.error("Wrong Choice!")
					else:
						time.sleep(1)

			else:
				tfunc.error("Errore Login")	

		elif (choice == "quit" or choice == "q"):
			if T:
				logo.quit(host)
			break

		else:
			tfunc.error("Wrong Choice")
Example #9
def fillSearchBox(fltr, key):
    global channels
    global searchPos
    global main_window
    global list_win

    max_row = list_win.getmaxyx()[0] - 2
    
    filtered = Search.filter_out(fltr, channels)
    
    if (len(filtered) - 1) < max_row:
        max_row = len(filtered) + 1

    if key == 0 and searchPos >= 2:
        searchPos -= 1
    if key == 1 and searchPos < max_row:
        searchPos += 1

    if searchPos > max_row - 1:
        searchPos = max_row - 1

    clearListWin(Strings.search_bx)

    for i in range(1, max_row):
        channel = filtered[i - 1]
        outstr = getRowText(channel)

        if (i == searchPos):
            list_win.addstr(i, 1, outstr, curses.A_REVERSE)
        else:
            list_win.addstr(i, 1, outstr)

    list_win.refresh()
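
Search.filter_out is not shown in this listing. A plausible sketch, assuming it does a case-insensitive substring match of the typed text against each channel's display text (how a channel renders to text is an assumption here):

# Hypothetical sketch of Search.filter_out as called above; the real module is not shown.
def filter_out(fltr, channels):
    needle = fltr.lower()
    return [ch for ch in channels if needle in str(ch).lower()]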
Example #10
 def findInFiles(self):
     names = []
     pattern = self.findTxt.GetValue()
     bRecursive = self.chkRecursiveSearch.GetValue()
     file_filter = self.cmbFileFilter.GetValue().split(';')
     folder = [self.cmbFolder.GetValue()]
     self.engine.addFolder(folder[0])
     self.engine.addSuffix(self.cmbFileFilter.GetValue())
     dlg = wx.ProgressDialog(_("Building file list from directory '%s'") % (folder[0]),
                             _('Searching...'), 100, self.view,
                             wx.PD_CAN_ABORT | wx.PD_APP_MODAL | wx.PD_AUTO_HIDE)
     try:
         iterDirFiles = Search.listFiles(folder, file_filter, 1, bRecursive)
         iStep = 0
         for sFile in iterDirFiles:
             names.append(sFile)
             if iStep < 100 and not dlg.Update(iStep):
                 #self.view.model.editor.setStatus('Search aborted')
                 break
             iStep = iStep + 1
     finally:
         dlg.Destroy()
     self.engine.findAllInFiles(names, self.view, pattern)
     self.setComboBoxes('findInFiles')
     if self.engine.closeOnFound:
         self.EndModal(wx.ID_OK)
Example #11
def datetimesortkey_chapter_number(x):
    """concatenates chapter number with the time.
    pads the chapter number if necessary."""

    #
    # example keys:
    # _________________________081021_165843
    # 01.English00000000000000_081022_154011
    # 01.Hindi0000000000000000_081022_154008
    #

    if "chapter_number" in x:
        chapter = x["chapter_number"]
        chapter = chapter.strip()
        chapter = chapter[:maxChapterDigits]
        # if chapter.isdigit():
        #    chapter = int(chapter)
        #    chapter = ('%(#)0' + str(maxChapterDigits) + 'd') % {'#': chapter}
        # else:
        #    chapter = chapter.ljust(maxChapterDigits, '0')
        chapter = chapter.ljust(maxChapterDigits, "0")
    else:
        chapter = "".ljust(maxChapterDigits, "_")

    db_print("datetimesortkey_chapter_number, chapter: " + chapter, 38)

    timeVal = Search.changed_time_key(x)
    timeStr = timeVal.strftime("%y%m%d_%H%M%S")
    answer = chapter + "_" + timeStr

    db_print("datetimesortkey_chapter_number, answer: " + answer, 41)
    return answer
Example #12
	def OnSearch(self, event):
		keyword = self.search.GetValue()
		if keyword:
			func = lambda data: Search.match(keyword, data)
			self.tree.HighlightTree(func)
		else:
			self.tree.UnHighlightTree()
Example #13
 def drawBody(self, container):
     menu = HorizontalPanel(ID="aur-menu-int")
     search_cont = SimplePanel(StyleName="aur-content-boundary")
     search = VerticalPanel(ID="aur-search")
     footer = VerticalPanel(ID="aur-footer", Width="100%", HorizontalAlignment="center")
     search_cont.add(search)
     container.add(menu)
     container.add(search_cont)
     container.add(self.content)
     container.add(footer)
     container.setCellHeight(menu, "1px")
     container.setCellHeight(footer, "1px")
     container.setCellHorizontalAlignment(footer, "center")
     self.drawInternalMenu(menu)
     Search.draw(search)
     self.drawFooter(footer)
Example #14
    def post(self):
        """
        This receives a Query Object and performs a Search based on information from that Query.

        It then compares the results to the search_date and if they are deemed fresh, stores them.
        """
        
        new_search = Search()
        f = formatDatetime()

        q = db.get(self.request.get('key'))
        results = new_search.getResults(q.term, q.min, q.max, q.city)
        
        # Pull fresh listings from query, if they exist
        if q.fresh:
            fresh = q.fresh
        else: 
            fresh = FreshEntries()
            fresh.entries = [] # Store fresh listings here

        search_date = q.search_date
        latest_entry_time = search_date

        # Compare each entry datetime to the saved datetime
        for e in results:
            # Extract and format times from feed
            f_entry_time = f.craigslist_to_datetime(e.date)

            # Compute elapsed time since last search and this listing
            difference = f_entry_time - search_date

            # If entry is after the saved time, flag it as fresh
            if f_entry_time > search_date:

                # Check and see if this is the chronologically latest listing
                if f_entry_time > latest_entry_time:
                    latest_entry_time = f_entry_time

                entry = Entry(date=e.date, title=e.title, link=e.link)
                db.put(entry)
                fresh.entries.append(entry.key())
        db.put(fresh)

        # put back query with new search_date and new fresh listings
        q.search_date = latest_entry_time
        q.fresh = fresh
        db.put(q)
Example #15
def main(argv):
    """
    main(argv)
    The entry point of the application
    The input format should be: <API key> <infobox/question> <query>
    """
    # API key
    if argv[0] == 'test':
        # Use the default key
        api_key = 'AIzaSyBgfj3L8cqcu6OEd21JkQcHhBQJA6jUOXo'
    else:
        api_key = argv[0]

    # Source: normal, file, or interact
    if argv[1] == 'normal' or argv[1] == 'file' or argv[1] == 'interact':
        source = argv[1]
    else:
        print 'Source should be "normal", "file", or "interact"'
        return

    # Mode: infobox or question
    if source != 'interact':
        if argv[2] == 'infobox' or argv[2] == 'question':
            mode = argv[2]
        else:
            print 'Type should be either "infobox" or "question"'
            return

    # Get the search engine object with the given API key
    se = Search.get_engine(api_key)

    if source == 'normal':
        query = ' '.join(argv[3:])
        if mode == 'question':
            question(se, query)
        else:
            infobox(se, query)
    elif source == 'file':
        qfile = open(argv[3], 'r')
        for line in qfile:
            if line.endswith('\n'):
                line = line[0:-1]
            if mode == 'question':
                question(se, line)
            else:
                infobox(se, line)
    else:   # Interact
        query = ''
        while True:
            try:
                query = raw_input('Anything curious? ')
                print 'Searching...'
                if query.endswith('?'):
                    question(se, query)
                else:
                    infobox(se, query)
            except KeyboardInterrupt:
                print 'Bye~'
                break
Example #16
def countyMapperMenu(caches):
    #Filters caches given by BC, then tries to assign them to a regional district of BC.
    
    #Menu to choose province
    print "Will search BC Caches and try to assign counties"
    
    #get caches in BC currently without a county
    (options, args, search) = Search.parse('-s "British Columbia" -O C', False)
    cacheList = Search.parseOptions(caches, options)
    
    #write these caches to a file
    filename = writeCountyMapper(cacheList)
    
    BCPolygons = ['BC/Alberni-Clayoquot.arc', 'BC/Bulkley-Nechako.arc', 'BC/Capital.arc',
                'BC/Cariboo.arc', 'BC/Central Coast.arc', 'BC/Central Kootenay.arc', 
                'BC/Central Okanagan.arc', 'BC/Columbia-Shuswap.arc', 'BC/Comox Valley.arc', 
                'BC/Cowichan Valley.arc', 'BC/East Kootenay.arc', 'BC/Fraser Valley.arc', 
                'BC/Fraser-Fort George.arc', 'BC/Greater Vancouver.arc', 'BC/Kitimat-Stikine.arc',
                'BC/Kootenay Boundary.arc',  'BC/Mount Waddington.arc', 'BC/Nanaimo.arc', 
                'BC/North Okanagan.arc', 'BC/Northern Rockies.arc', 'BC/Okanagan-Similkameen.arc',
                'BC/Peace River.arc', 'BC/Powell River.arc', 'BC/Skeena-Queen Charlotte.arc',
                'BC/Squamish-Lillooet.arc', 'BC/Stikine.arc', 'BC/Strathcona.arc', 
                'BC/Sunshine Coast.arc', 'BC/Thompson-Nicola.arc']
    #call GPSBabel to filter these caches through each polygon, then remove temp files
    gcids = {}
    for polygonName in BCPolygons:
        callPolygonFilter(filename, polygonName, "test.out")
        # Capture this polygon's batch separately; gcids is cumulative, so
        # checking gcids directly would mis-log every polygon after the first hit
        new_ids = readCountyMapper(os.path.join(os.getcwd(), 'test.out'), polygonName[3:-4])
        gcids.update(new_ids)
        os.remove("test.out")
        if len(new_ids) > 0:
            log.info("%s has caches" % (polygonName[3:]))
        else:
            log.info("%s does not have caches" % (polygonName[3:]))
    os.remove(filename)
    
    #save county name to caches
    for cache in cacheList:
        if cache.gcid in gcids:
            cache.county = gcids[cache.gcid]
        else:
            log.error("%s was not found in a BC Regional District despite being in BC" %cache.gcid)
    return
Example #17
	def print_reply(self):
		if self.in_reply_to == "":
			return ""
		else:
			import Search
			reply_to = Search.search_bleat_by_bleat_ID(self.in_reply_to)
			if reply_to.exist:
				return open(base+"reply_dropdown.html").read().format(reply_to.content + "<p/><strong> By:</strong>" + reply_to.author)
			else:
				return ""
Example #18
def classicalCS2(tchords):
    allChords = pianoFilter(ChordSpaces.makeRange([(47, 67), (52, 76), (60, 81)]))
    #print("Total number of possible chords: ", len(allChords))
    # print(allChords[:10])
    qSpace = ChordSpaces.partition(ChordSpaces.opEq, allChords)
    # print(qSpace)
    chords = map(lambda x: x.absChord, tchords)
    # print(chords)
    newChords = Search.greedyProg(qSpace, ChordSpaces.opEq, testPred, Search.nearFall, chords)
    print(newChords)
    for i in range(len(tchords)):
        tchords[i].absChord = [] + newChords[i]
Example #19
    def get(self):
            
        if users.get_current_user():
            url = "/user"
            url_linktext = 'My Want List'
        else:
            url = users.create_login_url(self.request.uri)
            url_linktext = 'Login'

        f = formatDatetime()

        formatted_term = self.request.get('q')

        # handle spaces in query terms
        min = self.request.get('Min')
        max = self.request.get('Max')
        city = self.request.get('City')
        new_search = Search()
        try:
            results = new_search.getResults(formatted_term, min, max, city)
            if results:
                search_date = f.craigslist_to_datetime(results[0].date)
            else:
                search_date = datetime.now()

            template_values = {
                'search_date': search_date,
                'term': formatted_term,
                'min': min,
                'max': max,
                'city': city,
                'results': results,
                'url': url,
                'url_linktext': url_linktext,
                }
            path = os.path.join(os.path.dirname(__file__), '../static/html/search.html')
            self.response.out.write(template.render(path, template_values))
        except:
            self.response.out.write("Sorry, there was an error connecting to Craigslist.  Try checking your 'City' field, or possibly your internet connection, and try again.")
Example #20
def classicalCS2WithRange(tchords, voiceRange = [(47, 67), (52, 76), (60, 81)]):
    #allChords = pianoFilter(ChordSpaces.makeRange(voiceRange))
    allChords = PTGG.filter(ChordSpaces.makeRange(voiceRange), Constraints.satbFilter)
    #print("Total number of possible chords: ", len(allChords))
    # print(allChords[:10])
    qSpace = ChordSpaces.partition(ChordSpaces.opcEq, allChords)
    print(qSpace)
    chords = map(lambda x: x.absChord, tchords)
    newChords = Search.greedyProg(qSpace, ChordSpaces.opcEq, testPred, Search.nearFall, chords)
    #print("New Chords: ", newChords)
    print(newChords)
    for i in range(len(tchords)):
        tchords[i].absChord = [] + newChords[i]
Example #21
 def search(self):
     clear = self.clearText.get(1.0, Tkinter.END)
     rst = Search.search(clear)
     self.encodedText.delete(1.0, Tkinter.END)
     content = Search.jsonParser(rst)
     display = ""
     
     if (len(content) == 0):
         display = "Nothing" 
     else:
         for tweet in content:
             key = tweet.keys()[0]
             display += key + " said: \n" + "\t" + tweet.get(key) + "\n\n"

         checkReport = False
         for tweet in content:
             if Search.insertMySQL(tweet):
                 checkReport = True
         if checkReport:
             tkMessageBox.showinfo("Insert to database", "Inserted successfully")
     
     self.encodedText.insert(1.0, display)
Example #22
def process_documents():
    # Read documents
    documents = Utilities.read_from_time_all()
    #documents = read_lines()
    # Tokenize and stem documents
    documents = Utilities.tokenize_stem_docs(documents)
    # Calculate document lengths
    doc_len = Utilities.calculate_doc_len(documents)
    # Term frequency
    tf = TFIDF.term_frequency(documents)
    # Calculate tf-idf
    tfidf = TFIDF.TFIDF(len(documents), tf)
    # Read queries
    queries = Utilities.read_from_time_que()
    #queries = ['pop love song', 'chinese american', 'city']
    # Tokenize and stem queries
    queries = Utilities.tokenize_stem_docs(queries)
    
    
    #print Search.search_by_cosine(tfidf,len(documents),['CARTOONISTS'.lower()])
    
    
    cosine_result = []
    rsv_result = []
    BM25_1_5 = []  # b=1, k=0.5
    BM25_1_1 = []  # b=1, k=1
    BM25_2_5 = []  # b=2, k=0.5
    BM25_2_1 = []  # b=2, k=1

    for query in queries:
        cosine_result.append(Search.search_by_cosine(tfidf, len(documents), query))
        rsv_result.append(Search.search_by_rsv(tf, len(documents), query))
        BM25_1_5.append(Search.search_by_BM25(tf, doc_len, query, 1.0, 0.5))
        BM25_1_1.append(Search.search_by_BM25(tf, doc_len, query, 1.0, 1.0))
        BM25_2_5.append(Search.search_by_BM25(tf, doc_len, query, 2.0, 0.5))
        BM25_2_1.append(Search.search_by_BM25(tf, doc_len, query, 2.0, 1.0))
    
    #print cosine_result[1]
    # Read relevance judgments from time.rel
    rel_dict = Utilities.read_from_time_rel()

    # Print results
    result = []

    result.append(('System', 'Precision', 'Recall', 'F1', 'MAP'))
    result.append(('cosine  ',) + Metrics.getMetrics(cosine_result, rel_dict, 20))  # limit to top 20 results
    result.append(('RSV  ',) + Metrics.getMetrics(rsv_result, rel_dict, 20))
    result.append(('BM25 (1, .5) ',) + Metrics.getMetrics(BM25_1_5, rel_dict, 20))
    result.append(('BM25 (1, 1) ',) + Metrics.getMetrics(BM25_1_1, rel_dict, 20))
    result.append(('BM25 (2, .5) ',) + Metrics.getMetrics(BM25_2_5, rel_dict, 20))
    result.append(('BM25 (2, 1) ',) + Metrics.getMetrics(BM25_2_1, rel_dict, 20))
    
    Utilities.tabulate(result)
    Utilities.plot_graph(result)
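
The search_by_BM25 calls above pass (tf, doc_len, query, b, k). The Search module itself is not shown; purely as an illustration of what those b and k parameters control, here is the standard per-term BM25 weight:

import math

# Illustrative per-term BM25 weight; a sketch of the textbook formula, not the
# actual Search.search_by_BM25 implementation (which is not shown here).
# term_freq: occurrences of the term in the document; df: number of documents
# containing the term; n_docs: collection size; dl/avgdl: this document's
# length and the average document length.
def bm25_weight(term_freq, df, n_docs, dl, avgdl, b, k):
    idf = math.log((n_docs - df + 0.5) / (df + 0.5) + 1.0)
    return idf * term_freq * (k + 1) / (term_freq + k * (1 - b + b * dl / avgdl))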
Example #23
 def get_new_posts(query, collection):
     loops = 0
     repeat = False
     to_be_inserted = []
     while (not repeat):
         lst = Search.search(query, loops * MAX_QUERY, modifier="create_ts desc")[1]
         for post in lst:
             if posts_collection.find_one({"item.id":post['item']['id']}):
                 repeat = True
                 break
             else:
                 to_be_inserted.append(post)
         loops += 1
     return to_be_inserted
Example #24
	def print_bleats(self):
		import Search
		string=""
		def getKey(custom):
			return custom.time
		bleat_list = []
		for bleat in self.bleats:
			try:
				bleat_list.append(Search.search_bleat_by_bleat_ID(bleat))
			except:
				continue
		for bleat in sorted(bleat_list, key=getKey, reverse=True):
			string += bleat.format_bleat()
		return open(base+"bleat_panel.html").read().format(string)
Example #25
def main(screen):
    screen.clear()
    screen.refresh()

    drw = screen.refresh

    width = curses.COLS
    height = curses.LINES
    half_width = int(width / 2)
    
    screen.border()

    Utils.drawCenteredOnTitle(screen, Strings.title)

    screen.addstr(height - 3, half_width + 1, Strings.getting_channel_list)

    drw()

    channels = GetChannels.get()
    
    screen.addstr(height - 3, half_width + 1, "Got " + str(len(channels)) + " channels.       ")
    drw()

    ChannelBox.init(width, height, screen, channels)
    Search.init(screen, width, height)
    
    drw()    

    ChannelBox.updateChannelBox(-1)
    
    while True:
        inp = screen.getch()

        if Utils.is_playing:
            playing(inp)
        else:
            ui(inp)
Example #26
    def mediumQuality(self):
        global COUNTER
        Search.search()
        displayBackgroundMessage()    
        imagePath = getImagePath()
        tileFolder = "ImageTiles"
        image = Image.open(imagePath)
        width = image.size[0]
        size = getMuilt(width)

        build_mosaic(
            input_path=imagePath,
            output_path="mosaicImage" + str(COUNTER) + ".png",
            collection_path=tileFolder,
            #Enlarge the source image by this factor
            zoom=size,
            #Tile size in pixels
            thumb_size=40,
            fuzz=10,
            new_colormap=False
        )

        COUNTER += 1
        displayFinishMessage()
Example #27
def get_search_result():
    form = cgi.FieldStorage()
    selName = form.getfirst('sel', '')
    if not selName:
        ryw.give_bad_news(
            'AddSearchAll: failed to find current search result: ',
            logging.error)
        return None

    tmpSearchResultDir = Search.decide_search_result_dir()
    if not tmpSearchResultDir:
        ryw.give_bad_news('AddSearchAll: ' +
                          'decide_search_result_dir failed.',
                          logging.error)
        return None

    searchFileName = os.path.join(tmpSearchResultDir, selName)
    searchSel = ProcessDownloadReq.get_reqs(searchFileName)
    
    return searchSel
Example #28
def start():
    print('Starting comment stream:')
    last_checked_pms = time.time()

    #This opens a constant stream of comments. It will loop until there's a major error (usually this means the Reddit access token needs refreshing)
    comment_stream = praw.helpers.comment_stream(reddit, SUBREDDITLIST, limit=250, verbosity=0)

    for comment in comment_stream:

        # check if it's time to check the PMs
        if (time.time() - last_checked_pms) > TIME_BETWEEN_PM_CHECKS:
            process_pms()
            last_checked_pms = time.time()

        #Is the comment valid (i.e. it's not made by Roboragi and I haven't seen it already). If no, try to add it to the "already seen pile" and skip to the next comment. If yes, keep going.
        if not (Search.isValidComment(comment, reddit)):
            try:
                if not (DatabaseHandler.commentExists(comment.id)):
                    DatabaseHandler.addComment(comment.id, comment.author.name, comment.subreddit, False)
            except:
                pass
            continue

        process_comment(comment)
Example #29
def process_comment(comment, is_edit=False):
    #Anime/Manga requests that are found go into separate arrays
    animeArray = []
    mangaArray = []
    lnArray = []

    #ignores all "code" markup (i.e. anything between backticks)
    comment.body = re.sub(r"\`(?s)(.*?)\`", "", comment.body)

    #This checks for requests. First up we check all known tags for the !stats request
    if re.search('({!stats.*?}|{{!stats.*?}}|<!stats.*?>|<<!stats.*?>>)',
                 comment.body, re.S) is not None:
        username = re.search('[uU]\/([A-Za-z0-9_-]+?)(>|}|$)', comment.body,
                             re.S)
        subreddit = re.search('[rR]\/([A-Za-z0-9_]+?)(>|}|$)', comment.body,
                              re.S)

        if username:
            commentReply = CommentBuilder.buildStatsComment(
                username=username.group(1))
        elif subreddit:
            commentReply = CommentBuilder.buildStatsComment(
                subreddit=subreddit.group(1))
        else:
            commentReply = CommentBuilder.buildStatsComment()
    else:

        #The basic algorithm here is:
        #If it's an expanded request, build a reply using the data in the braces, clear the arrays, add the reply to the relevant array and ignore everything else.
        #If it's a normal request, build a reply using the data in the braces, add the reply to the relevant array.

        #Counts the number of expanded results vs total results. If it's not just a single expanded result, they all get turned into normal requests.
        numOfRequest = 0
        numOfExpandedRequest = 0
        forceNormal = False

        for match in re.finditer("\{{2}([^}]*)\}{2}|\<{2}([^>]*)\>{2}",
                                 comment.body, re.S):
            numOfRequest += 1
            numOfExpandedRequest += 1

        for match in re.finditer(
                "(?<=(?<!\{)\{)([^\{\}]*)(?=\}(?!\}))|(?<=(?<!\<)\<)([^\<\>]*)(?=\>(?!\>))",
                comment.body, re.S):
            numOfRequest += 1

        if (numOfExpandedRequest >= 1) and (numOfRequest > 1):
            forceNormal = True

        #Expanded Anime
        for match in re.finditer("\{{2}([^}]*)\}{2}", comment.body, re.S):
            reply = ''

            if (forceNormal) or (str(comment.subreddit).lower()
                                 in disableexpanded):
                reply = Search.buildAnimeReply(match.group(1), False, comment)
            else:
                reply = Search.buildAnimeReply(match.group(1), True, comment)

            if (reply is not None):
                animeArray.append(reply)

        #Normal Anime
        for match in re.finditer("(?<=(?<!\{)\{)([^\{\}]*)(?=\}(?!\}))",
                                 comment.body, re.S):
            reply = Search.buildAnimeReply(match.group(1), False, comment)

            if (reply is not None):
                animeArray.append(reply)

        #Expanded Manga
        #NORMAL EXPANDED
        for match in re.finditer("\<{2}([^>]*)\>{2}(?!(:|\>))", comment.body,
                                 re.S):
            reply = ''

            if (forceNormal) or (str(comment.subreddit).lower()
                                 in disableexpanded):
                reply = Search.buildMangaReply(match.group(1), False, comment)
            else:
                reply = Search.buildMangaReply(match.group(1), True, comment)

            if (reply is not None):
                mangaArray.append(reply)

        #AUTHOR SEARCH EXPANDED
        for match in re.finditer("\<{2}([^>]*)\>{2}:\(([^)]+)\)", comment.body,
                                 re.S):
            reply = ''

            if (forceNormal) or (str(comment.subreddit).lower()
                                 in disableexpanded):
                reply = Search.buildMangaReplyWithAuthor(
                    match.group(1), match.group(2), False, comment)
            else:
                reply = Search.buildMangaReplyWithAuthor(
                    match.group(1), match.group(2), True, comment)

            if (reply is not None):
                mangaArray.append(reply)

        #Normal Manga
        #NORMAL
        for match in re.finditer("(?<=(?<!\<)\<)([^\<\>]+)\>(?!(:|\>))",
                                 comment.body, re.S):
            reply = Search.buildMangaReply(match.group(1), False, comment)

            if (reply is not None):
                mangaArray.append(reply)

        #AUTHOR SEARCH
        for match in re.finditer("(?<=(?<!\<)\<)([^\<\>]*)\>:\(([^)]+)\)",
                                 comment.body, re.S):
            reply = Search.buildMangaReplyWithAuthor(match.group(1),
                                                     match.group(2), False,
                                                     comment)

            if (reply is not None):
                mangaArray.append(reply)

        #Expanded LN
        for match in re.finditer("\]{2}([^]]*)\[{2}", comment.body, re.S):
            reply = ''

            if (forceNormal) or (str(comment.subreddit).lower()
                                 in disableexpanded):
                reply = Search.buildLightNovelReply(match.group(1), False,
                                                    comment)
            else:
                reply = Search.buildLightNovelReply(match.group(1), True,
                                                    comment)

            if (reply is not None):
                lnArray.append(reply)

        #Normal LN
        for match in re.finditer("(?<=(?<!\])\])([^\]\[]*)(?=\[(?!\[))",
                                 comment.body, re.S):
            reply = Search.buildLightNovelReply(match.group(1), False, comment)

            if (reply is not None):
                lnArray.append(reply)

        #Here is where we create the final reply to be posted

        #The final comment reply. We add stuff to this progressively.
        commentReply = ''

        #Basically just to keep track of people posting the same title multiple times (e.g. {Nisekoi}{Nisekoi}{Nisekoi})
        postedAnimeTitles = []
        postedMangaTitles = []
        postedLNTitles = []

        #Adding all the anime to the final comment. If there's manga too we split up all the paragraphs and indent them in Reddit markup by adding a '>', then recombine them
        for i, animeReply in enumerate(animeArray):
            if i != 0:
                commentReply += '\n\n'

            if not (animeReply['title'] in postedAnimeTitles):
                postedAnimeTitles.append(animeReply['title'])
                commentReply += animeReply['comment']

        if mangaArray:
            commentReply += '\n\n'

        #Adding all the manga to the final comment
        for i, mangaReply in enumerate(mangaArray):
            if i != 0:
                commentReply += '\n\n'

            if not (mangaReply['title'] in postedMangaTitles):
                postedMangaTitles.append(mangaReply['title'])
                commentReply += mangaReply['comment']

        if lnArray:
            commentReply += '\n\n'

        #Adding all the light novels to the final comment
        for i, lnReply in enumerate(lnArray):
            if i != 0:
                commentReply += '\n\n'

            if not (lnReply['title'] in postedLNTitles):
                postedLNTitles.append(lnReply['title'])
                commentReply += lnReply['comment']

        #If there are more than 10 requests, shorten them all
        if (commentReply != '') and (
                len(animeArray) + len(mangaArray) + len(lnArray) >= 10):
            commentReply = re.sub(r"\^\((.*?)\)", "", commentReply, flags=re.M)

    #If there was actually something found, add the signature and post the comment to Reddit. Then, add the comment to the "already seen" database.
    if commentReply != '':
        #if (comment.author.name == 'treborabc'):
        #    commentReply = '[No.](https://www.reddit.com/r/anime_irl/comments/4sba1n/anime_irl/d58xkha)'

        commentReply += Config.getSignature(comment.permalink)

        commentReply += Reference.get_bling(comment.author.name)

        if is_edit:
            return commentReply
        else:
            try:
                comment.reply(commentReply)
                print("Comment made.\n")
            except praw.errors.Forbidden:
                print('Request from banned subreddit: ' +
                      str(comment.subreddit) + '\n')
            except Exception:
                traceback.print_exc()

            try:
                DatabaseHandler.addComment(comment.id, comment.author.name,
                                           comment.subreddit, True)
            except:
                traceback.print_exc()
    else:
        try:
            if is_edit:
                return None
            else:
                DatabaseHandler.addComment(comment.id, comment.author.name,
                                           comment.subreddit, False)
        except:
            traceback.print_exc()
Example #30
	def similarQuery(self, word):
		return Search.similarQuery(word, self.UserClass)
Example #31
#
# creates a new mongodb database with the first thousand
# and last thousand posts that the queries "show hn" and "showhn" find
# in collection 'posts'
#

import Search
from pymongo import Connection
import pymongo

if __name__ == '__main__':
    connection = Connection(
        "mongodb://*****:*****@7fc2f09f.dotcloud.com:12015")
    db = connection['showhn']

    posts = Search.search_for_showhn()

    effort = 0
    retry = True  # renamed from 'bool', which shadowed the builtin
    while retry:
        posts_collection = db['posts']
        effort += 1
        try:
            posts_collection.insert(posts, safe=True)
            retry = False
        except pymongo.errors.OperationFailure:
            posts_collection.drop()
            retry = True

    print effort
Example #32
    def motionEs(self, Yc, Yr, pix):

        _search = Search()
        cpoint = _search.full_search(Yc, Yr, pix)

        return cpoint
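
The Search class used above is not shown. A minimal sketch of what an exhaustive ("full search") block matcher typically does, under the assumptions that Yc is the current block, Yr the reference region padded by pix on each side, and pix the search radius (all read off the call site):

import numpy as np

# Hypothetical exhaustive block-matching sketch; not the actual Search class.
def full_search(Yc, Yr, pix):
    h, w = Yc.shape
    best_sad, best = float('inf'), (0, 0)
    for dy in range(-pix, pix + 1):
        for dx in range(-pix, pix + 1):
            # Candidate block displaced by (dx, dy) inside the padded reference
            cand = Yr[pix + dy:pix + dy + h, pix + dx:pix + dx + w]
            sad = np.abs(cand.astype(int) - Yc.astype(int)).sum()
            if sad < best_sad:
                best_sad, best = sad, (dx, dy)
    return best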
Example #33
def searchBox(search):
    # this function returns result based on all potential search elements
    return Search.searchAll(myDB, myCursor, search)
Example #34
def main():
    """ Main function """
    Search.startSearch()
Example #35
 def sim_2(self):
     test = Search.NewtonMethod(self.error_2, 1.1*2*math.pi, 0.01, 0.01)
     result = test.do_it()
     self.vL = result
     return result
Example #36
def process_comment(comment, is_edit=False):
    """ process dat comment """
    # Anime/Manga requests that are found go into separate arrays
    animeArray = []
    mangaArray = []
    lnArray = []
    vnArray = []

    # ignores all "code" markup (i.e. anything between backticks)
    comment.body = re.sub(r"\`[{<\[]+(.*?)[}>\]]+\`", "", comment.body)

    num_so_far = 0

    numOfRequest = 0
    numOfExpandedRequest = 0

    # Ignore any blacklisted users
    if (comment.author.name.lower() in user_blacklist):
        print('User in blacklist: ' + comment.author.name)
        commentReply = ''
    # This checks for requests. First up we check all known tags for the !stats request
    elif re.search('({!stats.*?}|{{!stats.*?}}|<!stats.*?>|<<!stats.*?>>)', comment.body, re.S) is not None:
        username = USERNAME_PATTERN.search(comment.body)
        subreddit = SUBREDDIT_PATTERN.search(comment.body)

        if username:
            commentReply = CommentBuilder.buildStatsComment(
                username=username.group(1)
            )
        elif subreddit:
            commentReply = CommentBuilder.buildStatsComment(
                subreddit=subreddit.group(1)
            )
        else:
            commentReply = CommentBuilder.buildStatsComment()
    else:

        # The basic algorithm here is:
        # If it's an expanded request, build a reply using the data in the
        # braces, clear the arrays, add the reply to the relevant array and
        # ignore everything else. If it's a normal request, build a reply using
        # the data in the braces, add the reply to the relevant array.

        # Counts the number of expanded results vs total results. If it's not
        # just a single expanded result, they all get turned into normal
        # requests.

        forceNormal = False

        for match in find_requests('all', comment.body, expanded=True):
            numOfRequest += 1
            numOfExpandedRequest += 1

        for match in find_requests('all', comment.body):
            numOfRequest += 1

        if (numOfExpandedRequest >= 1) and (numOfRequest > 1):
            forceNormal = True

        # Determine whether we'll build an expanded reply just once.
        subredditName = str(comment.subreddit).lower()
        isExpanded = not (forceNormal or (subredditName in disableexpanded))

        # The final comment reply. We add stuff to this progressively.
        commentReply = ''

        # Expanded Anime
        for match in find_requests('anime', comment.body, expanded=True):
            if num_so_far < 30:
                reply = Search.buildAnimeReply(match, isExpanded, comment)

                if (reply is not None):
                    num_so_far = num_so_far + 1
                    animeArray.append(reply)

        # Normal Anime
        for match in find_requests('anime', comment.body):
            if num_so_far < 30:
                reply = Search.buildAnimeReply(match, False, comment)

                if (reply is not None):
                    num_so_far = num_so_far + 1
                    animeArray.append(reply)

        # Expanded Manga
        # NORMAL EXPANDED
        for match in find_requests('manga', comment.body, expanded=True):
            if num_so_far < 30:
                reply = Search.buildMangaReply(match, isExpanded, comment)

                if (reply is not None):
                    num_so_far = num_so_far + 1
                    mangaArray.append(reply)

        # Normal Manga
        # NORMAL
        for match in find_requests('manga', comment.body):
            if num_so_far < 30:
                reply = Search.buildMangaReply(match, False, comment)

                if (reply is not None):
                    num_so_far = num_so_far + 1
                    mangaArray.append(reply)

        # Expanded LN
        for match in find_requests('light_novel', comment.body, expanded=True):
            if num_so_far < 30:
                reply = Search.buildLightNovelReply(match, isExpanded, comment)

                if (reply is not None):
                    num_so_far = num_so_far + 1
                    lnArray.append(reply)

        # Normal LN
        for match in find_requests('light_novel', comment.body):
            if num_so_far < 30:
                reply = Search.buildLightNovelReply(match, False, comment)

                if (reply is not None):
                    num_so_far = num_so_far + 1
                    lnArray.append(reply)

        # Expanded VN
        for match in find_requests('visual_novel', comment.body, expanded=True):
            if num_so_far < 30:
                reply = Search.buildVisualNovelReply(match, isExpanded, comment)

                if (reply is not None):
                    num_so_far = num_so_far + 1
                    vnArray.append(reply)

        # Normal VN
        for match in find_requests('visual_novel', comment.body):
            if num_so_far < 30:
                reply = Search.buildVisualNovelReply(match, False, comment)

                if (reply is not None):
                    num_so_far = num_so_far + 1
                    vnArray.append(reply)

        # Here is where we create the final reply to be posted

        # Basically just to keep track of people posting the same title
        # multiple times (e.g. {Nisekoi}{Nisekoi}{Nisekoi})
        postedAnimeTitles = []
        postedMangaTitles = []
        postedLNTitles = []
        postedVNTitles = []

        # Adding all the anime to the final comment. If there's manga too we
        # split up all the paragraphs and indent them in Reddit markup by
        # adding a '>', then recombine them
        for i, animeReply in enumerate(animeArray):
            if i != 0:
                commentReply += '\n\n'

            if not (animeReply['title'] in postedAnimeTitles):
                postedAnimeTitles.append(animeReply['title'])
                commentReply += animeReply['comment']

        if mangaArray:
            commentReply += '\n\n'

        # Adding all the manga to the final comment
        for i, mangaReply in enumerate(mangaArray):
            if i != 0:
                commentReply += '\n\n'

            if not (mangaReply['title'] in postedMangaTitles):
                postedMangaTitles.append(mangaReply['title'])
                commentReply += mangaReply['comment']

        if lnArray:
            commentReply += '\n\n'

        # Adding all the light novels to the final comment
        for i, lnReply in enumerate(lnArray):
            if i != 0:
                commentReply += '\n\n'

            if not (lnReply['title'] in postedLNTitles):
                postedLNTitles.append(lnReply['title'])
                commentReply += lnReply['comment']

        if vnArray:
            commentReply += '\n\n'

        # Adding all the visual novels to the final comment
        for i, vnReply in enumerate(vnArray):
            if i != 0:
                commentReply += '\n\n'

            if not (vnReply['title'] in postedVNTitles):
                postedVNTitles.append(vnReply['title'])
                commentReply += vnReply['comment']

        # If there are more than 10 requests, shorten them all
        lenRequests = sum(map(len, (animeArray, mangaArray, lnArray, vnArray)))
        if (commentReply != '') and (lenRequests >= 10):
            commentReply = re.sub(r"\^\((.*?)\)", "", commentReply, flags=re.M)

    # If there was actually something found, add the signature and post the
    # comment to Reddit. Then, add the comment to the "already seen" database.
    if commentReply != '':

        if num_so_far >= 30:
            commentReply += ("\n\nI'm limited to 30 requests at once and have "
                             "had to cut off some, sorry for the "
                             "inconvenience!\n\n")

        commentReply += Config.getSignature(comment.permalink)

        commentReply += Reference.get_bling(comment.author.name)

        total_expected = int(numOfRequest)
        total_found = sum(map(len, (animeArray, mangaArray, lnArray, vnArray)))

        if total_found != total_expected:
            commentReply += '&#32;|&#32;({0}/{1})'.format(total_found,
                                                          total_expected)

        if is_edit:
            return commentReply
        else:
            try:
                comment.reply(commentReply)
                print("Comment made.\n")
            except praw.errors.Forbidden:
                print('Request from banned '
                      'subreddit: {0}\n'.format(comment.subreddit))
            except Exception as e:
                logger.debug(traceback.print_exc())
                logger.warn(e)

            comment_author = comment.author.name if comment.author else '!UNKNOWN!'

            try:
                DatabaseHandler.addComment(
                    comment.id,
                    comment_author,
                    comment.subreddit,
                    True
                )
            except Exception as e:
                logger.debug(traceback.print_exc())
                logger.warn(e)
    else:
        try:
            if is_edit:
                return None
            else:
                comment_author = comment.author.name if comment.author else '!UNKNOWN!'

                DatabaseHandler.addComment(
                    comment.id,
                    comment_author,
                    comment.subreddit,
                    False
                )
        except Exception as e:
            logger.debug(traceback.print_exc())
            logger.warn(e)
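
The find_requests helper used throughout this version is not shown. A plausible sketch that reuses the tag regexes visible in the older process_comment (Example #29); the visual-novel pattern appears nowhere in this listing, so it is deliberately omitted:

import re

# Hypothetical sketch of find_requests; the anime/manga/light-novel patterns
# are copied from Example #29 as (normal, expanded) pairs.
TAG_PATTERNS = {
    'anime': (r"(?<=(?<!\{)\{)([^\{\}]*)(?=\}(?!\}))", r"\{{2}([^}]*)\}{2}"),
    'manga': (r"(?<=(?<!\<)\<)([^\<\>]+)\>(?!(:|\>))", r"\<{2}([^>]*)\>{2}(?!(:|\>))"),
    'light_novel': (r"(?<=(?<!\])\])([^\]\[]*)(?=\[(?!\[))", r"\]{2}([^]]*)\[{2}"),
}

def find_requests(kind, body, expanded=False):
    kinds = list(TAG_PATTERNS) if kind == 'all' else [kind]
    for k in kinds:
        patterns = TAG_PATTERNS.get(k)
        if patterns is None:  # e.g. 'visual_novel' is not reproduced here
            continue
        for match in re.finditer(patterns[1] if expanded else patterns[0], body, re.S):
            yield match.group(1)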
Example #37
import Search as src

x = int(input("Input a number bro "))

list01 = []
for a in range(x):
    y = int(input())
    list01.append(y)

search_item = int(input())

# Binary search requires sorted input
list01.sort()
print(src.binary_search(list01, 0, len(list01), search_item))
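
Search.binary_search itself is not in this listing. A minimal sketch matching the (lst, low, high, item) call shape above, assuming a half-open [low, high) range and that the index is returned, or -1 when the item is absent:

# Hypothetical sketch of Search.binary_search; the real module is not shown.
def binary_search(lst, low, high, item):
    while low < high:
        mid = (low + high) // 2
        if lst[mid] == item:
            return mid
        elif lst[mid] < item:
            low = mid + 1
        else:
            high = mid
    return -1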
Example #38
 def search(self, start, end, heuristic, greedy):
     search = Search.Search(self.api,
                            heuristic)
     return search.greedy(start, end) if greedy else search.a_star(start, end)
Example #39
    def executePO(self, rawLogs, serverData, run=False):
        logPharserObj = logPharser.logPharser()
        searchObj = Search.logSearch()

        poDefFile = open(Config.PODEFINITIONFILE, 'r').read()
        serlst = []
        for i in range(len(serverData)):
            l = dict(serverData[i])
            if 'data' in l:
                serlst.append(l['data'])

        fdaPODefinition = logPharserObj.extractDic(
            poDefFile,
            extractCommunicationTemplate=False,
            extractFDATemplate=True)
        comPODefinition = logPharserObj.extractDic(
            poDefFile,
            extractCommunicationTemplate=True,
            extractFDATemplate=False)

        matchingFDAPOTemplate, matchingComPOTemplate = logPharserObj.matchDictTemplate(
            fdaPODefinition, comPODefinition)

        rpmPOList = searchObj.keywordSearch(rawLogs, *Config.RPMKEYS)
        fdaPOList = searchObj.keywordSearch(rawLogs, *Config.FDAKEYS)
        comPOList = searchObj.keywordSearch(rawLogs, *Config.COMMUNICATIONKEYS)
        rpmPOStr = '\n'.join(rpmPOList)
        fdaPOStr = '\n'.join(fdaPOList)
        comPOStr = '\n'.join(comPOList)

        rpmPOLst = logPharserObj.extractDic(rpmPOStr)

        fdaPOLst = logPharserObj.extractDic(fdaPOStr)

        comPOTempLst = logPharserObj.extractDic(comPOStr)
        comPOLst = logPharserObj.extractDictofDict(comPOTempLst)
        # serverPOLst = logPharserObj.extractDic(serlst)

        rpmPOValues = logPharserObj.checkformat(matchingFDAPOTemplate,
                                                rpmPOLst, Config.PO[0])
        fdaPOValues = logPharserObj.checkformat(matchingFDAPOTemplate,
                                                fdaPOLst, Config.PO[0])
        comPOValues = logPharserObj.checkformat(matchingComPOTemplate,
                                                comPOLst, Config.PO[0])
        serverPOValues = logPharserObj.checkformat(matchingComPOTemplate,
                                                   serlst, Config.PO[0])

        rpmPOTmpVal = []
        fdaPOTmpVal = []
        comPOTmpVal = []
        serverPOTmpVal = []
        for i in range(len(rpmPOValues)):
            dict1 = dict(rpmPOValues[i])
            if Config.PO[0] in dict1:
                rpmPOTmpVal.append(dict1.copy())

        for i in range(len(fdaPOValues)):
            dict1 = dict(fdaPOValues[i])
            if Config.PO[0] in dict1:
                fdaPOTmpVal.append(dict1)

        for i in range(len(comPOValues)):
            dict1 = dict(comPOValues[i])
            if Config.PO[0] in dict1:
                comPOTmpVal.append(dict1)

        for i in range(len(serverPOValues)):
            dict1 = dict(serverPOValues[i])
            if Config.PO[0] in dict1:
                serverPOTmpVal.append(dict1)

        rpmFinalPO = logPharserObj.removeDuplicate(rpmPOTmpVal)
        fdaFinalPO = logPharserObj.removeDuplicate(fdaPOTmpVal)
        comFinalPO = logPharserObj.removeDuplicate(comPOTmpVal)
        serverFinalPO = logPharserObj.removeDuplicate(serverPOTmpVal)

        print matchingFDAPOTemplate
        print matchingComPOTemplate
        print comFinalPO
        PORPM, POFDA = logPharserObj.matchListEqual(rpmFinalPO, fdaFinalPO,
                                                    matchingFDAPOTemplate,
                                                    matchingComPOTemplate)

        POFDACOM, POCOM = logPharserObj.matchListEqual(POFDA, comFinalPO,
                                                       matchingFDAPOTemplate,
                                                       matchingComPOTemplate)

        POSERVERCOM, POSERVER = logPharserObj.matchListEqual(
            POCOM, serverFinalPO, matchingFDAPOTemplate, matchingComPOTemplate)
        print len(POSERVER)
        print len(POCOM)

        rpmPOMatched, rpmPONonMatched = logPharserObj.matchValues(
            PORPM, POFDACOM, 'date')

        fdaPOMatched, fdaPONonMatched = logPharserObj.matchValues(
            POFDACOM, PORPM, 'date')
        fdacomBPMatched, fdacomBPNonMatched = logPharserObj.matchValues(
            POFDACOM, POCOM, 'date')
        comPOMatched, comPONonMatched = logPharserObj.matchValues(
            POCOM, fdaPOMatched, 'date')
        serverPOMatched, serverPONonMatched = logPharserObj.matchValues(
            POSERVERCOM, PORPM, 'date')

        print rpmPOMatched
        rpmPOPass = logPharserObj.extractPassData(Config.PO, rpmPOMatched)

        fdaPOPass = logPharserObj.extractPassData(Config.PO, fdaPOMatched)

        # fdaBPPass = logPharserObj.extractPassData(Config.BP, fdacomBPMatched)
        comPOPass = logPharserObj.extractPassData(Config.PO, comPOMatched)

        serverPOPass = logPharserObj.extractPassData(Config.PO,
                                                     serverPOMatched)

        POpasscount = logPharserObj.addData('Pass', fdaPOPass)
        POfailcount = logPharserObj.addData('Fail', fdaPONonMatched)
        dataType = logPharserObj.addData('PO', POpasscount)
        dataType1 = logPharserObj.addData('PO', POfailcount)
        # print POCOM
        # print len(comPOPass)
        # print len(rpmPOPass)
        # print len(fdaPOPass)

        fdaPONonMatchedFinal = list(
            str(fdaPONonMatched[i]) for i in range(len(fdaPONonMatched)))

        comPONonMatchedFinal = list(
            str(comPONonMatched[i]) for i in range(len(comPONonMatched)))

        df = pd.DataFrame()
        df1 = pd.DataFrame()
        df2 = pd.DataFrame()
        df3 = pd.DataFrame()
        df['Data Type'] = dataType
        df['RPM'] = rpmPOPass
        df['FDA'] = fdaPOPass
        df['Com'] = comPOPass
        df2['Server'] = serverPOPass
        df['Result'] = POpasscount

        df1['Data Type'] = dataType1
        df1['RPM'] = rpmPONonMatched
        df1['FDA'] = fdaPONonMatched
        df1['Com'] = comPONonMatched
        df3['Server'] = serverPONonMatched
        df1['Result'] = POfailcount

        h = df.append(df1, ignore_index=True)
        h2 = df2.append(df3, ignore_index=True)
        df4 = pd.concat([h, h2], axis=1)
        writer = pd.ExcelWriter('POResult.xlsx', engine='xlsxwriter')
        df4.to_excel(writer, sheet_name='Report', index=False)

        workbook = writer.book
        worksheet = writer.sheets['Report']
        writer.save()
        writer.close()
Example #40
 def Solve(self):
     result, states = Search.IDDFS(self)
     actions = [self.actionName[x] for x in result]
     return actions, states
Example #41
 def sim_2(self):
     times = self.start_times()
     print times
     test1 = Search.GoldenSection(self.error_1, times[0], times[1], 0.0001)
     result1 = test1.do_it()
     return result1
Example #42
 def sim_3(self):
     test = Search.NewtonMethod(self.error_3, 1.9, 0.01, 0.01)
     result = test.do_it()
     return result
Example #43
    def start(self):
        csvFileName = self.query + ".csv"
        print(csvFileName)
        g = Search.load_csv(f=csvFileName)

        return str(g.search(self.query))
Example #44
 def start_search(name):
     start_search = Search(Phone_book)
     output = start_search.start(name)
     self.output.delete('1.0', tk.END)
     self.output.insert(tk.INSERT, 'u zocht op %s \n\n' % name)
     self.output.insert(tk.INSERT, output)
Example #45
def searchInUserCity(userCity):
    # this function returns all shops present in the user's city
    return Search.searchFilter(myDB, myCursor, userCity, 'city')
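
Search.searchFilter is not shown here. A plausible sketch, assuming it runs a parameterized SELECT over a shops table and that the column name comes from a small whitelist (the table name and the whitelist are both assumptions):

# Hypothetical sketch of Search.searchFilter as called above; not the real module.
ALLOWED_COLUMNS = {'city', 'name'}  # assumed whitelist of filterable columns

def searchFilter(db, cursor, value, column):
    if column not in ALLOWED_COLUMNS:
        raise ValueError('unexpected column: %s' % column)
    # Placeholder style (%s) follows MySQLdb; sqlite3 would use '?' instead.
    cursor.execute('SELECT * FROM shops WHERE {0} = %s'.format(column), (value,))
    return cursor.fetchall()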
Example #46
    def __execute_select(self, sql, params, pub_key, search_keys):
        parsed = sqlparse.parse(sql)[0]
        col_names = []
        is_wildcard = False

        for t in parsed.tokens:
            if t.ttype == tokens.Keyword:
                break

            if type(t) == sqlparse.sql.IdentifierList:
                col_names = [x.value for x in t.get_identifiers()]
                break

            if type(t) == sqlparse.sql.Identifier:
                col_names = [t.value]
                break

            if t.ttype == tokens.Wildcard:
                is_wildcard = True
                break

        table_name = None
        saw_from = False
        for t in parsed.tokens:
            if saw_from and type(t) == sqlparse.sql.Identifier:
                table_name = t.value
                break

            if t.ttype == tokens.Keyword and t.value.lower() == "from":
                saw_from = True

        if table_name not in self.ld_conn.tables:
            raise ValueError("Table {} does not exist".format(table_name))

        if is_wildcard:
            # TODO: Should we calculate this at startup?
            cur = self.ld_conn.sqlite_conn().cursor()
            # table_name should be whitelisted
            cur.execute("PRAGMA table_info({})".format(table_name))
            col_names = [x[1] for x in cur.fetchall()]

        self.last_query_type = QueryType.REWRITTEN_SELECT
        self.last_query_table = table_name
        self.last_query_columns = col_names

        # If there is a LIKE, we need to disable it (by replacing with LIKE '%%')
        # When no search keys provided, select row id and column content (to be used in metadata fetch)
        # If search keys provided, select row id and search blob, perform search, then select final ids

        # Disable LIKE if exists, and flag it
        is_search = False
        search_column = None

        for t in parsed.tokens:
            if type(t) == sqlparse.sql.Where:
                last_identifier = ""
                found_like = False
                for where_t in t.tokens:
                    if type(where_t) == sqlparse.sql.Identifier:
                        last_identifier = where_t.value

                    # Only flag a LIKE if column is encrypted
                    if (last_identifier
                            in self.ld_conn.encrypted_columns[table_name]
                            and where_t.ttype == tokens.Keyword
                            and where_t.value.lower() == "like"):
                        found_like = True
                        is_search = True
                        search_column = last_identifier

                    if where_t.ttype == tokens.Literal.String.Single and found_like:
                        where_t.value = "'%%'"
                        break

        # Snapshot query here before we insert pub keys (for search to restore)
        snapshot = str(parsed)

        # Insert pub key matching
        # Matching condition
        def insert_cond(cond_sql):
            cond_parsed = sqlparse.parse(cond_sql)[0]

            # Create WHERE clause if it doesn't exist, else concatenate condition to AND
            found_where = False
            for t in parsed.tokens:
                if type(t) == sqlparse.sql.Where:
                    found_where = True
                    t.tokens.extend(sqlparse.parse(" AND ")[0].tokens)
                    t.tokens.extend(cond_parsed.tokens)

            if not found_where:
                # Historical artifact found here! Congratulations
                parsed.tokens.append(
                    sqlparse.sql.Token(sqlparse.tokens.Whitespace, ' '))
                parsed.tokens.append(
                    sqlparse.sql.Where(
                        sqlparse.sql.TokenList([
                            sqlparse.sql.Token(sqlparse.tokens.Keyword,
                                               'WHERE'),
                            sqlparse.sql.Token(sqlparse.tokens.Whitespace,
                                               ' '), *cond_parsed.tokens
                        ])))

        cond_sql = "id IN (SELECT id FROM Lockdown_{} WHERE pub_key=(?)) ".format(
            table_name)
        insert_cond(cond_sql)

        if is_search:
            for i, t in enumerate(parsed.tokens):
                if type(t) == sqlparse.sql.IdentifierList or type(
                        t
                ) == sqlparse.sql.Identifier or t.ttype == tokens.Wildcard or t.ttype == tokens.Keyword:
                    if search_keys is None:
                        # For metadata step, we want to first replace the result columns with id, column
                        parsed.tokens[i] = sqlparse.parse(
                            "id, {}".format(search_column))[0].tokens[0]
                        self.last_query_columns = ["id", search_column]
                        col_names = self.last_query_columns
                    else:
                        # For search step we want to replace with id, search_blob
                        parsed.tokens[i] = sqlparse.parse(
                            "id, search_blob")[0].tokens[0]
                    break

        # For metadata step, do nothing now (will return id, column metadata when fetchall is called)
        # For search step, run query ahead of time, find relevant ids, and then run original query (selecting those ids)
        if is_search and search_keys is not None:
            saw_from = False
            for i, t in enumerate(parsed.tokens):
                if saw_from and type(t) == sqlparse.sql.Identifier:
                    parsed.tokens[i] = sqlparse.parse(
                        "({} NATURAL JOIN Lockdown_Search_{})".format(
                            table_name, table_name))[0].tokens[0]
                    break

                if t.ttype == tokens.Keyword and t.value.lower() == "from":
                    saw_from = True

            self.cursor.execute(str(parsed), [*params, pub_key])
            rows = self.cursor.fetchall()

            found_ids = []
            for row_id, search_blob_json in rows:
                search_blob = [
                    base64.b64decode(x) for x in json.loads(search_blob_json)
                ]
                search_key = [
                    base64.b64decode(x) for x in search_keys[str(row_id)]
                ]

                if Search.search(search_blob, search_key[0], search_key[1]):
                    found_ids.append(row_id)

            # Reset query to use different condition insert
            parsed = sqlparse.parse(snapshot)[0]
            insert_cond("id IN ({}) ".format(",".join(
                str(x) for x in found_ids)))

        # TODO: investigate if this is the right position to put pub key, can the match_sql ever appear before any param?
        # TODO: We could just use a named parameter instead of a question mark? https://stackoverflow.com/questions/29686112/named-parameters-in-python-sqlite-query

        if is_search and search_keys is not None:
            # Don't insert pub key anymore, we have specific ids
            self.cursor.execute(str(parsed), params)
        else:
            self.cursor.execute(str(parsed), [*params, pub_key])
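
As a self-contained illustration of the sqlparse traversal used in this method, the FROM-clause scan can be reproduced on a made-up query like so:

import sqlparse
from sqlparse import tokens

# Standalone version of the FROM-clause scan above, on a made-up query:
# remember when FROM was seen, then take the next Identifier as the table.
sql = "SELECT id, body FROM notes WHERE id > 3"
parsed = sqlparse.parse(sql)[0]

table_name = None
saw_from = False
for t in parsed.tokens:
    if saw_from and isinstance(t, sqlparse.sql.Identifier):
        table_name = t.value
        break
    if t.ttype == tokens.Keyword and t.value.lower() == "from":
        saw_from = True

print(table_name)  # -> notes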
Example #47
import Search

word = input("enter the word : ").lower()
output = Search.searchData(word)

if type(output) == list:
    for item in output:
        print(item)
else:
    print(output)
Example #48
if __name__ == "__main__":

    # New logs and ebay directory
    Logs.New()
    Ebay.New()

    dbErrors = TinyDB(
        os.path.dirname(os.path.dirname(__file__)) + "/Logs/Errors")
    dbErrors.purge_tables()  # Reset Errors Data Base

    SearchText = list(Searching)  # Copy of the data to search (avoid mutating Searching below)
    for i in SpecificProductSearch:  # Add all the specific product search terms to SearchText
        SearchText.append(i)

    if (ReuseSearch == 0):  # If getting new search items
        Search.New()
        for SearchingTitleText in SearchText:
            TotalSearch(SearchingTitleText, SearchEverything)

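    # Pick the most recently written search database (file names are numeric timestamps)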
    DBName = str(
        max([
            float(s.replace('.json', '')) for s in os.listdir(
                str(os.path.dirname(os.path.dirname(__file__))) +
                '/DataBase/Searches/')
        ]))
    dbSearches = TinyDB(
        os.path.dirname(os.path.dirname(__file__)) + "/DataBase/Searches/" +
        DBName)
    Data = dbSearches.table("_default").all()

    Analisis(Data)  # Analyze the collected data
Example #49
 def post(self):
     book_id = self.request.get('delete_book_id')
     book = Book.query(Book.Id == book_id).get()
     if book:
         Search.delete_index(book_id)
         book.key.delete()
Example #50
    print("AddressBook is empty")

userChoice = menu()
while userChoice != 6:

    if userChoice == 1:  #add entry to addressBook
        Add.addEntry(addressBook)

    elif userChoice == 2:  #update entry to addressBook
        Update.updateEntry(addressBook)

    elif userChoice == 3:  #remove entry to addressBook
        Remove.removeEntry(addressBook)

    elif userChoice == 4:  #search for entry in addressBook
        Search.searchEntry(addressBook)

    elif userChoice == 5:  #print out entire book
        for i in addressBook:
            print(addressBook[i])

    userChoice = menu()

#output dictionary to binary file for next time
try:
    ofile = open("address.dat", "wb")  #open binary file for output
    pickle.dump(addressBook, ofile)
    ofile.close()
except Exception:
    print("Error writing file, sorry")
Example #51
import Problem
import Search

timeToLive = 100
numOfEmployedBees = 50
maxIteration = 1000

print("Bee Colony!")

problem = Problem.Problem(timeToLive)

abc = Search.Search()

abc.algorithm(problem, numOfEmployedBees, maxIteration)
Example #52
 def __init__(self):
     '''
     Initialize the Agent for the first time
     '''
     self.Initialize()
     self.searchEngine = Search.SearchEngine()
Example #53
#
# creates a new mongodb database with the first thousand
# and last thousand posts that the queries "show hn" and "showhn" find
# in collection 'posts'
#

import Search
from pymongo import Connection
import pymongo

if __name__ == '__main__':
    connection = Connection("mongodb://*****:*****@7fc2f09f.dotcloud.com:12015")
    db = connection['showhn']


    posts = Search.search_for_showhn()

    effort = 0
    retry = True
    while retry:
        posts_collection = db['posts']
        effort += 1
        try:
            posts_collection.insert(posts, safe=True)
            retry = False
        except pymongo.errors.OperationFailure:
            posts_collection.drop()
            retry = True

    print effort
Example #54
 def search(self, name, option):
     audio = Search.find()
     self.url, self.title, minutes, seconds = audio.load(name, option)
     return self.title, minutes, seconds
Example #55
    bnet = Sequential()
    arch = params["arch"]
    bnet.add(
        Dense(arch[0], input_shape=(width, ), activation=params["activation"]))
    for layer in arch[1:]:
        bnet.add(Dense(int(layer), activation=params["activation"]))
    bnet.add(Dense(4, activation="softmax"))
    optimizer = get_optimizer(params["optimizer"])
    bnet.compile(loss="categorical_crossentropy",
                 optimizer=optimizer(lr=params["lr"]),
                 metrics=["accuracy"])
    history = bnet.fit(btrain[0],
                       btrain_y,
                       validation_data=(bvalid[0], bvalid_y),
                       epochs=params["epochs"],
                       batch_size=params["batch_size"])
    classify = bnet.predict_classes(test_bdata)
    print(theautil.classifications(classify, test_blabels))
    score = bnet.evaluate(test_bdata, btest_y)
    print("Scores: %s" % score)
    return score[1]


state["f"] = f
random_results = Search.random_search(state,
                                      params,
                                      Search.heuristic_function,
                                      time=60)
random_results = sorted(random_results, key=lambda x: x['Score'])
print(random_results[-1])
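
Search.random_search is project code not shown here; a minimal time-boxed random search that matches the 'Score'-keyed result dicts sorted above (the parameter-space shape is an assumption) might be:

import random
import time

# Hedged sketch: sample a candidate from each parameter's options until the
# time budget runs out, scoring each with `objective`; the dict-of-lists
# `param_space` shape is an assumption, not taken from the snippet.
def random_search(param_space, objective, budget_seconds=60):
    results = []
    deadline = time.time() + budget_seconds
    while time.time() < deadline:
        candidate = {k: random.choice(v) for k, v in param_space.items()}
        results.append({"params": candidate, "Score": objective(candidate)})
    return results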
Example #56
 def doFind(self, pattern):
     self.lastSearchResults = Search.findInText(
         self.GetText().split(self.eol), pattern, False)
     self.lastSearchPattern = pattern
     if len(self.lastSearchResults):
         self.lastMatchPosition = 0
Example #57
 def get(self):
     [data, status, headers] = Search.Search(request, db)
     return json.loads(data), status, headers
Example #58
def run(env, agent, target):
    env.reveal_cells(agent)
    big_loop = True
    while big_loop:
        search_type = int(
            input(
                "Which search would you like to do? (Forward A* [1], Backward A* [2], Adaptive A* [3])"
            ))
        check = False
        loop = True
        count = 0
        if search_type == 1:
            agent_node = Search.Node(agent.x, agent.y, None, None, None, None,
                                     None)
            tar_node = Search.Node(target.x, target.y, None, None, None, None,
                                   None)
            while loop:
                tie_type = int(
                    input(
                        "Which way would you like to break ties? (Greater G-Value [1], Smaller G-Value [2])"
                    ))
                if tie_type == 1:
                    start = time.time()
                    prev_cell = None
                    while check is False:
                        path, expand_list = Search.forward_a(
                            agent_node, tar_node, env)
                        if path is None:
                            break
                        count += len(expand_list)
                        for k in path:
                            x = k.row
                            y = k.column
                            pos_cell = env.environment_map[x][y]
                            if pos_cell.blocked is False:
                                if pos_cell != agent and pos_cell != target:
                                    if pos_cell.representation == ".":
                                        while prev_cell != pos_cell:
                                            prev_cell.representation = " "
                                            parent = prev_cell.parent
                                            prev_cell.parent = None
                                            prev_cell = parent
                                    else:
                                        pos_cell.parent = prev_cell
                                        pos_cell.representation = "."
                                agent_node.row = x
                                agent_node.column = y
                                if (agent_node.row == tar_node.row and
                                        agent_node.column == tar_node.column):
                                    check = True
                                    break
                                else:
                                    env.reveal_cells(pos_cell)
                                prev_cell = pos_cell
                            else:
                                break
                    end = time.time()
                    if check:
                        print(
                            "Congratulations! The agent made its way to the target in "
                            + str(round(end - start, 3)) +
                            " seconds and expanded " + str(count) +
                            " cells using Forward A*.")
                    else:
                        print(
                            "Unfortunately, the agent could not find its way to the target using Forward A*. The agent expanded "
                            + str(count) +
                            " cells and discovered the blocked path in " +
                            str(round(end - start, 3)) + " seconds.")
                    loop = False
                    big_loop = False
                    env.print_env()
                elif tie_type == 2:
                    start = time.time()
                    prev_cell = None
                    while check is False:
                        path, expand_list = Search.diff_tie_break(
                            agent_node, tar_node, env)
                        if path is None:
                            break
                        count += len(expand_list)
                        for k in path:
                            x = k.row
                            y = k.column
                            pos_cell = env.environment_map[x][y]
                            if pos_cell.blocked is False:
                                if pos_cell != agent and pos_cell != target:
                                    if pos_cell.representation == ".":
                                        while prev_cell != pos_cell:
                                            prev_cell.representation = " "
                                            parent = prev_cell.parent
                                            prev_cell.parent = None
                                            prev_cell = parent
                                    else:
                                        pos_cell.parent = prev_cell
                                        pos_cell.representation = "."
                                agent_node.row = x
                                agent_node.column = y
                                if (agent_node.row == tar_node.row and
                                        agent_node.column == tar_node.column):
                                    check = True
                                    break
                                else:
                                    env.reveal_cells(pos_cell)
                                prev_cell = pos_cell
                            else:
                                break
                    end = time.time()
                    if check:
                        print(
                            "Congratulations! The agent made its way to the target in "
                            + str(round(end - start, 3)) +
                            " seconds and expanded " + str(count) +
                            " cells using Forward A*.")
                    else:
                        print(
                            "Unfortunately, the agent could not find its way to the target using Forward A*. The agent expanded "
                            + str(count) +
                            " cells and discovered the blocked path in " +
                            str(round(end - start, 3)) + " seconds.")
                    loop = False
                    big_loop = False
                    env.print_env()
                else:
                    print("Error: Please enter valid input")
        elif search_type == 2:
            start = time.time()
            agent_node = Search.Node(agent.x, agent.y, None, None, None, None,
                                     None)
            tar_node = Search.Node(target.x, target.y, None, None, None, None,
                                   None)
            prev_cell = None
            while check is False:
                path, expand_list = Search.forward_a(tar_node, agent_node, env)
                if path is None:
                    break
                count += len(expand_list)
                path.reverse()
                for k in path:
                    x = k.row
                    y = k.column
                    pos_cell = env.environment_map[x][y]
                    if pos_cell.blocked is False:
                        if pos_cell != agent and pos_cell != target:
                            if pos_cell.representation == ".":
                                while prev_cell != pos_cell:
                                    prev_cell.representation = " "
                                    parent = prev_cell.parent
                                    prev_cell.parent = None
                                    prev_cell = parent
                            else:
                                pos_cell.parent = prev_cell
                                pos_cell.representation = "."
                        agent_node.row = x
                        agent_node.column = y
                        if (agent_node.row == tar_node.row
                                and agent_node.column == tar_node.column):
                            check = True
                            break
                        else:
                            env.reveal_cells(pos_cell)
                        prev_cell = pos_cell
                    else:
                        break
            end = time.time()
            if check:
                print(
                    "Congratulations! The agent made its way to the target in "
                    + str(round(end - start, 3)) + " seconds and expanded " +
                    str(count) + " cells using Backward A*.")
            else:
                print(
                    "Unfortunately, the agent could not find its way to the target using Backward A*. The agent expanded "
                    + str(count) +
                    " cells and discovered the blocked path in " +
                    str(round(end - start, 3)) + " seconds.")
            big_loop = False
            env.print_env()
        elif search_type == 3:
            start = time.time()
            agent_node = Search.Node(agent.x, agent.y, None, None, None, None,
                                     None)
            tar_node = Search.Node(target.x, target.y, None, None, None, None,
                                   None)
            prev_cell = None
            expand_list = None
            path = None
            search_func = 0
            while check is False:
                g_goal = -1
                if search_func == 0:
                    path, expand_list = Search.forward_a(
                        agent_node, tar_node, env)
                    expanded_list = expand_list
                    search_func = 1
                else:
                    path, expand_list = Search.adaptive_a(
                        agent_node, tar_node, g_goal, expanded_list, env)
                    set_1 = set(expanded_list)
                    set_2 = set(expand_list)
                    set_diff = set_1 - set_2
                    expanded_list = expanded_list + list(set_diff)
                if path is None:
                    break
                g_goal += len(path)
                count += len(expand_list)
                for k in path:
                    x = k.row
                    y = k.column
                    pos_cell = env.environment_map[x][y]
                    if pos_cell.blocked is False:
                        if pos_cell != agent and pos_cell != target:
                            if pos_cell.representation == ".":
                                while prev_cell != pos_cell:
                                    prev_cell.representation = " "
                                    parent = prev_cell.parent
                                    prev_cell.parent = None
                                    prev_cell = parent
                            else:
                                pos_cell.parent = prev_cell
                                pos_cell.representation = "."
                        agent_node.row = x
                        agent_node.column = y
                        if (agent_node.row == tar_node.row
                                and agent_node.column == tar_node.column):
                            check = True
                            break
                        else:
                            env.reveal_cells(pos_cell)
                            prev_cell = pos_cell
                    else:
                        break
            end = time.time()
            if check:
                print(
                    "Congratulations! The agent made its way to the target in "
                    + str(round(end - start, 3)) + " seconds and expanded " +
                    str(count) + " cells using Adaptive A*.")
            else:
                print(
                    "Unfortunately, the agent could not find its way to the target using Adaptive A*. The agent expanded "
                    + str(count) +
                    " cells and discovered the blocked path in " +
                    str(round(end - start, 3)) + " seconds.")
            big_loop = False
            env.print_env()
        else:
            print("Error: Please enter valid input")
Example #59
def SearchAPI(request):
    if (request.method == 'OPTIONS'):
        return '', 204, headers
    return Search.Search(request, db)
Example #60
def menu():
    try:
        while True:
            try:

                print("""
                        |-------------------------------------------------------------------------------|   
                        |   1.Thu thập link tuyển dụng                                                  |
                        |   2.Thu thập thông tin tuyển dụng(Tiêu đề, mô tả, vị trí tuyển dụng, . . .)   |
                        |   3.Thu thập thông tin ứng viên(Tên, bằng cấp, địa chỉ, . . )                 |
                        |   --------------------------------------------------------------------------- |
                        |   4.Loại bỏ Stopword                                                          |
                        |   5.Thống kế bao nhiêu ngành nghề đang tuyển dụng(Nhà tuyển dụng)             |
                        |   6.Thống kê địa điểm tuyển dụng                                              |
                        |   7.Thống kê bao nhiêu ngành nghề đang cần việc làm(Ứng viên)                 |                                              
                        |   8.Chuyển thông tin về dạng số tf-idf                                        |
                        |   9. Tìm kiếm thông tin                                                       |
                        |   --------------------------------------------------------------------------- |
                        |   0.Thoát                                                                     |
                        |------------------------------------------------------------------------------ |
                        """)
                option = int(input("Chọn chức năng: "))
                if option == 1:
                    get_links_job_from_range_page()
                elif option == 2:
                    craw_from_links()
                elif option == 3:
                    get_ung_vien_theo_phan_trang()
                elif option == 4:
                    print("""
                Loại bỏ stopword trong:
                1. tieude
                2. mota
                3. Ví dụ loại bỏ stopword""")
                    a = int(input("Chọn: "))
                    if a == 1:
                        print("Tiêu đề sau khi loại bỏ stopword: ")
                        for i in loai_bo_stopword_trong_danhsach(tieude):
                            print(i)
                    elif a == 2:
                        print("Mô tả sau khi loại bỏ stopword: ")
                        for i in loai_bo_stopword_trong_danhsach(mota):
                            print(i)
                    else:
                        sentence = input("Nhập vào 1 câu: ")
                        print("------------------ Câu đã nhập: ", sentence)
                        print(
                            "------------------ Câu sau khi loại bỏ stopword: ",
                            loai_bo_stopword(sentence))
                elif option == 5:
                    thong_ke_nganh_nghe(nganh_nghe)
                elif option == 6:
                    thong_ke_dia_diem_tuyen_dung(dia_diem)
                elif option == 7:
                    thong_ke_nganh_nghe_ung_vien(nganh_nghe_ung_tuyen)
                elif option == 8:
                    bow_tf_idf.main()
                elif option == 9:
                    Search()
                else:
                    print("Đã thoát")
                    break
                    pass
            except Exception as erro:
                print("Hãy chọn đúng chức năng!")
                print(erro)
    except Exception as erro:
        print(erro)
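
For reference, option 8 delegates to the project's bow_tf_idf module, which is not shown; a minimal tf-idf vectorization of the collected titles with scikit-learn (an assumed substitute, not the project's actual code) would be:

from sklearn.feature_extraction.text import TfidfVectorizer

# Hedged substitute for bow_tf_idf.main(): vectorize the collected titles
# (tieude) with scikit-learn; the library choice is an assumption.
def tfidf_matrix(documents):
    vectorizer = TfidfVectorizer()
    matrix = vectorizer.fit_transform(documents)  # rows: documents, cols: terms
    return matrix, vectorizer.get_feature_names_out()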