def test_hits(self):
    # lru
    sa_cache = Cache.Cache(2, 8)
    data = [(x, x**2) for x in range(12)]
    for i in range(8):
        sa_cache.put(data[i][0], data[i][1])
    for i in range(8):
        sa_cache.get(data[i][0])
        self.assertEqual(i + 1, sa_cache.get_hits())
    for i in range(8, 12):
        sa_cache.put(data[i][0], data[i][1])
    for i in range(0, 4):
        sa_cache.get(data[i][0])
        self.assertEqual(i + 1, sa_cache.get_misses())
    # mru
    sa_cache = Cache.Cache(2, 8, "mru")
    for i in range(8):
        sa_cache.put(data[i][0], data[i][1])
    for i in range(8):
        sa_cache.get(data[i][0])
        self.assertEqual(i + 1, sa_cache.get_hits())
    for i in range(8, 12):
        sa_cache.put(data[i][0], data[i][1])
    for i in range(4, 8):
        sa_cache.get(data[i][0])
        self.assertEqual(i - 3, sa_cache.get_misses())
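# ---------------------------------------------------------------------------
# The tests above and below exercise an n-way set-associative Cache with hit
# and miss counters and an optional "lru"/"mru" replacement policy. The real
# Cache.Cache is not shown in this file; the following is a minimal sketch of
# the interface the tests assume. The class name and internals are
# illustrative only, not the actual implementation.
from collections import OrderedDict

class SketchCache(object):
    def __init__(self, ways, capacity, policy="lru"):
        self.ways = ways
        self.sets = capacity // ways                 # number of sets
        self.policy = policy
        self._cache = [OrderedDict() for _ in range(self.sets)]
        self._hits = 0
        self._misses = 0

    def _get_set_num(self, key):
        return hash(key) % self.sets

    def put(self, key, value):
        s = self._cache[self._get_set_num(key)]
        if key not in s and len(s) >= self.ways:
            # evict the most- or least-recently-used entry per policy
            s.popitem(last=(self.policy == "mru"))
        s[key] = value
        s.move_to_end(key)                           # mark as most recently used

    def get(self, key):
        s = self._cache[self._get_set_num(key)]
        if key in s:
            self._hits += 1
            s.move_to_end(key)
            return s[key]
        self._misses += 1
        return None

    def get_hits(self):
        return self._hits

    def get_misses(self):
        return self._misses
# ---------------------------------------------------------------------------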
def test_built_in_types(self):
    sa_cache = Cache.Cache(2, 8)
    # test put & get
    sa_cache.put("Username", "johanan_lai1997")
    self.assertEqual("johanan_lai1997", sa_cache.get("Username"))
    sa_cache.put("Save password", False)
    self.assertFalse(sa_cache.get("Save password"))
    sa_cache.put("Login attempts", 3)
    self.assertEqual(3, sa_cache.get("Login attempts"))
    sa_cache.put(200, 500)
    self.assertEqual(500, sa_cache.get(200))
    # test remove
    sa_cache.remove("Login attempts")
    self.assertIsNone(sa_cache.get("Login attempts"))
    self.assertEqual(500, sa_cache.remove(200))
    self.assertIsNone(sa_cache.get(200))
    sa_cache.remove("Username")
    sa_cache.remove("Save password")
    self.assertIsNone(sa_cache.get("Username"))
    self.assertIsNone(sa_cache.get("Save password"))
    # test clear
    for entry in sa_cache._cache:
        self.assertFalse(entry)
    sa_cache.put("Username", "johanan_lai1997")
    sa_cache.put("Save password", False)
    sa_cache.put("Login attempts", 3)
    sa_cache.put(200, 500)
    sa_cache.clear()
    for entry in sa_cache._cache:
        self.assertFalse(entry)
    # test update
    sa_cache.put("Prompt on quit", True)
    self.assertTrue(sa_cache.get("Prompt on quit"))
    sa_cache.update("Prompt on quit", False)
    self.assertFalse(sa_cache.get("Prompt on quit"))
    # test replacement
    sa_cache2 = Cache.Cache(2, 2)
    sa_cache2.put("string1", 10923)
    self.assertEqual(10923, sa_cache2.get("string1"))
    sa_cache2.put("string2", False)
    self.assertEqual(10923, sa_cache2.get("string1"))
    self.assertFalse(sa_cache2.get("string2"))
    sa_cache2.get("string1")
    sa_cache2.put(1000, "hello world")
    self.assertEqual("hello world", sa_cache2.get(1000))
    self.assertIsNone(sa_cache2.get("string2"))
def level_chosen(self, event):
    level = self.level_choice.GetStringSelection()  # Get level chosen
    self.subject_choice.Clear()
    self.paper_checklist.Clear()
    self.year_choice.Clear()
    self.season_choice.Clear()
    self.num_choice.Clear()
    self.region_choice.Clear()
    self.pairs_info = {}
    self.files_info = {}
    if level == self.level_list[0]:  # Not choosing a level
        return
    # Cache
    global cache_folder
    cache_subject = os.path.join(cache_folder, "GCE Guide %s" % level)
    if not os.path.exists(cache_subject):
        self.subject_dict = Crawler.visit_level(Crawler.levels_dict[level])  # Return subject list
        if self.subject_dict == -1:  # Connection error
            wx.MessageBox("Please check your Internet connection and retry.", "Connection Error")
            self.level_choice.SetSelection(0)
            return
        else:
            Cache.store(self.subject_dict, cache_subject)
    else:
        self.subject_dict = Cache.load(cache_subject)
    subject_list = ["----- Select subject -----"] + [each for each in self.subject_dict]
    self.subject_choice.Set(subject_list)  # Update subject list
    self.subject_choice.SetSelection(0)
def load(self, url="", onLoad=None, onProgress=None, onError=None):
    if self.path:
        url = self.path + url
    cached = Cache.get(url)
    if cached:
        self.manager.itemStart(url)
        if onLoad:
            onLoad(cached)
        self.manager.itemEnd(url)
        return cached
    try:
        self.manager.itemStart(url)
        cached = pygame.image.load(url)
    except IOError as error:
        if onError:  # fixed typo: was "onErorr", which would raise a NameError
            onError(error)
        self.manager.itemEnd(url)
        self.manager.itemError(url)
    if onProgress:
        onProgress()
    Cache.add(url, cached)
    if onLoad:
        onLoad(cached)
    self.manager.itemEnd(url)
    return cached
def GraphStocks(site):
    cache = Cache.Fetch("json/cache.json", "all")
    keys = Cache.Fetch("json/prices.json")["keys"]
    metal = Cache.Fetch("json/prices.json")["metal"]
    plt.figure(1, figsize=(16, 6.67), dpi=75)
    plt.clf()
    history = []
    prices = []
    i = 0
    for time in sorted(cache)[-10:]:
        print("Result found at " + time)
        history.append(time)
        prices.append(cache[time][site])
        i = i + 1
    plt.ylabel('Value in refined metal')
    plt.title(site + " ref \nTotal: " + str(prices[i - 1]) + " refined "
              + locale.format("%d", prices[i - 1] / keys, grouping=True) + " keys $"
              + locale.format("%d", prices[i - 1] * metal, grouping=True) + " USD")
    x = range(i)
    plt.xticks(x, history)
    plt.plot(x, prices, "g")
    plt.ylim(0)
    print("Saved as graphs/" + site + ".svg")
    plt.savefig("graphs/" + site + ".svg")
def on_radio_button(self, event):
    choice = event.GetEventObject()
    if choice.GetLabel() == "Use default path":
        self.current_setting["Default path mode"] = True
    else:
        self.current_setting["Default path mode"] = False
    Cache.store(self.current_setting, self.config_path)
def test_different_n(self):
    # lru
    sa_cache_2way = Cache.Cache(2, 16)
    sa_cache_4way = Cache.Cache(4, 16)
    data = [(x, x**2) for x in range(100)]
    for i in range(100):
        sa_cache_2way.put(data[i][0], data[i][1])
    for i in range(100):
        sa_cache_4way.put(data[i][0], data[i][1])
    self.assertIn(2, [len(sa_cache_2way._cache[set_num]) for set_num in range(8)])
    self.assertIn(4, [len(sa_cache_4way._cache[set_num]) for set_num in range(4)])
    # mru
    sa_cache_3way = Cache.Cache(3, 15, "mru")
    sa_cache_5way = Cache.Cache(5, 15, "mru")
    data = [(x, x**2) for x in range(100)]
    for i in range(100):
        sa_cache_3way.put(data[i][0], data[i][1])
    for i in range(100):
        sa_cache_5way.put(data[i][0], data[i][1])
    self.assertIn(3, [len(sa_cache_3way._cache[set_num]) for set_num in range(5)])
    self.assertIn(5, [len(sa_cache_5way._cache[set_num]) for set_num in range(3)])
def setCache(self, S, E, B, m):
    # Grow memory to 2**m addressable words. The original used "1 >> m",
    # which is always 0 for m > 0 and so never extended the memory.
    self.mem.extend([0] * ((1 << m) - self.size))
    # self.cache.S = S
    # self.cache.E = E
    # self.cache.B = B
    # self.cache.m = m
    self.cache = Cache(S, E, B, m)
def store(key, value):
    key = getKeyPayload(key)
    Cache.update(key, value)
    if memory_dictionary[key] is None:  # "is None", not "== None"
        raise Exception('Location in memory is not defined: {}'.format(key))
    memory_dictionary[key] = value
def _getInfo(hashFileName, httpCall, *httpParas):
    result = Cache.getCache(hashFileName)
    if result == '':
        result = httpCall(*httpParas)
        Cache.writeCache(hashFileName, result)
    return result
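# Hypothetical usage of the _getInfo helper above: any HTTP-style callable can
# be wrapped so that repeat calls keyed by the same hashFileName are served
# from the cache file instead of the network. fetch_url, the cache file name,
# and the target URL below are made-up illustrations, not real call sites.
import urllib2

def fetch_url(url):
    return urllib2.urlopen(url).read()

profile = _getInfo('profile_1234.cache', fetch_url, 'http://example.com/profile/1234')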
def __init__(self):
    self.methods = ['GET', 'POST', 'HEAD', 'PUT', 'DELETE']
    self.imagesSupported = ['jpg', 'png', 'gif']
    self.mimeTypes = {'jpg': 'image/jpeg', 'png': 'image/png', 'gif': 'image/gif',
                      'html': 'text/html', 'mp3': 'audio/mpeg', 'mp4': 'video/mp4',
                      'application/x-www-form-urlencoded': 'application/json'}
    self.status = {'404': 'HTTP/1.1 404 Not Found', '400': 'HTTP/1.1 400 Bad Request',
                   '403': 'HTTP/1.1 403 Forbidden', '200': 'HTTP/1.1 200 OK'}
    self.cache = Cache()
    self.authenticator = Authenticator()
    self.timeout = 10
def getRadar(self, code):
    self._radar = Cache.read(BOM.modcachekey + '-radar(' + code + ')')
    if self._radar is None:
        img_file = urllib2.urlopen(self._radarbaseurl + code + '.gif')
        self._radar = img_file.read()
        Cache.write(BOM.modcachekey + '-radar(' + code + ')', self._radar, 360)
    return Image.open(StringIO(self._radar))
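# The BOM helpers in this file all rely on a module-level Cache with a simple
# TTL contract: read(key) returns the stored value or None, and
# write(key, value, ttl_seconds) stores the value with an expiry. A minimal
# in-memory sketch of that contract (an assumption for illustration, not the
# real Cache module, which appears to persist to disk):
import time as _time

_ttl_store = {}

def _sketch_read(key):
    value, expiry = _ttl_store.get(key, (None, 0))
    return value if _time.time() < expiry else None

def _sketch_write(key, value, ttl):
    _ttl_store[key] = (value, _time.time() + ttl)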
def getFeed(self, feed_url):
    if not self.feed_url and self.provider.isDuplicateFeed(feed_url):
        self.dead = True
        self.changed()
    else:
        self.feed_url = feed_url
        # if we have a feed object, then I'm not interested in re-parsing a stale file.
        wantStale = not self.feed
        Cache.getContentOfUrlAndCallback(
            callback=self.gotFeed,
            url=feed_url,
            username=self.username(),
            password=self.password(),
            timeout=self.timeout(),
            wantStale=wantStale,
            failure=self.failed)
def __init__(self, blockno, wordsinblock, fileloc):
    mem = []
    with open(fileloc) as f:
        lines = f.readlines()
    for line in lines:
        # equivalent to the deprecated string.split(string.lower(line))
        token = line.lower().split()
        if token[0] != 'go':
            mem.append((int(token[1]), int(token[0], 0)))
    self.codeCache = Cache.Cache(mem, blockno, wordsinblock)
    self.dataCache = Cache.Cache(mem, blockno, wordsinblock)
def on_change_path(self, event):
    dlg = wx.DirDialog(self, "Choose the default folder for past paper")
    if dlg.ShowModal() == wx.ID_OK:
        folder_directory = dlg.GetPath()
        self.current_setting["Default path"] = folder_directory
        self.default_directory.SetLabel(folder_directory)
        Cache.store(self.current_setting, self.config_path)
        dlg.Destroy()
    else:
        dlg.Destroy()
        return
def __init__(self, parent):
    wx.Panel.__init__(self, parent)

    self.config_path = Cache.preference_directory()
    self.current_setting = Cache.load(self.config_path)

    level_txt = wx.StaticText(self, label="Default level:")
    self.level_choice = wx.Choice(self, choices=["--- Select level ---", "IGCSE", "AS & A-Level", "O-Level"])
    self.level_choice.SetSelection(self.current_setting["Default level"])
    self.Bind(wx.EVT_CHOICE, self.on_choose_level, self.level_choice)
    level_sizer = wx.BoxSizer(wx.HORIZONTAL)
    level_sizer.Add(level_txt, flag=wx.RIGHT, border=5)
    level_sizer.Add(self.level_choice)

    split_after_level = wx.StaticLine(self, style=wx.LI_HORIZONTAL)

    download_txt = wx.StaticText(self, label="Download path:")
    ask_radio_button = wx.RadioButton(self, label="Ask every time")
    path_radio_button = wx.RadioButton(self, label="Use default path")
    if self.current_setting["Default path mode"]:
        path_radio_button.SetValue(True)
    else:
        ask_radio_button.SetValue(True)
    self.Bind(wx.EVT_RADIOBUTTON, self.on_radio_button)
    change_button = wx.Button(self, label="change", size=(65, -1))
    self.Bind(wx.EVT_BUTTON, self.on_change_path, change_button)
    set_path_sizer = wx.BoxSizer(wx.HORIZONTAL)
    set_path_sizer.Add(path_radio_button, flag=wx.ALIGN_CENTER_VERTICAL)
    set_path_sizer.Add(change_button, flag=wx.LEFT | wx.ALIGN_CENTER_VERTICAL, border=5)
    self.default_directory = wx.StaticText(self, label=self.current_setting["Default path"])

    split_after_download = wx.StaticLine(self, style=wx.LI_HORIZONTAL)

    border = 10
    general_sizer = wx.BoxSizer(wx.VERTICAL)
    general_sizer.Add(level_sizer, flag=wx.LEFT | wx.TOP, border=border)
    general_sizer.AddSpacer(6)
    general_sizer.Add(split_after_level, flag=wx.EXPAND | wx.RIGHT | wx.LEFT, border=border)
    general_sizer.AddSpacer(2)
    general_sizer.Add(download_txt, flag=wx.LEFT, border=border)
    general_sizer.AddSpacer(5)
    general_sizer.Add(ask_radio_button, flag=wx.LEFT, border=border)
    general_sizer.AddSpacer(3)
    general_sizer.Add(set_path_sizer, flag=wx.LEFT, border=border)
    general_sizer.AddSpacer(2)
    general_sizer.Add(self.default_directory, flag=wx.LEFT, border=25)
    general_sizer.AddSpacer(6)
    general_sizer.Add(split_after_download, flag=wx.EXPAND | wx.RIGHT | wx.LEFT, border=border)
    self.SetSizer(general_sizer)
def __init__(self, provider, url):
    ProviderAtom.__init__(self, provider, url)
    self.username = re.search(r'/traveller/([^/]+)', self.url).group(1)
    self.name = "Dopplr / %s" % self.username
    self.response = None
    self.fail = None
    self.token = NSUserDefaults.standardUserDefaults().stringForKey_("dopplrToken")
    if not self.token:
        return
    url = "https://www.dopplr.com/api/traveller_info.js?token=%s&traveller=%s" % (self.token, self.username)
    Cache.getContentOfUrlAndCallback(
        callback=self.gotDopplrData,
        url=url,
        timeout=3600,
        wantStale=True,
        failure=self.failed)
def getSocialGraphFor(self, url, more_urls=[]):
    if not re.match(r'http', url):
        return
    if url in Extractor.SOCIAL_GRAPH_CACHE:
        print_info("using cached social graph data")
        self.addClues(Extractor.SOCIAL_GRAPH_CACHE[url], more_urls)
        return
    api = "http://socialgraph.apis.google.com/lookup?pretty=1&fme=1&edo=1&edi=1"
    api += "&q=" + quote(url, '')
    print_info("Social graph API call to " + api)
    # TODO - respect more_urls here
    Cache.getContentOfUrlAndCallback(
        callback=self.gotSocialGraphData,
        url=api,
        timeout=3600 * 48)  # huge timeout here
def getParsedRenderedSite(url):
    # check whether a fully rendered page has already been downloaded
    site = Cache.loadRenderedSiteFromCache()
    if site:
        return BeautifulSoup(site, 'html.parser')  # parse the cached copy
    else:
        # the Selenium webdriver automates web interaction; Chrome must be installed
        driver = webdriver.Chrome('./pokemon_scraper/chromedriver')
        driver.get(url)  # load the website and render its content
        Cache.writeRenderedSiteToCache(driver.page_source)
        # take the rendered HTML document and parse it with Beautiful Soup
        return BeautifulSoup(driver.page_source, 'html.parser')
def main():
    if len(sys.argv) == 4:
        file = open(sys.argv[1])
        if file:
            if sys.argv[2].lower() == "lru":
                try:
                    cacheSize = int(sys.argv[3])
                    cache = Cache(cacheSize)
                    cache.politicaLRU(file)
                except ValueError:
                    print "Please enter a number for the cache size parameter"
            elif sys.argv[2].lower() == "clock":
                try:
                    cacheSize = int(sys.argv[3])
                    cache = Cache(cacheSize)
                    cache.politicaClock(file)
                except ValueError:
                    print "Please enter a number for the cache size parameter"
            else:
                print "The value given for the <politica> parameter is invalid"
        else:
            print "File given in the <file> parameter not found"
    else:
        print "Wrong number of parameters; please invoke as follows:"
        print "python cacheSimulator.py <file> <politica> <cacheSize>"
def main():
    cache = Cache()
    print("ENTER A COMMAND IN ONE OF THESE FORMATS:\nread address\nwrite address value\nshow\nexit\n")
    while True:
        entrada = list(map(str, input().split()))
        command = entrada[0]
        # elif chain so the final else only fires for unknown commands
        # (the original used independent ifs, so "Command not found" was
        # printed after every read/write as well)
        if command.casefold() == 'read':
            cache.read(int(entrada[1]))
        elif command.casefold() == 'write':
            try:
                cache.write(int(entrada[1]), int(entrada[2]))
            except IndexError:
                print("VALUE NOT PROVIDED")
                continue
        elif command.casefold() == 'show':
            cache.show()
        elif command.casefold() == 'exit' or command.casefold() == 'sair':
            return
        else:
            print("Command not found")
def emTraining(sentences, sTest):
    print 'Beginning EM training...'
    globalStart = time.time()
    stTable = initStTable(sentences)
    epsilon = estimateEpsilon(sentences)
    if model == 2:  # "is" tests identity, not equality; use == for ints
        alignCj, alignC = initAligns(sentences)
        unifAlignP = 1.0 / len(alignCj)
        # alignProbs = {align: unifAlignP for align in alignCj}
        alignProbs = dict(zip(alignCj, [unifAlignP] * len(alignCj)))
    else:
        alignProbs = None
    likelihoodCache = Cache.Cache(runType + '.likelihood', [])
    i = 0
    while i < iterations:
        print "Iteration " + str(i)
        start = time.time()
        stCache = Cache.Cache('stTable.' + runType + '.iter' + str(i), [], True)
        if not stCache.cache:
            if model == 1:
                counts = collectCounts(sentences, stTable)
                stTable = translationTable(counts)
            else:
                counts, alignCj, alignC = collectCounts(sentences, stTable, alignProbs)
                stTable = translationTable(counts)
                alignProbs = alignments(alignCj, alignC)
            stCache.cache = stTable
            if i == 0 or (i + 1) % 5 == 0:
                if not evaluate:
                    stCache.save()
        # else:
        #     stTable = stCache.cache
        viterbiFile = 'Output/' + runType + '.viterbi.iter' + str(i)
        outputViterbi(sTest, stTable, viterbiFile, alignProbs)
        likelihood = logLikelihood(sentences, stTable, epsilon, alignProbs)
        likelihoodCache.cache.append(likelihood)
        if not evaluate:
            likelihoodCache.save()
        i += 1
    if evaluate:
        outputViterbi(sentences, stTable, 'alignments.out', alignProbs)
    print "EMtraining finished after", iterations, "iterations in", getDuration(globalStart, time.time()), "."
def main():
    instruction_cache = cache.Cache(16, 1, 1024)
    data_cache = cache.Cache(16, 8, 256)
    trace_addresses = cacheFunctions.read_trace_from_file()
    print("Starting timer now")
    start = time()
    cacheFunctions.analyse_the_trace(trace_addresses, instruction_cache, data_cache)
    end = time()
    print("Timer ended..")
    time_taken = end - start
    print_final_results(time_taken, instruction_cache, data_cache)
def callComponent(self, callProtocol, name, argDict, cache, compType, srcModTime):
    """
    a protocol for component calls where the actual component called is
    the first encountered with a given base name, starting at the
    specified path and ascending upwards until the cascade root
    (by default, /).
    """
    # parse name to find if a root is specified
    i = name.find('...')
    if i == -1:
        root = '/'
        path = name
    else:
        root = name[:i] or '/'
        path = name[i+3:]
    path = rectifyRelativeName(path)
    root = rectifyRelativeName(root)
    # this will raise an exception if not found
    comp = Cache._findPath(path, root)
    return defaultHandler.callComponent(None, comp, argDict, cache, compType, srcModTime)
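# Illustrative check of the "root...path" parsing performed above: everything
# before the literal "..." is the cascade root, everything after it is the
# component path, and an absent or empty root falls back to "/". This is a
# standalone demo with the name lookup stubbed out, not part of the handler.
def split_cascade_name(name):
    i = name.find('...')
    if i == -1:
        return '/', name
    return (name[:i] or '/'), name[i + 3:]

assert split_cascade_name('header.comp') == ('/', 'header.comp')
assert split_cascade_name('/site...header.comp') == ('/site', 'header.comp')
assert split_cascade_name('...header.comp') == ('/', 'header.comp')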
def getVocabularies(sentences, sFile, tFile):
    start = time.time()
    srcVoc = Cache.Cache(sFile + '.voc', [])
    tarVoc = Cache.Cache(tFile + '.voc', [])
    if not srcVoc.cache or not tarVoc.cache:
        for sSnt, tSnt in sentences:
            for s in sSnt:
                if s not in srcVoc.cache:
                    srcVoc.cache.append(s)
            for t in tSnt:
                if t not in tarVoc.cache:
                    tarVoc.cache.append(t)
        srcVoc.save()
        tarVoc.save()
    print 'Vocabularies obtained in', getDuration(start, time.time())
    return srcVoc.cache, tarVoc.cache
def Analyze(cache_filepath):
    # Load the cache.
    data = Cache.Load(cache_filepath)

    # Choose two random indices. randint is inclusive on both ends, so the
    # upper bound must be len(data) - 1 (the original's len(data) could
    # index past the end of the list).
    random.seed()
    i1 = random.randint(0, len(data) - 1)
    i2 = random.randint(0, len(data) - 1)

    # Get the random data points.
    d1 = data[i1]
    d2 = data[i2]

    # Get the list of prime fibonacci numbers.
    FIBS = [89, 233, 1597, 28657, 514229, 433494437L, 2971215073L, 99194853094755497L]

    # Try all combinations of fibonacci numbers.
    for fib1 in FIBS:
        for fib2 in FIBS:
            nmod1 = (d1['input'] * (fib1 + d1['name']) - d1['output'])
            nmod2 = (d2['input'] * (fib2 + d2['name']) - d2['output'])
            print(fractions.gcd(nmod1, nmod2))
def RAZ(self):
    self.pc = 0
    self.regs = [0 for k in range(self.n_reg)]
    self.Cach.Mem.data = [None for k in range(self.n_mem)]
    S = self.Cach.S
    B = self.Cach.B
    self.Cach.Cache = []
    for k in range(S):
        self.Cach.Cache += [C.Bloc(0, None, B)]
    self.instrNum = 0
    self.reg1 = 0
    self.reg2 = 0
    self.imm = 0
    self.o = 0
    self.a = 0
    self.n = 0
    self.c_cycle = 0
    self.instruction = ''
    self.running = 1  # TODO: check whether this is actually necessary
def LexicalAnalyze(Sentence, schema="full"):
    try:
        logging.debug("-Start LexicalAnalyze: tokenize")
        Sentence = invalidchar_pattern.sub(u'\uFFFD', Sentence)
        if Sentence in Cache.SentenceCache:
            Dag = DependencyTree.DependencyTree()
            Dag.transform(Cache.SentenceCache[Sentence])
            # return Cache.SentenceCache[Sentence], Dag, None  # assume ResultWinningRules is none.
        ResultNodeList, Dag, ResultWinningRules = LexicalAnalyzeTask(Sentence, schema)
        if schema == "full" and utils.runtype != "debug" and utils.DisableDB is False:
            if len(Cache.SentenceCache) < utils.maxcachesize:
                Cache.SentenceCache[Sentence] = ResultNodeList
            Cache.WriteSentenceDB(Sentence, ResultNodeList)
        # if ParserConfig.get("main", "runtype").lower() == "debug":
        #     t = Thread(target=Cache.WriteWinningRules_Async, args=(Sentence, ResultWinningRules))
        #     t.start()
        #     # Cache.WriteWinningRules(Sentence, ResultWinningRules)
        logging.debug("-End LexicalAnalyze")
    except Exception as e:
        logging.error("Overall Error in LexicalAnalyze({}) :".format(Sentence))
        logging.error(e)
        logging.error(traceback.format_exc())
        return None, None, None
    return ResultNodeList, Dag, ResultWinningRules
def test_cache_replacement_LRU(self):
    sa_cache = Cache.Cache(2, 8, "lru")
    data = [(x, x**2) for x in range(11)]
    collision = dict()
    for i in range(8, 11):
        collision[data[i][0]] = []
    for i in range(8):
        sa_cache.put(data[i][0], data[i][1])
        for j in range(8, 11):
            if sa_cache._get_set_num(data[i][0]) == sa_cache._get_set_num(data[j][0]):
                collision[data[j][0]].append(data[i][0])
    sa_cache.put(data[8][0], data[8][1])
    self.assertIsNone(sa_cache.get(collision[data[8][0]][0]))
    self.assertEqual(data[8][1], sa_cache.get(data[8][0]))
    # update timestamp by accessing through get, check it is not replaced
    sa_cache.get(collision[data[9][0]][0])
    sa_cache.put(data[9][0], data[9][1])
    self.assertIsNotNone(sa_cache.get(collision[data[9][0]][0]))
    self.assertEqual(data[9][1], sa_cache.get(data[9][0]))
    # touch the entry via get again before inserting, check it is not replaced
    # (the original comment said "through update", but the code uses get)
    sa_cache.get(collision[data[10][0]][0])
    sa_cache.put(data[10][0], data[10][1])
    self.assertIsNotNone(sa_cache.get(collision[data[10][0]][0]))
    self.assertEqual(data[10][1], sa_cache.get(data[10][0]))
def _renderComponentAndCache(name, argDict, auxArgs, compType, srcModTime, cached):
    DEBUG(COMPONENT, "_renderComponentAndCache")
    try:
        mashed = argDict.copy()
        (out, cache_exp_time) = _realRenderComponent(name, argDict, auxArgs, compType, srcModTime)
        mashed.update(auxArgs)
        Cache.putCachedComponent(name, mashed, out, cache_exp_time)
    except:
        if cfg.Configuration.fallbackToCache and cached:
            DEBUG(COMPONENT, "execution explosion and fallback active")
            Error.logException()
            DEBUG(COMPONENT, "After logexc")
            return cached.out
        raise
    return out
def fetch(self, data='', page=''):
    if not page:
        page = self._page
    post_url = self.BASE_URL + '/gdweb/CombinationScarch.aspx'
    get_url = self.BASE_URL + '/gdweb/ScarchList.aspx?page=' + str(page)
    if not data:
        data = self.rule.make()
    cache_key = 'search' + ':' + data + ':' + str(page)
    html = Cache.get(cache_key)
    if not html:
        r = Request()
        r.post(post_url, data)
        html = r.get(get_url)
        Cache.set(cache_key, html, 3 * 30 * 24 * 60 * 60)  # cache for ~3 months
    self._html = html
    return self
def __init__(self, *args):
    # Default values for some important options...
    self.ip_cache = {}
    self.cache = Cache.Cache()
    self.associations = Associations.Associations()
    self.setDefaultOpts()
    self.greenLight()
def insert_raw(self, item):
    '''insert a new item into the cache
    return the information (stamp, hash) on success, None otherwise

    item -- a file-like object containing an image
    '''
    if item is None:
        return None
    position = item.tell()
    item.seek(0)
    hash_ = Cache.get_file_hash(item)
    if hash_ is None:
        return None
    path = os.path.join(self.path, hash_)
    last_path = os.path.join(self.path, 'last')
    item.seek(0)
    handle = file(path, 'w+b', 0700)
    handle.write(item.read())
    handle.close()
    shutil.copy2(path, last_path)
    item.seek(position)
    return self.__add_entry(hash_)
def getFeedUrl(self):
    # it's very unlikely that the feed source will move
    # TODO - check stale cache first. Man, the feed provider is too complicated.
    # specialCaseFeedUrl returns None to mean 'no special case',
    # a blank string to mean 'no feed here'
    special = self.specialCaseFeedUrl(self.url)
    if special is not None:
        if len(special) > 0:
            print_info("special-case feed url %s" % special)
            self.getFeed(special)
        else:
            # bad feed
            self.dead = True
            self.changed()
        return
    Cache.getContentOfUrlAndCallback(
        callback=self.gotMainPage,
        url=self.url,
        timeout=self.timeout() * 10,
        wantStale=False,  # TODO - use stale version somehow
        failure=self.failed)
def __init__(self, parent):
    wx.Panel.__init__(self, parent)

    explain_txt = wx.StaticText(self, label="Past Paper Crawler caches viewed web pages to memory \nand disk to boost efficiency.")
    hint_txt = wx.StaticText(self, label="Current cache on the disk: ")
    open_button = wx.Button(self, label="open folder")
    self.Bind(wx.EVT_BUTTON, self.on_open, open_button)
    self.cache_folder = Cache.customized_directory()
    cache_list = sorted([file for file in os.listdir(self.cache_folder) if not file.startswith(".")])
    self.cache_checklist = wx.CheckListBox(self, choices=cache_list, size=(0, 295))
    open_cache_sizer = wx.BoxSizer(wx.HORIZONTAL)
    open_cache_sizer.Add(hint_txt, flag=wx.ALIGN_CENTER_VERTICAL | wx.RIGHT, border=5)
    open_cache_sizer.Add(open_button, flag=wx.ALIGN_CENTER_VERTICAL)

    select_all_button = wx.Button(self, label="Select all")
    self.Bind(wx.EVT_BUTTON, self.on_select_all, select_all_button)
    remove_button = wx.Button(self, label="Remove")
    self.Bind(wx.EVT_BUTTON, self.on_remove, remove_button)
    button_sizer = wx.BoxSizer(wx.HORIZONTAL)
    button_sizer.Add(select_all_button)
    button_sizer.Add(remove_button, flag=wx.LEFT, border=8)

    cache_sizer = wx.BoxSizer(wx.VERTICAL)
    cache_sizer.Add(explain_txt, flag=wx.ALL, border=10)
    cache_sizer.Add(open_cache_sizer, flag=wx.BOTTOM | wx.LEFT | wx.RIGHT, border=10)
    cache_sizer.Add(self.cache_checklist, flag=wx.EXPAND | wx.BOTTOM | wx.LEFT | wx.RIGHT, border=10)
    cache_sizer.Add(button_sizer, flag=wx.ALIGN_RIGHT | wx.BOTTOM | wx.LEFT | wx.RIGHT, border=10)
    self.SetSizer(cache_sizer)
def test_put(self):
    sa_cache = Cache.Cache(2, 8)
    data = (3, 5)
    sa_cache.put(data[0], data[1])
    tag = sa_cache._custom_hash(
        tuple([sa_cache._custom_hash(data[0]), sa_cache._custom_hash(data[1])]))
    self.assertIn(
        [tag, data[1]],
        [[entry[0], entry[1]] for entry in sa_cache._cache[sa_cache._get_set_num(data[0])]])
    data2 = (-62, 172)
    sa_cache.put(data2[0], data2[1])
    tag2 = sa_cache._custom_hash(
        tuple([sa_cache._custom_hash(data2[0]), sa_cache._custom_hash(data2[1])]))
    self.assertIn(
        [tag, data[1]],
        [[entry[0], entry[1]] for entry in sa_cache._cache[sa_cache._get_set_num(data[0])]])
    self.assertIn(
        [tag2, data2[1]],
        [[entry[0], entry[1]] for entry in sa_cache._cache[sa_cache._get_set_num(data2[0])]])
def showRoom(year, month, date, room):
    if not re.match('^[a-zA-Z]{1,2}\\d+[a-zA-Z]?$', room):
        return jsonify(errMsg='Invalid room'), 500
    try:
        scheduleList = Cache.readCache(year, month, date)
    except IOError:
        html = Fetcher.fetch_html(int(year), int(month), int(date), int("0830"), int("2200"))
        scheduleList = Fetcher.parseHTML2List(html)
        Cache.saveCache(year, month, date, scheduleList)
    response = Scheduler.selectRoom(scheduleList, room)
    res = jsonify(response)
    res.mimetype = 'application/json'
    res.headers['Access-Control-Allow-Origin'] = '*'
    return res
def __init__(self):
    # https://developers.facebook.com/tools/explorer/
    user_token = "CAACEdEose0cBAO9m1ZChz3qqjqcS3HBlerOSd3wteZC7EqQYlcbngZCmQtvhKQmMmE1sORZAzZA07PboTUXgskIcZBFKeA05FpASH2hEoZCW4im9ZCuNLAlgnOHc00YM5tZByxLCZBo33JkGmq4aPFDNil7FnJGCI7dxgJVwd1ZApkurkCB8WGdZBSJTdNc6zFoCUXFHAKZCZC8OAF6RSV0ljZAw9TA"
    extended_access_token = "CAABfbgndG3ABAOGqO5oQ1HwqVOYrlZB6CofIOciVPgMFC4zIRRk7wJvjrZBTIpFlJ3eTZA72fs4UKmyPgasMZA6MEtAaCSegvAju2zsUXAgaTRCxAfFjwrh9x8ZBLJ4lRlEHVWg0m6ZAWk9mMWdYpAVc27cIZCwi5IXO4t0U2fWkVzbAn0UrWJS"
    self.graph = GraphAPI(extended_access_token)
    self.fb_cache = Cache(FACEBOOK_CACHE, INPUT_LANGUAGE)
    self.fb_cache.load()
    self.sleep_count = 0
def _rectifyRelativeName(name):
    # if no previous components, or it's absolute, just return it as is
    if topOfComponentStack == -1:
        return '/' + name
    elif name[0] == '/':
        return name
    path, fname = os.path.split(componentStack[topOfComponentStack].name)
    return Cache._normpath("%s/%s" % (path, name))
class DuckDuckGoWordOccurrenceClassifier:
    def __init__(self):
        self.duckduckgo_search = DuckduckgoSearch(False)
        self.word_occurrence_classifier = WordOccurrenceClassifier()
        self.cache = Cache(DUCKDUCK_WORD_OCCURRENCE_CACHE, INPUT_LANGUAGE)
        self.cache.load()

    def classify(self, term):
        cache_result = self.cache.search_cache(term)
        if cache_result is not None:
            return self.word_occurrence_classifier.normalize_results(
                ClassificationResult(term, cache_result.Matches))
        try:
            search_result = self.duckduckgo_search.general_search(term)
        except Exception:
            return ClassificationResult(term, {key: -1 for key in categories})
        result = self.word_occurrence_classifier.calculate_score(term, search_result)
        self.cache.update_cache(term, result)
        return self.word_occurrence_classifier.normalize_results(result)
def getCurrentMonth(username, password):
    # Try to return from cache
    cachekey = 'metricsathome.Data.iiNet-(' + iiNetCM + ',' + username + ')'
    iinetusage = Cache.read(cachekey)
    if iinetusage is not None:
        return iinetusage

    # Fall back to the net.
    # NOTE: the credential-bearing request URL and the fetch/parse step were
    # redacted in the source ("******"); the two lines below are a plausible
    # reconstruction, consistent with the urllib2/DOM usage elsewhere in this file.
    url = iiNetCM + '&username=' + username + '&password=' + password
    xmlDoc = minidom.parse(urllib2.urlopen(url))
    if xmlDoc.getElementsByTagName('error'):
        raise Exception('iiNet API Error: ' +
                        xmlDoc.getElementsByTagName('error')[0].firstChild.nodeValue)
    result = {}
    processQuotaReset(xmlDoc.getElementsByTagName('quota_reset')[0], result)
    processExpectedTraffic(xmlDoc.getElementsByTagName('expected_traffic_types')[0], result)
    processDailyUsage(xmlDoc.getElementsByTagName('day_hour'), result)

    # Save in cache
    Cache.write(cachekey, result, 7200)
    return result
class CompanyDuckDuckClassifier:
    def __init__(self):
        self.duckduckgo_search = DuckduckgoSearch(True)
        self.company_postfix = ['corp', 'corporation', 'company', 'inc', 'headquarters']
        self.cache = Cache(DUCKDUCK_COMPANY_CACHE, INPUT_LANGUAGE)
        self.min_hits_to_match = DUCKDUCK_COMPANY_MIN_HITS_TO_MATCH
        self.cache.load()

    def classify(self, term):
        cache_result = self.cache.search_cache(term)
        # reuse the lookup above (the original queried the cache a second time)
        if cache_result is not None:
            return self.normalize_results(ClassificationResult(term, cache_result.Matches))
        result = ClassificationResult(term)
        for company_word in self.company_postfix:
            term_to_search = term + ' ' + company_word
            try:
                if self.duckduckgo_search.general_search(term_to_search) != '':
                    result.Matches['company'] += 1
            except Exception:
                return ClassificationResult(term, {key: -1 for key in categories})
        self.cache.update_cache(term, result)
        return self.normalize_results(result)
def getData(self, aac, xml):
    "Get XML codes here: http://www.bom.gov.au/info/precis_forecasts.shtml"
    cachekey = BOM.modcachekey + '-data(' + aac + ',' + xml + ')'
    forecasts = Cache.read(cachekey)
    if forecasts is not None:
        return forecasts
    wdom = parse(urllib2.urlopen(self._xmlbase + xml))
    areas = wdom.getElementsByTagName('area')
    area = filter(lambda a: a.attributes['aac'].value == aac, areas)[0]
    fcasts = filter(lambda a: a.attributes != None, area.childNodes)
    forecasts = []
    for f in fcasts:
        values = {}
        values['date'] = dateutil.parser.parse(f.attributes['end-time-local'].value) - timedelta(days=1)
        for e in f.getElementsByTagName('element'):
            values[e.attributes['type'].value] = e.firstChild.nodeValue
        for e in f.getElementsByTagName('text'):
            values[e.attributes['type'].value] = e.firstChild.nodeValue
        forecasts.append(values)
    forecasts = sorted(forecasts, key=lambda f: f['date'])
    # write back under the same key that is read above (the original wrote
    # '-data(' + aac + ')', so the cache could never hit)
    Cache.write(cachekey, forecasts, 3600)
    return forecasts
def get_cache(hostinfo, req_path):
    """
    req_path is a URL path ref including query-part,
    the backend will determine real cache location
    """
    # Prepare default cache location
    cache_location = '%s:%i/%s' % (hostinfo + (req_path,))
    cache_location = cache_location.replace(':80', '')
    cache = Cache.load_backend_type(Params.CACHE)(cache_location)
    Params.log("Init cache: %s %s" % (Params.CACHE, cache), 3)
    Params.log('Prepped cache, position: %s' % cache.path, 2)
    # XXX: use unrewritten path as descriptor key, need unique descriptor per resource
    cache.descriptor_key = cache_location
    return cache
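# Worked example of the cache_location derivation above, under the assumption
# that hostinfo is a (host, port) tuple: the '%s:%i/%s' format joins host,
# port, and path, then the default :80 port is stripped. Values are made up.
hostinfo = ('www.example.com', 80)
req_path = 'img/logo.png?v=2'
cache_location = '%s:%i/%s' % (hostinfo + (req_path,))
cache_location = cache_location.replace(':80', '')
assert cache_location == 'www.example.com/img/logo.png?v=2'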
def insert(self, item):
    '''insert a new item into the cache
    return the shortcut and the hash on success, None otherwise

    item -- a tuple containing the shortcut and the path to an image
    '''
    shortcut, path = item
    hash_ = Cache.get_file_path_hash(path)
    if hash_ is None:
        return None
    new_path = os.path.join(self.path, hash_)
    shutil.copy2(path, new_path)
    return self.__add_entry(shortcut, hash_)
def __init__(self, **kwargs):
    """Initialise the service. If guid is not provided, one will be requested
    (returned in the callback). Pass callback= or error= to receive
    notification of readiness."""
    self.guid = Cache.read('metricsathome.Data.YarraTrams-guid')
    imp = Import('http://www.w3.org/2001/XMLSchema')
    imp.filter.add('http://www.yarratrams.com.au/pidsservice/')
    doctor = ImportDoctor(imp)
    self.client = Client(url, doctor=doctor)
    if self.guid is None:
        self.guid = self.client.service.GetNewClientGuid()
        Cache.write('metricsathome.Data.YarraTrams-guid', self.guid, 2592000)  # Keep for 1 month of no use
    headers = self.client.factory.create('PidsClientHeader')
    headers.ClientGuid = self.guid
    headers.ClientType = 'DASHBOARDWIDGET'
    headers.ClientVersion = '1.0'
    headers.ClientWebServiceVersion = '6.4.0.0'
    self.client.set_options(soapheaders=headers)
def insert(self, item):
    '''insert a new item into the cache
    return the information (stamp, hash) on success, None otherwise

    item -- a path to an image
    '''
    hash_ = Cache.get_file_path_hash(item)
    if hash_ is None:
        return None
    path = os.path.join(self.path, hash_)
    last_path = os.path.join(self.path, 'last')
    shutil.copy2(item, path)
    shutil.copy2(item, last_path)
    return self.__add_entry(hash_)
def init(self, path):
    Params.log("FileTree.init %r" % path, 5)
    path2 = path
    if Params.ARCHIVE:
        path2 = time.strftime(Params.ARCHIVE, time.gmtime()) + path2
    path2 = os.path.join(Params.ROOT, path2)
    if len(path2) >= Params.MAX_PATH_LENGTH:
        sep = Cache.min_pos(path2.find('#'), path2.find('?'))
        if sep != -1:
            if (len(path2[:sep]) + 34) < Params.MAX_PATH_LENGTH:
                FileTreeQH.init(self, path)
            else:
                PartialMD5Tree.init(self, path)
    else:
        FileTreeQ.init(self, path)
def buildOverlay(self, code, layers=deflayers):
    cachekey = BOM.modcachekey + '-overlay(' + code + ',(' + ','.join(layers) + '))'
    background = Cache.read(cachekey)
    if background is not None:
        return Image.open(StringIO(background))
    im = Image.new('RGBA', (512, 564), (255, 255, 255, 0))
    ftp = FTP(self._ftphost)
    ftp.login()
    ftp.cwd(self._transparentcypath)
    files = ftp.nlst('IDR' + code + '.*.png')
    files += ftp.nlst('IDR.*.png')
    alllayers = {}
    for f in files:
        b = f.find('.') + 1
        e = f.rfind('.')
        alllayers[f[b:e]] = f
    for l in layers:
        imgout = StringIO()
        ftp.retrbinary('RETR ' + alllayers[l], imgout.write)
        imgin = StringIO(imgout.getvalue())
        img = Image.open(imgin).convert('RGBA')
        im.paste(img, (0, 0), img)
        imgout.close()
        imgin.close()
    imgout = StringIO()
    im.save(imgout, format='PNG')
    Cache.write(cachekey, imgout.getvalue(), 604800)  # cache for a week
    imgout.close()
    return im
def __init__(self, path, text="", size=32, shadow=True):
    self.font = pygame.font.Font(os.path.join("..", "data", "fonts", path), int(size))
    self.texture = Texture()
    self.cache = Cache()

    # attributes
    self.scale = (1.0, 1.0)             # image bounds (width, height)
    self.position = (0, 0)              # where in the window it should render
    self.angle = 0                      # angle at which the image is drawn
    self.color = (255, 255, 255, 255)   # colour of the image
    self.rect = (0.0, 0.0, 1.0, 1.0)    # left, top, right, bottom; crops the texture
    self.alignment = 1                  # alignment of the text (left, center, right)
    self.shadow = shadow                # does the font project a shadow
                                        # (the original hard-coded True, ignoring the parameter)
    self.text = None
    # it is not necessary to enter a string upon initialization
    self.setText(text)
def insert_raw(self, item):
    '''insert a new item into the cache
    return the information (stamp, hash) on success, None otherwise

    item -- a tuple containing the shortcut and a file-like object with the image
    '''
    shortcut, image = item
    position = image.tell()
    image.seek(0)
    hash_ = Cache.get_file_hash(image)
    if hash_ is None:
        return None
    path = os.path.join(self.path, hash_)
    self.create_file(path, image)
    image.seek(position)
    return self.__add_entry(shortcut, hash_)
def insert_resized(self, item, filename, height=50, width=50):
    '''insert a new item into the cache with the specified filename,
    resizing it if possible.
    return the shortcut and the hash on success, None otherwise

    item -- a tuple containing the shortcut and the path to an image
    '''
    shortcut, image = item
    path = os.path.join(tempfile.gettempdir(), "emote")
    # save the incoming file so we can resize it using imagemagick
    self.create_file(path, image)
    self.resize_with_imagemagick(path, path, height, width)
    hash_ = Cache.get_file_path_hash(path)
    if hash_ is None:
        return None
    new_path = os.path.join(self.path, filename)
    shutil.copy2(path, new_path)
    return self.__add_entry(shortcut, filename)
def __init__(self):
    self.u_cache = Cache()
    self.u_vlc = []
    ivtile = 0
    for iarch in range(Architecture.getCfgByName("threadnum")):
        self.u_vlc.append(VLC())
        self.u_vlc[iarch].initialize()
    self.u_PCTracer = []
    for iarch in range(Architecture.getCfgByName("threadnum")):
        self.u_PCTracer.append(PCTracer(iarch, ivtile))
        self.u_PCTracer[iarch].initialize(
            GlobalVar.allcontents_trc[int(Architecture.getCfgByName("threadlist")[iarch])])
    self.u_cache.initialize()
    self.PCTracer_nextState = []
    self.PCTracer_state = []
    self.PCTracer_counter = []
    self.VLC_nextState = []
    self.VLC_state = []
    # self.VLC_counter = []
    self.current_CYC = []
    self.cur_abstime = []
    self.cur_clock = []
    self.cur_PC = []
    for iarch in range(Architecture.getCfgByName("threadnum")):
        self.PCTracer_nextState.append(0)
        self.PCTracer_state.append(0)
        self.PCTracer_counter.append(0)
        self.VLC_nextState.append(0)
        self.VLC_state.append(0)
        # self.VLC_counter.append(0)
        self.current_CYC.append(0)
        self.cur_abstime.append(0)
        self.cur_clock.append(0)
        self.cur_PC.append(0)
def getRadarLoop(self, code):
    rdrimgs = []
    cachekey = BOM.modcachekey + '-radarloop(' + code + ')'
    rdrloopstr = Cache.read(cachekey)
    if rdrloopstr is not None:
        for s in rdrloopstr:
            rdrimgs.append(Image.open(StringIO(s)).convert('RGBA'))
        return rdrimgs
    background = self.buildOverlay(code, BOM.backlayers)
    foreground = self.buildOverlay(code, BOM.forelayers)
    ftp = FTP(self._ftphost)
    ftp.login()
    ftp.cwd(self._radarpath)
    files = ftp.nlst('IDR' + code + '.T.*.png')
    for f in files:
        ftpcachekey = BOM.modcachekey + '-ftp://' + self._ftphost + self._radarpath + f
        imgstr = Cache.read(ftpcachekey)
        imageio = None
        if imgstr is not None:
            imageio = StringIO(imgstr)
        else:
            imgget = StringIO()
            ftp.retrbinary('RETR ' + f, imgget.write)
            Cache.write(ftpcachekey, imgget.getvalue(), 10800)
            imageio = StringIO(imgget.getvalue())
            imgget.close()
        try:
            rdrtrans = Image.open(imageio).convert('RGBA')
            im = Image.new('RGBA', (512, 564), (255, 255, 255, 0))
            im.paste(background, (0, 0), background)
            im.paste(rdrtrans, (0, 0), rdrtrans)
            im.paste(foreground, (0, 0), foreground)
            rdrimgs.append(im)
        except IOError:
            print 'WARNING: BOM is including bad images again'
    rdrloopstr = []
    for i in rdrimgs:
        imgout = StringIO()
        i.save(imgout, format='PNG')
        rdrloopstr.append(imgout.getvalue())
    Cache.write(cachekey, rdrloopstr, 360)
    return rdrimgs
def init(self, path): Params.log("FileTreeQ.init %r" % path, 5) psep = Params.ENCODE_PATHSEP # encode query and/or fragment parts sep = Cache.min_pos(path.find('#'), path.find( '?' )) # sort query vals and turn into dirs if sep != -1: if psep: path = path[:sep] + path[sep:].replace('%2F', psep) path = path[:sep] + path[sep:].replace('/', psep) if '&' in path[sep:]: parts = path[sep+1:].split('&') elif ';' in path[sep:]: parts = path[sep+1:].split(';') else: parts = [path[sep+1:]] if Params.FileTreeQ_SORT: parts.sort() while '' in parts: parts.remove('') path = path[ :sep+1 ] if parts: path = path + '/' + '/'.join(parts) # optional removal of directories in path if psep: if sep == -1 or Params.FileTreeQ_ENCODE: # entire path path = path.replace( '/', psep) else: # URL path-part only path = path[:sep].replace( '/', psep) + path[sep:] # make archive path if Params.ARCHIVE: path = time.strftime( Params.ARCHIVE, time.gmtime() ) + path self.path = os.path.join(Params.ROOT, path) self.file = None