def setTagsCloudMatchingIblsSets(self, pattern, flags=re.IGNORECASE):
    """
    This method sets the pattern matching Ibl Sets and updates
    :mod:`sibl_gui.components.core.iblSetsOutliner.iblSetsOutliner` Component Model content.

    :param pattern: Filtering pattern. ( String )
    :param flags: Regex filtering flags. ( Integer )
    :return: Method success. ( Boolean )
    """

    LOGGER.debug("> Filtering Ibl Sets by Tags.")

    patternsDefault = (".*",)
    # An empty pattern matches everything.
    patternTokens = pattern.split() or patternsDefault

    allTags = set()
    filteredIblSets = []

    iblSets = self.__collectionsOutliner.getCollectionsIblSets(
        self.__collectionsOutliner.getSelectedCollections() or self.__collectionsOutliner.getCollections())
    for iblSet in iblSets:
        comment = getattr(iblSet, "comment")
        if not comment:
            continue

        tagsCloud = foundations.strings.filterWords(
            foundations.strings.getWords(comment),
            filtersOut=self.__cloudExcludedTags,
            flags=flags)

        # Every token must match at least one tag for the set to be kept.
        patternsMatched = True
        if patternTokens != patternsDefault:
            # Bug fix: the loop variable used to be named 'pattern',
            # shadowing the method argument.
            for token in patternTokens:
                patternMatched = False
                for tag in tagsCloud:
                    if re.search(token, tag, flags=flags):
                        patternMatched = True
                        break
                # Boolean 'and' replaces the previous arithmetic '*=' accumulation.
                patternsMatched = patternsMatched and patternMatched

        if patternsMatched:
            allTags.update(tagsCloud)
            filteredIblSets.append(iblSet)

    self.__view.clear()
    self.__view.addItems(sorted(allTags, key=lambda x: x.lower()))
    if Counter(filteredIblSets) != Counter(iblSets) or \
            len(self.__iblSetsOutliner.getActiveView().filterNodes("IblSet", "family")) != len(iblSets):
        filteredIblSets = [iblSet for iblSet in set(iblSets).intersection(set(filteredIblSets))]

        LOGGER.debug("> Tags Cloud filtered Ibl Set(s): '{0}'".format(
            ", ".join((foundations.strings.toString(iblSet.name) for iblSet in filteredIblSets))))

        self.__iblSetsOutliner.setIblSets(filteredIblSets)
    return True
def variables():
    # Live-coding setup: publish shared pattern state and configure two
    # synth players via the environment's global DSL functions.
    global V
    # Shared values read elsewhere in the session; "_" presumably denotes
    # a rest inside a Seq — confirm against the Seq implementation.
    V = {
        "a": Counter(2),
        "b": 80,
        "seq0": Seq(60, 72, _, 60, 75, 80, _, 60),
        "long": Seq(40, 50),
        "longC": Counter(32)
    }
    speed(bpm(120, 4))  # master clock: 120 BPM, 4 beats

    # Player 1: saw wave through a low-pass filter.
    new_player("saw")
    toggle(1, "lpf")
    pan(0)
    dur(0.5)
    delt(0.125)
    lpf(6000)
    cut(0)

    # Player 2: sine wave, shorter notes, brighter filter, reverb off.
    new_player("sin")
    dur(0.125)
    toggle(1, "lpf")
    toggle(0, "reverb")
    pan(0)
    lpf(10000)
    cut(0)
def batch_ping(host):
    """Ping *host* once a second until the wall-clock minute rolls over.

    Returns a tuple (latency_counter, error_count, error_details) where
    error_details is a list of [timestamp, host, error_type, raw_error] rows.
    """
    # Start the counters
    now = datetime.datetime.now()
    counter = Counter("DATA")
    counter_err = Counter("ERROR")
    start_minute = now.minute
    current_minute = start_minute
    error_details = []

    while current_minute == start_minute:
        time.sleep(1.0)
        now = datetime.datetime.now()
        current_minute = now.minute
        rt, err_full = ping(host)

        if not err_full:
            counter.add(rt)
            continue

        print("ERROR @ {}: {}".format(str(now), err_full))
        # Classify the raw error text into a coarse category.
        err_type = "UNKNOWN"
        if "Request timed out" in err_full:
            err_type = "TIMEOUT"
        if "Ping request could not find host" in err_full:
            err_type = "NO HOST"
        error_details.append([str(now), host, err_type, err_full])
        counter_err.add(1)  # increment the error count
        counter.add(0)      # add a big fat zero to the latency counter

    counter.show()
    if counter_err.count() > 0:
        counter_err.show()
    return counter, counter_err.count(), error_details
def profileSort(sortFunction, upper=10):
    """Displays counts of comparisons and swaps and running times
    for the given sort algorithm on several data sizes.  The data size
    starts at 1 and doubles on each iteration.  An initial run shows
    results for a sorted list of 256 numbers.
    Arguments: the sort function and the number of data sets."""
    comps = Counter()
    swaps = Counter()

    # Baseline pass over an already sorted list of 256 numbers.
    data = list(range(1, 257))
    start = time.time()
    sortFunction(data, comps, swaps)
    t2 = time.time() - start
    print("Results for a sorted list of 256 numbers:")
    print("Comparisons: ", comps)
    print("Swaps: ", swaps)
    print("Time in seconds:", t2)

    # Random lists, size doubling each round: 1, 2, 4, ... 2**(upper-1).
    print("\n Size Comparisons Swaps Running Time (sec)")
    for exponent in range(upper):
        size = 2 ** exponent
        comps.reset()
        swaps.reset()
        data = getRandomList(size)
        start = time.time()
        sortFunction(data, comps, swaps)
        t2 = time.time() - start
        print("%5d%12s%12s%15.6f" % (size, comps, swaps, t2))
def test_iadd_imul_comprehensive(self):
    """In-place __iadd__/__imul__ must honour the Counter default value,
    agree in both directions, and leave an untouched copy unchanged.

    Uses assertTrue: failUnless is a deprecated alias removed in Py3.12.
    """
    # Testing default values for in-place ops
    for operation in ("__iadd__", "__imul__"):
        foo = Counter()
        foo.default = float("-inf")
        foo['a'] = 1.0
        bar = Counter()
        bar.default = float("-inf")
        bar['b'] = 2.0

        foofunc = getattr(foo, operation)
        barfunc = getattr(bar, operation)

        orig = copy(foo)
        orig.default = foo.default
        orig2 = copy(foo)
        orig2.default = foo.default

        foofunc(bar)
        barfunc(orig)

        # No side effects
        self.assertTrue(orig == orig2)
        # Transitivity
        self.assertTrue(foo == bar, "%s != %s" % (foo, bar))
        # Test that the values are correct
        self.assertTrue(bar['a'] == float("-inf"))
        self.assertTrue(bar['b'] == float("-inf"))
        if operation == "__iadd__":
            val = float("-inf")
        elif operation == "__imul__":
            val = float("inf")
        self.assertTrue(bar['missing'] == val)
def __init__(self):
    """Build the list of counter objects, one per time unit."""
    # Units chain from smallest to largest: seconds, minutes, hours, days.
    self.myTime = []
    for label, limit in (("Sec", 60), ("Min", 60), ("Hrs", 24), ("Days", 7)):
        self.myTime.append(Counter(label, limit))
def setUp(self):
    """Fixture: one counter that is all spam, one split spam/ham."""
    self.all_spam = Counter()
    self.all_spam['spam'] = 2

    self.half_spam = Counter()
    for token in ('spam', 'ham'):
        self.half_spam[token] += 1
def learn_from_treebanks(self, treebanks):
    """Estimate PCFG parameters from treebank files.

    Each file holds one JSON-encoded parse tree per line.  Populates the
    symbol/rule/word counters, then derives the rule probabilities q1
    (unary/emission) and q2 (binary) by relative frequency.
    """
    self.sym_count = Counter()
    self.unary_count = Counter()
    self.binary_count = Counter()
    self.words_count = Counter()
    for treebank in treebanks:
        for s in open(treebank):
            self.__count(loads(s))

    # Words: words at or above the rarity threshold are "well known";
    # the rest get collapsed by norm_word below.
    for word, count in self.words_count.iteritems():
        if count >= PCFG.RARE_WORD_COUNT:
            self.well_known_words.add(word)

    # Normalise the unary rules count: merge counts of rare words that
    # norm_word maps to the same normalised form.
    norm = Counter()
    for (x, word), count in self.unary_count.iteritems():
        norm[(x, self.norm_word(word))] += count
    self.unary_count = norm

    # Q1: q1(x -> word) = count(x, word) / count(x)
    for (x, word), count in self.unary_count.iteritems():
        self.q1[x, word] = self.unary_count[x, word] / self.sym_count[x]

    # Q2: q2(x -> y1 y2) = count(x, y1, y2) / count(x)
    for (x, y1, y2), count in self.binary_count.iteritems():
        self.q2[x, y1, y2] = self.binary_count[x, y1, y2] / self.sym_count[x]

    self.__build_caches()
def test_add_mul_comprehensive(self):
    """__add__/__mul__ must honour Counter defaults, be symmetric, and
    leave their operands unchanged.

    Uses assertTrue: failUnless is a deprecated alias removed in Py3.12.
    """
    # Testing default values
    for operation in ("__add__", "__mul__"):
        foo = Counter()
        foo.default = float("-inf")
        foo['a'] = 1.0
        bar = Counter()
        bar.default = float("-inf")
        bar['b'] = 2.0

        foofunc = getattr(foo, operation)
        barfunc = getattr(bar, operation)

        # Transitivity
        self.assertTrue(
            foofunc(bar) == barfunc(foo),
            "%s != %s" % (foofunc(bar), barfunc(foo)))

        # Test that the values are correct
        bob = foofunc(bar)
        self.assertTrue(bob['a'] == float("-inf"))
        self.assertTrue(bob['b'] == float("-inf"))
        if operation == "__add__":
            val = float("-inf")
        elif operation == "__mul__":
            val = float("inf")
        self.assertTrue(bob['missing'] == val)

        # Verify that the originals are unchanged
        self.assertTrue(foo['a'] == 1.0 and foo['missing'] == float("-inf"))
        self.assertTrue(bar['b'] == 2.0 and bar['missing'] == float("-inf"))
def new():
    """Reset the global counter, seeding it from the text entry when non-empty."""
    global counter
    try:
        # Empty field -> default Counter; otherwise parse the field as the
        # integer starting value.
        counter = Counter() if txt.get() == "" else Counter(int(txt.get()))
        update_label()
    except ValueError:
        # Non-numeric input: leave the current counter untouched.
        pass
def test_bad_add(self):
    """In-place addition of counters with mismatched registered keys
    must raise ValueError."""
    lhs = Counter('Test')
    lhs.register('a')
    lhs.register('b')
    lhs.inc('a')

    rhs = Counter('Test')
    rhs.register('b')

    # rhs lacks key 'a', so rhs += lhs is invalid.
    self.assertRaises(ValueError, rhs.__iadd__, lhs)
def test_no_side_effects(self): foo = Counter() bar = Counter() foo['a'] += 2.0 foo['b'] += 1.0 foo2 = copy(foo) bar += foo self.failUnless(foo2 == foo)
def __init__(self, hours, minutes, seconds): self.hours = Counter( LHOURS, hours, MIN_DIGITS ) # sets self.hours to be a Counter object with limit: 24, initial value: hours, and min_digits: 2 self.minutes = Counter( LMINUTES, minutes, MIN_DIGITS ) # sets self.minutes to be a Counter object with limit: 60, initial value: minutes, and min_digits: 2 self.seconds = Counter( LSECONDS, seconds, MIN_DIGITS ) # sets self.seconds to be a Counter object with limit: 60, initial value: seconds, and min_digits: 2
def test_in_place_add(self): aadd = Counter() aadd['bob'] = 2 badd = Counter() badd['bob'] = 4 aadd += badd self.failUnless(aadd['bob'] == 6) self.failUnless(badd['bob'] == 4)
def format_labels(self, sorted_entries):
    """Yield one formatted label per entry, disambiguating duplicates by
    appending 'a', 'b', ... in order of appearance."""
    labels = [self.format_label(entry) for entry in sorted_entries]
    occurrences = Counter(labels)
    seen = Counter()
    for label in labels:
        if occurrences[label] == 1:
            # Unique label: emit as-is.
            yield label
            continue
        # Duplicate: suffix by how many copies we've already emitted.
        suffix = chr(ord('a') + seen[label])
        seen[label] += 1
        yield label + suffix
def setUp(self):
    """Fixture: two labels with mirrored feature weights plus the
    log-probabilities maxent assigns to a warm+fuzzy feature vector."""
    # Feature vector: both features active with weight 1.0.
    self.features = Counter((key, 1.0) for key in ['warm', 'fuzzy'])
    self.weights = CounterMap()
    # 'dog' favours warm, 'cat' favours fuzzy (symmetric weights).
    self.weights['dog'] = Counter({'warm': 2.0, 'fuzzy': 0.5})
    self.weights['cat'] = Counter({'warm': 0.5, 'fuzzy': 2.0})
    self.labels = set(self.weights.iterkeys())

    self.logp = maxent.get_log_probabilities(self.features, self.weights,
                                             self.labels)
def main():
    """Drive a nixie-tube Christmas countdown on an ESP32: sync the clock
    via NTP, then loop showing either today's date or days-until-Christmas.
    """
    # I got a lot of this stuff from: https://RandomNerdTutorials.com
    # ESP32 Pin assignment
    i2c = I2C(-1, scl=Pin(22), sda=Pin(21))
    #i2c_scan(i2c)
    oled_width = 128
    oled_height = 64
    oled = ssd1306.SSD1306_I2C(oled_width, oled_height, i2c)

    # Retry the NTP sync until it succeeds.
    result = set_date_from_ntp(oled)
    while result == 0:
        time.sleep(5)
        result = set_date_from_ntp(oled)

    # Light up the neopixels (blue while idle)
    set_neopixel_colors(0, 0, 64)

    nixie_driver = NixieDriver(25, 26, 27)
    counters = [Counter(nixie_driver, 0), Counter(nixie_driver, 1)]

    # Show today's date once, then blank both tubes.
    calendar_display(counters)
    counters[1].set_count(0)
    counters[0].set_count(0)
    time.sleep(1)

    even = True
    while True:
        print("Days til Xmas...", end='')
        days = days_til_date(oled, 2020, 12, 25)
        print(str(days))
        if days == 1 or days == 0:
            # Christmas eve/day: alternate red and green each pass.
            if even:
                set_neopixel_colors(128, 0, 0)
            else:
                set_neopixel_colors(0, 128, 0)
            # Simplified from the original redundant `if even == True:` branch.
            even = not even
            countdown_display(counters, days)
            time.sleep(15)
        elif days > 0 and days <= 99:
            # Countdown fits on the two-digit display.
            countdown_display(counters, days)
            time.sleep(15)
        else:
            # Out of range: fall back to the date display in blue.
            set_neopixel_colors(0, 0, 64)
            calendar_display(counters)
            time.sleep(15)
def __init__(self, hour, minute, second):
    """
    Define three Counter instances (hour, minute, second) and set the
    current time to hour:minute:second.  (Translated from Korean.)
    """
    self.hour = Counter(Clock.HOURS)
    self.hour.set(hour)
    self.minute = Counter(Clock.MINUTES)
    self.minute.set(minute)
    self.second = Counter(Clock.SECONDS)
    self.second.set(second)
def test_sum(self): foo = Counter() bar = Counter() foo['x'] = 1.0 foo['y'] = 1.0 bar['z'] = 1.0 bar['x'] = 1.0 self.assertEqual(foo + bar, Counter({'x': 2.0, 'y': 1.0, 'z': 1.0})) self.assertEqual(sum((foo + bar).itervalues()), 4.0)
def reentrancies(self):
    '''Counts the number of times each variable is mentioned in the
    annotation beyond the one where it receives a concept. Non-reentrant
    variables are not included in the output.'''
    c = defaultdict(int)
    for h, r, d in self.triples():
        if isinstance(d, Var):
            # Every mention as a dependent counts toward reentrancy...
            c[d] += 1
        elif isinstance(d, Concept):
            # ...except the defining (concept-assigning) mention of the head.
            c[h] -= 1
    return Counter(
        c) + Counter()  # the addition removes non-positive entries
def process_text(text): global next_doc_id, total_docs, total_sentences, total_chars_kept_tokens, total_tokens # label unique doc id doc_id = next_doc_id next_doc_id += 1 # get tokens from text text = text.lower() # convert to lowercase # TODO parse text, replace all instances of '[[...]]' and the like tokens = nltk.word_tokenize(text) # get tokens / types after stemming and eliminating stopwords # list of stemmed tokens, dict of found types, number of characters in orig tokens stems, types, num_chars_kept_tokens = stem_tokens_stopwords(tokens) num_types = len(types.keys()) num_kept_tokens = 0 # could do len(stems) but have to iterate through it anyway # populate term frequencies for t in stems: term_freq[(t, doc_id)] = term_freq.get((t, doc_id), 0) term_freq[(t, doc_id)] += 1 num_kept_tokens += 1 # calculate sentence stats # tokenize text into sentences sentences = sent_tokenize.tokenize(text) num_sentences = 0 # pos tag distribution num_verbs = 0 num_nouns = 0 doc_sents[doc_id] = [] # for each sentence, store (text, tag counter) doc_tags[doc_id] = Counter() #print >> sys.stderr'sentences:' for s in sentences: #print >> sys.stderrs, '\n' # compute num verbs, num nouns feature # tokenize the sentences to prep for POS tagging sent_tokens = nltk.word_tokenize(s) pos = nltk.pos_tag(sent_tokens) tag_dist = Counter([tag for word, tag in pos]) #print >> sys.stderrtag_dist #print >> sys.stderrsorted(tag_dist) doc_sents[doc_id].append((s, tag_dist)) doc_tags[doc_id].update(tag_dist) num_sentences += 1 # save stats and info for document stats[doc_id] = (num_types, num_kept_tokens, num_sentences, num_chars_kept_tokens) raw_docs[doc_id] = text docs[doc_id] = stems # list of stems, after excluding stopwords #print >> sys.stderrdocs[doc_id] # and track global stats total_docs += 1 total_sentences += num_sentences total_chars_kept_tokens += num_chars_kept_tokens total_tokens += num_kept_tokens
def test_add(self):
    """After d += c, counts of keys registered in both counters are summed."""
    rhs = Counter('Test')
    rhs.register('a')
    rhs.register('b')
    rhs.inc('a')

    lhs = Counter('Test')
    lhs.register('a')
    lhs.register('b')
    lhs.inc('a')
    lhs.inc('b')

    lhs += rhs
    # 'a': 1 + 1 = 2, 'b': 1 + 0 = 1
    self.assertEqual(lhs['a'], ['a', 2])
    self.assertEqual(lhs['b'], ['b', 1])
def setUp(self):
    # two separate points, verify that the likelihoods are correct
    # and the sampling is sound
    self.points = dict()
    # Point 1 at (1, 1) and point 2 at (10, 10): far apart, so the CRP
    # Gibbs sampler should keep them in separate clusters.
    self.points[1] = Counter()
    self.points[1]['x'] = 1.0
    self.points[1]['y'] = 1.0
    self.points[2] = Counter()
    self.points[2]['x'] = 10.0
    self.points[2]['y'] = 10.0

    self.sampler = CRPGibbsSampler(self.points)
def testWithCounters(sort, n=15):
    """Runs some tests on a sort function."""
    comps = Counter()
    swaps = Counter()

    # First pass: an already sorted list 1..n.
    data = list(range(1, n + 1))
    print("Sorting", data)
    sort(data, comps, swaps)
    print("Result", data, "Comps:", str(comps), "Swaps:", str(swaps))

    # Second pass: a random list of the same size.
    comps.reset()
    swaps.reset()
    data = getRandomList(n)
    print("Sorting", data)
    sort(data, comps, swaps)
    print("Result", data, "Comps:", str(comps), "Swaps:", str(swaps))
def test_in_place_multiply(self):
    """`*=` multiplies matching keys; keys missing on either side go to 0.

    Uses assertTrue: failUnless is a deprecated alias removed in Py3.12.
    """
    amul = Counter()
    amul['bob'] = 2
    amul['jim'] = 2
    bmul = Counter()
    bmul['bob'] = 4
    amul *= bmul  # amul: bob = 8, jim = 0
    bmul *= amul  # bmul: bob = 32, jim = 0
    self.assertTrue(amul['bob'] == 8)
    self.assertTrue(bmul['bob'] == 32)
    self.assertTrue(amul['jim'] == 0)
    self.assertTrue(bmul['jim'] == 0)
def init_train_data(d, buy):
    """Build training features and labels from user actions before day *d*.

    Returns (x, y, pairs): feature rows, 0/1 labels (did the pair appear in
    buy[d]), and the matching "user_id,item_id" strings.
    """
    print("train day %d" % d)
    # Day 31 reads the original (unfiltered) action log.
    if d == 31:
        file = open(datafile_origin)
    else:
        file = open(datafile)
    actions = csv.reader(file)
    _ = next(actions)  # skip the CSV header row
    users = {}
    cc = Counter(n, "prepare training data")  # progress reporter
    for row in actions:
        cc.count_print()
        user_id = int(row[0])
        item_id = int(row[1])
        action = int(row[2])  # action code 1..4 — semantics per data spec; TODO confirm
        c = int(row[4])
        day = get_day(row[5])
        hour = get_hour(row[5])
        if day >= d:
            # Only use history strictly before the target day.
            continue
        if user_id not in users:
            users[user_id] = {item_id: [Big3(), Big3(), Big3(), Big3()]}
        if item_id not in users[user_id]:
            users[user_id][item_id] = [Big3(), Big3(), Big3(), Big3()]
        # Recency-weighted push: actions closer to day d score higher.
        users[user_id][item_id][action - 1].push(10 / (d * 24 - hour))

    x = []
    y = []
    pairs = []
    cc = Counter(len(users), "process training data")
    for user_id in users:
        cc.count_print()
        for item_id in users[user_id]:
            t = users[user_id][item_id]
            # Concatenate the per-action-type Big3 feature values.
            xx = []
            xx.extend(t[0].get_values())
            xx.extend(t[1].get_values())
            xx.extend(t[2].get_values())
            xx.extend(t[3].get_values())
            user_item = str(user_id) + "," + str(item_id)
            if user_item in buy[d]:
                yy = 1
            else:
                yy = 0
            x.append(xx)
            y.append(yy)
            pairs.append(user_item)
    file.close()
    return x, y, pairs
def entering_play(self):
    """Initialise animations, event subscriptions and counter widgets when
    this card enters play."""
    self.is_tapped = False
    # Animation channels for tap / highlight / zoom transitions.
    self.tapping = anim.animate(0, 0, dt=0.3)
    self.highlighting = anim.animate(0, 0, dt=0.2)
    self.zooming = anim.animate(0, 0, dt=0.2)
    self.pos_transition = "ease_out_circ"  #"ease_out_back"
    self._pos.set_transition(dt=0.4, method=self.pos_transition)
    #self._pos.y = anim.animate(guicard._pos.y, guicard._pos.y, dt=0.4, method="ease_out")
    self._orientation.set_transition(dt=0.3, method="sine")
    self.can_layout = True
    if self.gamecard.types == Creature:
        self.setup_creature_role()
    # Check for counters: keep the view in sync with the game card.
    dispatcher.connect(self.add_counter, signal=CounterAddedEvent(),
                       sender=self.gamecard)
    dispatcher.connect(self.remove_counter, signal=CounterRemovedEvent(),
                       sender=self.gamecard)
    dispatcher.connect(self.type_modified, signal=TypesModifiedEvent(),
                       sender=self.gamecard)
    # Mirror any counters already present on the game card.
    self.counters = [
        Counter(counter.ctype) for counter in self.gamecard.counters
    ]
    self.layout_counters()
def test_zero_weight(self):
    """With a single label, its log-probability must be 0.0 (p = 1), even
    when its weights cover only a subset of the features."""
    weights = CounterMap()
    weights['dog'] = Counter({'warm': 2.0})
    labels = set(weights.iterkeys())

    logp = maxent.get_log_probabilities(self.features, weights, labels)
    self.assertEqual(logp['dog'], 0.0)
def test_extraneous_label(self):
    """Querying a label that was never in the weight map must yield
    log-probability -inf (p = 0)."""
    weights = CounterMap()
    weights['dog'] = Counter({'warm': 2.0, 'fuzzy': 0.5})
    labels = set(weights.iterkeys())

    logp = maxent.get_log_probabilities(self.features, weights, labels)
    self.assertEqual(logp['cat'], float('-inf'))
def __get_stats(self, fname):
    """Parse a "name:count[:date]" stats file from the work directory.

    Returns a Counter mapping name -> CounterRecord(count, date); empty
    when the file is missing or no line parses.  Malformed lines are
    skipped (best-effort).
    """
    path = os.path.join(self.__work_dir, fname)
    stats = Counter()
    try:
        for line in open(path, "r"):
            try:
                line = line.strip()
                parts = line.split(":")
                name = parts[0]
                count = parts[1]
                try:
                    # Dates may themselves contain ':' — re-join the tail.
                    date = ':'.join(parts[2:])
                except Exception:
                    date = None
                stats[name] = CounterRecord(int(count), date)
                #debug("stats[%s] = %s", name, stats[name])
            except Exception as e:  # 'as' syntax: valid on Python 2.6+ and 3
                ##debug(e)
                pass
    except IOError as e:
        if e.errno == errno.ENOENT:
            debug("%s does not exist", fname)
        else:
            print(e)
    # Bug fix: the parsed stats were computed but never returned.
    return stats