def __init__(self, source: Seq2Seq, src_counter: collections.Counter,
             trg_counter: collections.Counter, use_cuda):
    self.source = source
    # Rebuild source/target vocabularies restricted to the observed tokens,
    # keeping the first common_size entries of the parent vocab as specials.
    src_counter = collections.Counter(
        {source.src_voc.itos[i]: c for i, c in src_counter.items()})
    self.src_voc = Vocab(
        src_counter, specials=source.src_voc.itos[0:source.config.common_size])
    trg_counter = collections.Counter(
        {source.trg_voc.itos[i]: c for i, c in trg_counter.items()})
    self.trg_voc = Vocab(
        trg_counter, specials=source.trg_voc.itos[0:source.config.common_size])
    # s2b maps small-vocab indices to big-vocab indices; b2s is the inverse.
    src_s2b = [source.src_voc.stoi[w] for w in self.src_voc.itos]
    self.src_b2s = {b: s for s, b in enumerate(src_s2b)}
    trg_s2b = [source.trg_voc.stoi[w] for w in self.trg_voc.itos]
    self.trg_b2s = {b: s for s, b in enumerate(trg_s2b)}
    self.src_s2b = torch.LongTensor(src_s2b)
    self.trg_s2b = torch.LongTensor(trg_s2b)
    self.pad_index = self.trg_voc.stoi[PAD]
    self.sos_index = self.trg_voc.stoi[SOS]
    self.eos_index = self.trg_voc.stoi[EOS]
    self.use_cuda = use_cuda
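# A minimal, self-contained sketch of the small-vocab/big-vocab index
# remapping used above, with plain lists standing in for the Vocab/Seq2Seq
# classes (which are not shown here). All names are illustrative only.
big_itos = ['<pad>', '<sos>', '<eos>', 'the', 'cat', 'dog', 'sat']
big_stoi = {w: i for i, w in enumerate(big_itos)}
small_itos = ['<pad>', '<sos>', '<eos>', 'cat', 'sat']  # subset vocabulary

s2b = [big_stoi[w] for w in small_itos]   # small index -> big index
b2s = {b: s for s, b in enumerate(s2b)}   # big index -> small index

assert big_itos[s2b[3]] == small_itos[3] == 'cat'
assert b2s[s2b[4]] == 4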
def report(stats: collections.Counter, filename: str) -> None:
    # Sort user names by counter value, most active first.
    users = sorted(stats.items(), key=lambda kv: kv[1], reverse=True)
    with open(filename, 'w') as f:
        for user in users:
            f.write(f"{user[1]:<5} https://twitter.com/{user[0]}\n")
def write_ref_counts(aln_counts: collections.Counter, ref_counts_path: Path) -> None:
    with ref_counts_path.open('w') as ref_counts_file:
        for seq_name, count in sorted(aln_counts.items(), reverse=True,
                                      key=lambda item: item[1]):
            ref_counts_file.write(f'{seq_name}\t{count}\n')
def __str__(self):
    elt = []
    for val, num in Counter.items(self):
        if num == 1:
            elt.append(repr(val))
        else:
            elt.append("%r:%s" % (val, num))
    return "{%s}" % ", ".join(elt)
def get_best_seq(aln_counts: collections.Counter, seq_sizes: Dict[str, int],
                 min_size: Optional[int] = None) -> Optional[str]:
    for seq_name, count in sorted(aln_counts.items(), reverse=True,
                                  key=lambda item: item[1]):
        if min_size is None or seq_sizes[seq_name] >= min_size:
            return seq_name
    return None
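# Hypothetical usage of get_best_seq: pick the most frequently aligned
# sequence that meets the size threshold. The data below is made up.
import collections

counts = collections.Counter({'chr1': 120, 'plasmid': 300, 'chr2': 80})
sizes = {'chr1': 5_000_000, 'plasmid': 40_000, 'chr2': 3_000_000}
assert get_best_seq(counts, sizes) == 'plasmid'                 # highest count wins
assert get_best_seq(counts, sizes, min_size=100_000) == 'chr1'  # plasmid is too small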
def __imul__(self, num):
    """In-place multiplication by a non-negative integer.

    >>> m = mset('abc')
    >>> m *= 3
    >>> m == mset('abc' * 3)
    True
    >>> m *= 0
    >>> m == mset()
    True
    """
    if num == 0:
        self.clear()
    else:
        for key, val in Counter.items(self):
            self[key] = val * num
    return self
def create_profession_vector(sum_of_aspects: collections.Counter) -> dict:
    """Build a vector for a profession in the space of all possible aspects.

    Takes a dict whose keys are aspects of a profession and whose values are
    the number of occurrences of each aspect, and returns a dict over all
    possible aspects, where present aspects carry their counts.

    Args:
        sum_of_aspects: received through collections.Counter(), i.e. a dict
            recording how many of which aspects occur across all samples
            for the profession.

    Returns:
        A dict with the count of every aspect for the profession. Aspects
        not present in the profession map to 0.
    """
    # Init a dummy dict with all aspect combinations as keys and 0 values.
    dict_vector = dict.fromkeys(list(KEY2IND.keys()), 0)
    # Fill the dict with the aspects present in the profession and their counts.
    for aspect, count in sum_of_aspects.items():
        dict_vector[aspect] = count
    return dict_vector
def _frequency_table_to_heap(ft: collections.Counter,
                             branching_factor: int = 2) -> DWayHeap:
    """Takes a frequency table and creates a heap whose elements are nodes of
    the Huffman tree, with one node per unique character in the FT; for each
    element the priority associated to it is the frequency of the
    corresponding character.

    Args:
        ft: The frequency table, with char/number of occurrences (or document
            frequency) pairs.
        branching_factor: The branching factor for the d-ary heap that will
            be created.

    Returns:
        A d-ary heap containing one entry per unique character in the
        original text; each entry is going to be an instance of `HuffmanNode`.
    """
    characters, priorities = list(zip(*ft.items()))
    # Negate the frequencies because DWayHeap is a max-heap and Huffman
    # coding needs the lowest-frequency character to come out first.
    priorities = list(map(lambda p: -p, priorities))
    elements = list(map(lambda c: HuffmanNode(c, -ft[c]), characters))
    return DWayHeap(elements=elements, priorities=priorities,
                    branching_factor=branching_factor)
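# For comparison, a self-contained sketch of the same first step of Huffman
# coding using the standard library's heapq (a min-heap), so no negation is
# needed. This is an illustrative alternative, not the DWayHeap-based
# original; HuffmanNode is stubbed as a plain (frequency, char) tuple.
import collections
import heapq

def frequency_table_to_min_heap(ft: collections.Counter) -> list:
    heap = [(count, char) for char, count in ft.items()]
    heapq.heapify(heap)  # smallest frequency pops first, as Huffman needs
    return heap

heap = frequency_table_to_min_heap(collections.Counter("abracadabra"))
assert heapq.heappop(heap)[1] in ('c', 'd')  # the rarest characters (1 each)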
def create_bigramlist(cnt: collections.Counter) -> list:
    return [
        item for item, _ in sorted(
            cnt.items(), key=operator.itemgetter(1), reverse=True)
    ]
def counter2hash(self, counter: collections.Counter):
    # Map each symbol to a distinct prime and multiply: equal multisets give
    # equal products. The initializer 1 keeps an empty counter from raising
    # TypeError inside reduce.
    return reduce(mul, [self._alpha2prime[c] ** v
                        for c, v in counter.items()], 1) % self._mod
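# A self-contained illustration of the prime-product multiset hash above:
# anagrams hash to the same value by construction (modulo rare collisions
# introduced by % mod). The alphabet mapping and modulus are made up here.
import collections
from functools import reduce
from operator import mul

ALPHA2PRIME = dict(zip('abcdefghij', [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]))
MOD = (1 << 61) - 1

def multiset_hash(word: str) -> int:
    counts = collections.Counter(word)
    return reduce(mul, [ALPHA2PRIME[c] ** v for c, v in counts.items()], 1) % MOD

assert multiset_hash('cab') == multiset_hash('abc')  # anagrams collide by design
assert multiset_hash('abc') != multiset_hash('abd')  # different multisets differ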
from sys import stdin as Si, maxsize as m
from math import floor as F
from collections import defaultdict as dt, Counter as Co
from operator import itemgetter as ig
from math import pi

if __name__ == "__main__":
    L = tuple(map(int, Si.readline().split()))
    H, Max = Co(L), 0
    for k, v in H.items():
        if v > 1:
            Max = max(Max, k * min(v, 3))
    print(sum(L) - Max)

"""
A. Bear and Five Cards
time limit per test: 2 seconds
memory limit per test: 256 megabytes
input: standard input
output: standard output

A little bear Limak plays a game. He has five cards. There is one number
written on each card. Each number is a positive integer.

Limak can discard (throw out) some cards. His goal is to minimize the sum of
numbers written on remaining (not discarded) cards. He is allowed to at most
once discard two or three cards with the same number. Of course, he won't
discard cards if it's impossible to choose two or three cards with the same
number.

Given five numbers written on cards, can you find the minimum sum of numbers
on remaining cards?
"""
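# A quick self-contained check of the logic above, packaged as a function
# instead of reading stdin. The test values are worked out by hand, not
# taken from the original judge data.
from collections import Counter

def min_remaining_sum(cards):
    best_discard = max((k * min(v, 3) for k, v in Counter(cards).items() if v > 1),
                       default=0)
    return sum(cards) - best_discard

assert min_remaining_sum((7, 3, 7, 3, 20)) == 26  # discard the two 7s (14 > 6)
assert min_remaining_sum((1, 2, 3, 4, 5)) == 15   # no duplicates, discard nothing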
def _stringify_counter_by_popularity(c: collections.Counter) -> str:
    """Given a counter, give a string summary in descending popularity."""
    return ", ".join(f"{v} {k}" for k, v in sorted(
        c.items(), reverse=True, key=operator.itemgetter(1)))
# Scratch code for reporting the most frequently misspelled words. The
# original first loop referenced undefined names (sorted_freq, sorted_key);
# it is kept below as a comment inside the working loop.
motsDifficilesEtFrequence = {}
motsDifficilesEtFrequence['frequenceErreurs'] = {}
# Shape: {'frequenceErreurs': Counter({'1': 2, '3': 1}),
#         'motsDifficiles': {'1': {...}, '3': {...}}}
motsDifficilesEtFrequence['frequenceErreurs'] = {'1': 1, '3': 4}
sorted_Errors = sorted(motsDifficilesEtFrequence['frequenceErreurs'].items(),
                       key=operator.itemgetter(1), reverse=True)

# Renamed from `Counter` to stop shadowing collections.Counter, which is
# called again further down (stat = Counter(z) would otherwise fail).
error_counts = {1: 2, 3: 4, 4: 3, 2: 1, 0: 0}
sorted_Errors = sorted(error_counts.items(), key=operator.itemgetter(1),
                       reverse=True)

print('*** Tes pires ennemis ***')  # "Your worst enemies"
# sorted_dict = collections.OrderedDict(sorted_Errors)
for tempKey in sorted_Errors:
    # reponse = (motsDifficilesEtFrequence['motsDifficiles'][tempKey[0]]['Der-Die-Das']
    #            + ' ' + motsDifficilesEtFrequence['motsDifficiles'][tempKey[0]]['Mot en ALL'])
    pass

z = ['blue', 'red', 'blue', 'yellow', 'blue', 'red']
stat = Counter(z)
val = 'asdf'
# (snippet truncated here mid-`try:` block)
def _get_percentage(counts: collections.Counter) -> List[Tuple[int, float]]:
    """Convert counts to fractions of the total."""
    total_count = float(sum(counts.values()))
    return [(k, v / total_count) for k, v in sorted(counts.items())]
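# Hypothetical usage of _get_percentage with a small Counter of integer
# keys; values come out as fractions of the total, sorted by key.
import collections

counts = collections.Counter([1, 1, 2, 3])
assert _get_percentage(counts) == [(1, 0.5), (2, 0.25), (3, 0.25)]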
def __iter__(self):
    for key, val in Counter.items(self):
        for _ in range(val):
            yield key
def pairs(self):
    return Counter.items(self)
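# The methods above (__str__, __imul__, __iter__, pairs) appear to belong to
# a multiset class built on collections.Counter. A minimal self-contained
# sketch of such a class, assuming `mset` subclasses Counter as the doctests
# in __imul__ suggest:
from collections import Counter

class mset(Counter):
    def __iter__(self):
        for key, val in Counter.items(self):
            for _ in range(val):
                yield key

m = mset('abca')
assert sorted(m) == ['a', 'a', 'b', 'c']  # iteration repeats each element
assert dict(m.items()) == {'a': 2, 'b': 1, 'c': 1}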
def create_wordstats(cnt: collections.Counter) -> list:
    return sorted(cnt.items(), key=operator.itemgetter(1), reverse=True)
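# Note that Counter already ships this: cnt.most_common() returns (item,
# count) pairs sorted by descending count, so create_wordstats(cnt) is
# equivalent to cnt.most_common() up to the ordering of equal counts.
import collections

cnt = collections.Counter('banana')
assert create_wordstats(cnt) == cnt.most_common() == [('a', 3), ('n', 2), ('b', 1)]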
def freeze(counter: collections.Counter) -> CharCount:
    """Turn a counter into a hashable representation."""
    # Oh well, not efficient - but hey ho.
    return tuple(sorted(counter.items()))
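# Typical use of freeze: the frozen tuple is hashable, so counters can key a
# dict, e.g. for grouping words that are anagrams of each other. CharCount
# is presumably an alias along the lines of Tuple[Tuple[str, int], ...].
import collections

groups = collections.defaultdict(list)
for word in ['listen', 'silent', 'enlist', 'google']:
    groups[freeze(collections.Counter(word))].append(word)
assert ['listen', 'silent', 'enlist'] in groups.values()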
Restdic, RestCnt = RestDicCnt
n = 100        # number of restaurants
m = 100        # number of customers
numtt = n * m  # total number of reviews
num = m        # number of non-missing reviews

def Randmfn(ttCusts, m=100):
    idxs = list(range(len(ttCusts)))
    idxs = npr.permutation(idxs)
    return np.array(ttCusts)[idxs[:m]]

# Select the n most frequent restaurants in the chosen city.
subRests = Cntr()
for key, nn in RestCnt.items():
    if key in CityDic[ccity]:
        subRests[key] = nn
Rests = subRests.most_common(n)
# Rests = RestCnt.most_common(n)

currentRest = Rests[0][0]
ttCusts = list(Restdic[currentRest])
Custs = ttCusts
# Custs = Randmfn(ttCusts, m=m)
Custs = set(Custs)

# Count, for each restaurant, how many customers it shares with the most
# frequent restaurant.
res = Cntr()
for Cust in Custs:
    for rest, _ in Rests:
        custofrest = Restdic[rest]
freq_tech.update({k: cnt})

# Find the unique coins across all entries in freq_tech.
for k, v in freq_tech.items():
    for item in v:
        if item not in unique_coins:
            unique_coins.append(item)
unique_coins = sorted(unique_coins)

# Print the coins found in unique_coins together with their symbols.
for item in unique_coins:
    for k, v in coin_list.items():
        if item == k:
            print(item, v)
            break

# Sum the per-entry amounts into one overall tally. Renamed from `Counter`
# to avoid shadowing collections.Counter.
totals = Counter()
for k, v in freq_tech.items():
    for item, amount in v.items():
        totals[item] += amount
print(totals)

for k, v in totals.items():
    for coin, names in coin_list.items():
        if k == coin:
            print(coin, names, v)
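# The tallying loops above can be collapsed with Counter's own update
# method, which adds counts instead of replacing values as dict.update
# does. A minimal sketch with made-up data:
from collections import Counter

freq_tech = {'walletA': {'BTC': 2, 'ETH': 1}, 'walletB': {'BTC': 1, 'DOGE': 5}}
totals = Counter()
for per_wallet in freq_tech.values():
    totals.update(per_wallet)  # Counter.update adds; dict.update overwrites
assert totals == Counter({'DOGE': 5, 'BTC': 3, 'ETH': 1})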