Example #1
def user_show_configs_all(s: collections.defaultdict,
                          fill_character='_',
                          text=' Current User Configuration '):
    """Print all current configs to user"""

    # Compute a common column width so every value lines up when printed.
    min_width = utils_str_length_bigger(s)
    m = 0
    # First pass: find the maximum label width.
    for n, v in s.items():
        t1 = f"· ({n}) {v[0]}:\t ".ljust(min_width)
        if len(t1) > m:
            m = len(t1)

    print()
    print(vals['default_char_sep'] * vals['terminal_size'])
    print(text.center(vals['terminal_size']))
    print(vals['default_char_sep'] * vals['terminal_size'])
    print()
    # Second pass: print each setting padded to the common width.
    for n, v in s.items():
        t1 = f"· ({n}) {v[0]}: ".ljust(m, fill_character)
        t2 = f" {v[-1]}"
        t3 = t1 + t2
        print(t3)
    print()
Example #2
def translate_evol_data(evolutions: defaultdict, pokedex: dict):
    # Link each evolver to its evolutions, both by name and by id.
    for evolver, evolvees in evolutions.items():
        pokedex[evolver].to_list = evolvees
        pokedex[evolver].to_list_as_ids = [pokedex[v].id for v in evolvees]
        for v in evolvees:
            pokedex[v].ev_from.append(evolver)
            pokedex[v].ev_from_as_ids.append(pokedex[evolver].id)
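
A quick usage sketch for translate_evol_data; the pokedex entries here are hypothetical stand-ins built with SimpleNamespace, assuming each real entry carries an id plus empty evolution lists:

from collections import defaultdict
from types import SimpleNamespace

def entry(pid):
    # Hypothetical pokedex entry with the fields the function fills in.
    return SimpleNamespace(id=pid, to_list=[], to_list_as_ids=[],
                           ev_from=[], ev_from_as_ids=[])

pokedex = {'bulbasaur': entry(1), 'ivysaur': entry(2)}
evolutions = defaultdict(list, {'bulbasaur': ['ivysaur']})
translate_evol_data(evolutions, pokedex)
print(pokedex['bulbasaur'].to_list_as_ids)  # [2]
print(pokedex['ivysaur'].ev_from)           # ['bulbasaur']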
Example #3
    def msq_error(self, q_star: defaultdict):
        """Mean squared error between the current Q-values and q_star."""
        msqe = 0
        total_num = 0
        for key, value in q_star.items():
            msqe += (self.q_value[key] - value)**2
            total_num += 1
        return msqe / total_num
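
A quick numeric check, assuming the method above is pasted into a hypothetical Agent class whose q_value dict holds the current estimates:

from collections import defaultdict

class Agent:
    def __init__(self, q_value):
        self.q_value = q_value

    def msq_error(self, q_star: defaultdict):
        # Body as in the example above.
        msqe = 0
        total_num = 0
        for key, value in q_star.items():
            msqe += (self.q_value[key] - value) ** 2
            total_num += 1
        return msqe / total_num

agent = Agent({'up': 1.0, 'down': 2.0})
print(agent.msq_error(defaultdict(float, {'up': 1.5, 'down': 2.0})))  # 0.125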
Example #4
def write_gdp(regions_gdp: defaultdict, other_countries: defaultdict):
    region_key, gdp_key = 'region', 'GDP, millions of dollars'
    csv_columns = [region_key, gdp_key]
    dict_data = []
    for region, gdp in regions_gdp.items():
        dict_data.append({region_key: region, gdp_key: gdp})
    csv_file = "../data/postprocessed/gdp_by_region_incomplete.csv"
    print_csv_to_file(dict_data, csv_columns, csv_file)

    country_key = "country"
    csv_columns = [country_key, gdp_key]
    dict_data = []
    for country, gdp in other_countries.items():
        dict_data.append({country_key: country, gdp_key: gdp})
    csv_file = "../data/postprocessed/other_countries_gdp.csv"
    print_csv_to_file(dict_data, csv_columns, csv_file)
Example #5
    def diameter(self, graph: dd):
        # Longest path length reachable from each node, memoized in c.
        c = dd()

        def maxLength(start: int, counter: dd) -> int:
            if start in counter:
                return counter[start]

            counter[start] = 0

            if start not in graph:
                return 0

            # Assumes the graph is acyclic; a cycle would recurse forever.
            for node in graph[start]:
                temp = maxLength(node, counter) + 1
                counter[start] = max(counter[start], temp)

            return counter[start]

        maxv = 0
        node = 0
        for key in graph:
            if key not in c:
                maxLength(key, c)

            if c[key] > maxv:
                maxv = c[key]
                node = key

        return node, maxv
Example #6
def create_report(log_stat: defaultdict, report_size: int, count_all: int,
                  time_all: float) -> list:
    """
    Builds the report from the aggregated log data and computes all metrics.
    """
    log_stat_list = list(log_stat.items())
    log_stat_list.sort(key=lambda i: i[1]["time_sum"], reverse=True)
    report = []
    # Slicing also avoids an IndexError when report_size exceeds the list.
    for url, stat in log_stat_list[:report_size]:
        report.append({
            "url": url,
            "count": stat["count"],
            "count_perc": stat["count"] / count_all * 100,
            "time_sum": stat["time_sum"],
            "time_perc": stat["time_sum"] / time_all * 100,
            "time_avg": stat["time_sum"] / stat["count"],
            "time_max": max(stat["values"]),
            "time_med": mediana(stat["values"]),
        })
    return report
Example #7
def save_phrase_table(phrase_table: defaultdict, filename):
    with open("phrase_table/" + filename, 'w') as file:
        # The nested default factory yields the default probability.
        file.write("Default = " + str(phrase_table.default_factory().default_factory()) + "\n")
        for f, e_prob in phrase_table.items():
            file.write("********** f = " + f + "\n")
            for e, prob in e_prob.items():
                file.write(e + ": " + str(prob) + "\n")
Example #8
def save_to_excel_pegas_data(data: defaultdict) -> None:
    wb = Workbook()
    wb.remove(wb.active)  # drop the default empty sheet
    for branch_code, supporting_dates in data.items():

        ws = wb.create_sheet(title=str(branch_code))
        ws.merge_cells("A1:C1")
        ws["A1"].value = "For the Pegas file"
        ws.column_dimensions["A"].width = 15
        ws.column_dimensions["B"].width = 15
        ws.column_dimensions["C"].width = 15

        ws["A2"] = "Period start"
        ws["B2"] = "Period end"
        ws["C2"] = "Number of days"

        cell_number = 3
        day_range = 20
        period = 60
        for support_date in supporting_dates:
            for dt in date_range(support_date, day_range):
                ws[f"A{cell_number}"].value = (
                    dt - timedelta(days=period)).strftime("%Y-%m-%d")
                ws[f"B{cell_number}"].value = dt.strftime("%Y-%m-%d")
                ws[f"C{cell_number}"].value = period
                cell_number += 1

            cell_number += 1

    wb.save("test.xlsx")
Example #9
def printFifty(Frequencies: defaultdict):
    sorted_dict = sorted(Frequencies.items(), key=lambda x: x[1], reverse=True)
    with open("top50words.txt", "w") as file:
        for t in sorted_dict[:50]:  # only the top 50, as the filename promises
            file.write(str(t[0]) + " -> " + str(t[1]) + '\n')
Example #10
def topFifty(Frequencies: defaultdict):
    sorted_dict = sorted(Frequencies.items(), key=lambda x: x[1], reverse=True)
    Fifty = sorted_dict[0:50]
    newDic = defaultdict(int)
    for i in Fifty:
        newDic[i[0]] = i[1]
    return newDic
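
A quick check of topFifty on a small frequency table; with fewer than fifty entries it simply returns them all, highest count first:

from collections import defaultdict

freqs = defaultdict(int, {'the': 12, 'of': 7, 'cat': 3})
top = topFifty(freqs)
print(list(top.items()))  # [('the', 12), ('of', 7), ('cat', 3)]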
Example #11
def frequency_search(freq: int, hist: defaultdict):
    # Collect every word whose count equals freq.
    words = []
    for word, count in hist.items():
        if count == freq:
            words.append(word)
    return words
Example #12
def defaultdict_to_dict(def_a: defaultdict):
    a = dict()
    for k, v in def_a.items():
        if isinstance(v, defaultdict):
            v = defaultdict_to_dict(v)
        a[k] = v
    return a
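
A quick check that nested defaultdicts are converted at every level:

from collections import defaultdict

tree = defaultdict(lambda: defaultdict(int))
tree['a']['x'] += 1
plain = defaultdict_to_dict(tree)
print(plain, type(plain['a']))  # {'a': {'x': 1}} <class 'dict'>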
Example #13
File: dataset.py Project: entn-at/howl
    def __init__(self, metadata_list: List[AudioClipMetadata],
                 label_map: defaultdict, **kwargs):
        super().__init__(**kwargs)
        self.metadata_list = metadata_list
        self.label_map = label_map
        self.vocab = {v: k for k, v in label_map.items()}
        self.vocab[label_map.get(None)] = NEGATIVE_CLASS
Example #14
def _make_sketch(kmer_counts_dict: defaultdict) -> CountMinSketch:
    # Read the dict into a compressed structure so kmer_counts_dict
    # can be deleted afterwards.
    NUM_ROWS = 8
    kmer_counts = CountMinSketch(NUM_ROWS)
    for kmer, count in kmer_counts_dict.items():
        kmer_counts.update(kmer, count)
    return kmer_counts
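
CountMinSketch here is an external dependency; the class below is a minimal, hypothetical stand-in showing the idea: every update increments one counter per row, and a query takes the row-wise minimum, which upper-bounds the true count.

import hashlib

class CountMinSketch:
    # Minimal stand-in, not the real library class.
    def __init__(self, rows, width=1024):
        self.rows = rows
        self.width = width
        self.table = [[0] * width for _ in range(rows)]

    def _index(self, row, key):
        digest = hashlib.md5(f"{row}:{key}".encode()).hexdigest()
        return int(digest, 16) % self.width

    def update(self, key, count=1):
        for row in range(self.rows):
            self.table[row][self._index(row, key)] += count

    def query(self, key):
        return min(self.table[row][self._index(row, key)]
                   for row in range(self.rows))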
Example #15
def get_prerequisites(
        steps_dict: collections.defaultdict) -> collections.defaultdict:
    prerequisites = collections.defaultdict(list)
    for index, next_steps in steps_dict.items():
        for step in next_steps:
            prerequisites[step].append(index)
    return prerequisites
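
A quick check: the mapping "step -> next steps" is inverted into "step -> prerequisite steps":

import collections

steps = collections.defaultdict(list, {'A': ['B', 'C'], 'B': ['C']})
print(dict(get_prerequisites(steps)))  # {'B': ['A'], 'C': ['A', 'B']}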
Example #16
File: srl_utils.py Project: jgung/tf-nlp
    def _end(self, context: defaultdict, output_file: str) -> None:
        out_path = os.path.join(self.out_file, output_file + '.phrases.tsv')
        with open(out_path, 'w') as out:
            print('writing phrases to %s...' % out_path)
            for phrase_label, phrase in context.items():
                for span, count in phrase.items():
                    out.write('%s\t%d\t%s\n' %
                              (arg_to_a(phrase_label), count, span))
Example #17
File: day14.py Project: groselt/aoc
def update_occurrences(pair_counts: defaultdict, rules: dict) -> None:
    new_pair_counts = defaultdict(int)
    for pair, count in pair_counts.items():
        new = rules[pair]
        new_pair_counts[pair[0] + new] += count
        new_pair_counts[new + pair[1]] += count
    pair_counts.clear()
    pair_counts.update(new_pair_counts)
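
A quick check with the familiar AoC 2021 day 14 example: one insertion step on the template NNCB, where rules maps each pair to the element inserted between it:

from collections import defaultdict

pair_counts = defaultdict(int, {'NN': 1, 'NC': 1, 'CB': 1})  # "NNCB"
rules = {'NN': 'C', 'NC': 'B', 'CB': 'H'}
update_occurrences(pair_counts, rules)
print(dict(pair_counts))
# {'NC': 1, 'CN': 1, 'NB': 1, 'BC': 1, 'CH': 1, 'HB': 1}  -> "NCNBCHB"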
Example #18
def utils_str_length_bigger(st: collections.defaultdict):
    """Return the length of the longest name string (v[0]) among the dict values."""
    m = 0
    for v in st.values():
        name = v[0]
        if len(name) > m:
            m = len(name)
    return m
Example #19
def _freeze_nested_defaultdict(d: defaultdict) -> dict:
    d = dict(d)

    for k, v in d.items():
        if isinstance(v, defaultdict):
            d[k] = _freeze_nested_defaultdict(v)

    return d
Example #20
def fill_board_manhattan_distance_less_than(
        board: np.ndarray, maximum: int,
        manhattan_distance_dict: collections.defaultdict) -> np.ndarray:
    board_copy = board.copy()
    for index, distances in manhattan_distance_dict.items():
        sum_distances = sum(map(lambda x: x['distance'], distances))
        board_copy[index] = (1 if sum_distances < maximum else 0)
    return board_copy
Example #21
def count_stats(stats_data: defaultdict):
    counter = {}
    for stat, incidents in stats_data.items():
        # `stat is enum.Enum` was always False; isinstance is the intended check.
        if isinstance(stat, enum.Enum):
            stat = stat.value
        counter[stat] = len(incidents)

    return sorted(counter.items(), key=lambda i: i[1], reverse=True)
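
A quick check, assuming the keys are Enum members and the values are lists of incident records:

import enum
from collections import defaultdict

class Stat(enum.Enum):
    OK = 'ok'
    FAIL = 'fail'

data = defaultdict(list, {Stat.OK: [1, 2, 3], Stat.FAIL: [7]})
print(count_stats(data))  # [('ok', 3), ('fail', 1)]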
Example #22
def guess_symbol(frequency_map: defaultdict, frequency: float) -> str:
    s = ''
    diff = 100.0
    for symbol, freq in frequency_map.items():
        if abs(frequency - freq) < diff:
            diff = abs(frequency - freq)
            s = symbol

    return s
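
A quick check against a few hypothetical English letter frequencies (in percent):

from collections import defaultdict

english = defaultdict(float, {'e': 12.7, 't': 9.1, 'a': 8.2})
print(guess_symbol(english, 9.0))  # 't' (closest tabulated frequency)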
Example #23
def comb0(coins: list, deno: int, cur: defaultdict, ans: set):
    if deno < 0:
        return
    elif deno == 0:
        # Record a canonical form: drop zero counts and sort, so the
        # same multiset is never added twice.
        ans.add(tuple(sorted((key, val) for key, val in cur.items() if val)))
    else:
        for coin in coins:
            cur[coin] += 1
            comb0(coins, deno - coin, cur, ans)
            cur[coin] -= 1
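
A quick check: every multiset of coins 1 and 2 summing to 4, as (coin, count) pairs:

from collections import defaultdict

ans = set()
comb0([1, 2], 4, defaultdict(int), ans)
print(ans)
# {((1, 4),), ((1, 2), (2, 1)), ((2, 2),)}  (set order may vary)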
Example #24
def filter_words(histogram: defaultdict):
    filtered_words = []
    for w, i in histogram.items():
        if w.endswith('\'s') and w[:-2] in histogram:
            histogram[w[:-2]] += i
            filtered_words.append(w)
        elif w.endswith('s') and w[:-1] in histogram:
            histogram[w[:-1]] += i
            filtered_words.append(w)
    return filtered_words
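
A quick check: plural and possessive counts are folded into the stem, and the variant spellings are reported:

from collections import defaultdict

hist = defaultdict(int, {'cat': 2, 'cats': 3, "dog's": 1, 'dog': 4})
print(filter_words(hist))        # ['cats', "dog's"]
print(hist['cat'], hist['dog'])  # 5 5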
Example #25
    def get_statistics_info(self, main_dict: defaultdict) -> defaultdict:
        average_by_all = self.get_average_profit_by_all_enterprize(main_dict)
        statistic_dict = defaultdict(list)
        for key, item in main_dict.items():
            if item <= average_by_all:
                statistic_dict["less_than_average"].append(key)
            else:
                statistic_dict["more_than_average"].append(key)

        return statistic_dict
Example #26
def visualize(title: str, series: defaultdict, filename: str,
              per_continent: bool) -> None:
    if per_continent:
        world = pygal.maps.world.SupranationalWorld()
    else:
        world = pygal.maps.world.World()

    world.title = title
    for s in series.items():
        world.add(*s)
    world.render_to_file(f'images/{filename}.svg')
Example #27
def printFrequencies(Frequencies: defaultdict):
    sorted_dict = sorted(Frequencies.items(), key=lambda x: x[1],
                         reverse=True)  # O(n*log(n))
    with open("outputA.txt", "w") as file:
        for t in sorted_dict:  # O(n)
            file.write(t[0] + "-> " + str(t[1]) + '\n')
Example #28
def fill_board_manhattan_distance(
        board: np.ndarray,
        manhattan_distance_dict: collections.defaultdict) -> np.ndarray:
    board_copy = board.copy()
    for index, distances in manhattan_distance_dict.items():
        min_distance = min(distances, key=lambda x: x['distance'])
        board_copy[index] = (min_distance['id'] if len([
            distance for distance in distances
            if (distance['distance'] == min_distance['distance'])
        ]) == 1 else '')
    return board_copy
Example #29
def _fetch_broker_info(ctr_relation_buf: defaultdict):
    """Fetch broker information."""
    ctr_list = list()
    default_policy = {"broker_id": -1, "explorer_id": -1}
    for _broker, _explorer in ctr_relation_buf.items():
        default_policy.update({
            "broker_id": _broker,
            "explorer_id": list(_explorer)
        })
        ctr_list.append(default_policy.copy())

    return ctr_list
Example #30
def defaultdict_to_dict(dictionary: defaultdict) -> Dict:
    """Recursively convert nested :obj:`defaultdict` to :obj:`dict`.

    Args:
        dictionary: A defaultdict.

    Returns:
        The defaultdict as a :obj:`dict`.
    """
    if isinstance(dictionary, defaultdict):
        dictionary = {k: defaultdict_to_dict(v) for k, v in dictionary.items()}
    return dictionary
Example #31
File: base.py Project: Ares2013/coreml
    def _gather_losses(self, loss_dict: defaultdict) -> dict:
        """Gather all values per loss in one tensor

        :param loss_dict: dictionary containing lists of various losses
        :type loss_dict: defaultdict

        :return: dict containing a running list of various losses per batch
        """
        for loss_name, loss_value in loss_dict.items():
            loss_dict[loss_name] = torch.cat(
                loss_dict[loss_name]).detach().cpu().numpy()

        return loss_dict
Example #32
def get_cities(cities: defaultdict, region_id: int, users, needed_cities):
    for city_tup in sorted(cities.items(), key=lambda y: y[1]["uc"], reverse=True):
        city = city_tup[1]
        city_id = city["id"]
        if city["uc"] == -1:  # user count not fetched yet
            cities[city_id]["uc"] = get_city_users_count(city_id)
            city["uc"] = cities[city_id]["uc"]
            dumpData(cities, citiesFile)

        if city["uc"] > 5000:
            print(city["id"], ": ", city["title"], " - ", city["uc"])
            needed_cities.add(city_id)
            if users.get(city_id) is None:
                users[city_id] = defaultdict()
                users[city_id]["users"] = set()
                users[city_id]["count"] = 0
                dumpData(users, usersFile)
            find_users(users, city["id"], cities, needed_cities)
            dumpData(users, usersFile)
            sleep(randint(1, 3))
    return cities
Example #33
    mu = c.means_
    cov = c._get_covars()
    c_inv = []
    for co in cov:
        c_inv.append(np.linalg.inv(co))
    e_pr = np.sort(c.predict_proba(train_fd))
    '''
    dist = np.sort(c.transform(c_fn))
    ex = DD(list)  # example id, distance to centroid
    ex_id = DD(list)  # example id for each C
    ex_N = []  # number of examples for each C
    for i, j, k in zip(c.labels_, train, dist):
        ex[i].append([j, k[0]])
        ex_id[i].append(int(j))
    for i, j in ex.items():
        ex[i] = sorted(j, key=lambda x: x[-1])
        ex_N.append([i, len(ex[i])])
    ex_N = sorted(ex_N, key=lambda x: x[-1], reverse=True)  # sort clusters by density

    # confidence of training examples
    label_pr = np.sort(md.predict_proba(data2[train]))
    cf_d = DD()
    for i, pr in zip(train, label_pr):
        if len(pr) < 2:
            margin = 1
        else:
            margin = pr[-1] - pr[-2]
        cf_d[i] = margin
Example #34
def word_search(key: str, hist: defaultdict):
    # Return the count for key, or 0 if it is absent.
    for word, count in hist.items():
        if word == key:
            return count
    return 0
Example #35
    '''
    c = AC(n_clusters=n_class, affinity='cosine', linkage='average')
    c.fit(train_fd)
    tmp = dd(list)
    for i, j in zip(c.labels_, train):
        tmp[i].append(j)
    for k, v in tmp.items():
        for vv in v:
            print(k, input3[vv])
    '''
    c = KMeans(init='k-means++', n_clusters=n_class, n_init=10)
    c.fit(train_fn)
    tmp = DD(list)
    for i, j in zip(c.labels_, train):
        tmp[i].append(j)
    for k, v in tmp.items():
        for vv in v:
            pass
            # print(k, input1[vv])

    ex = DD(list)
    dist = np.sort(c.transform(train_fn))
    for i, j, k in zip(c.labels_, train, dist):
        ex[i].append([j, k[0]])
        print(i, k[0], input3[j])
    for i, j in ex.items():
        ex[i] = sorted(j, key=lambda x: x[-1])

    ex_auto = []
    ex_ora = []
    ora_idx = []
Example #36
        corr, std = calculateCorrelation(cogDist, taxDist)
        print("CORRELATION: %f STD: %f" % (corr, std))
        print("\nStoring COG distance dictionary...")
        UtilStore(cogDist, COG_DIST_DICT())
        sys.exit(0)

    if (len(sys.argv) == 2) and (sys.argv[1] == "distCounts"):
        print("Building dict of taxonomy dist counts...")
        _, _, taxaDict, taxDist = \
            buildCogTaxaDict(noWeights=True)
        genTaxDistCntDict = DefDict(
            lambda: [0] * (TaxaType.maxDistance() + 1))
        for dir, tdd in taxDist.items():
            for d in tdd.values():
                genTaxDistCntDict[dir][d] += 1
        UtilStore(genTaxDistCntDict, GENOME_TAX_DIST_CNT_DICT())
        ttTaxDistCntDict = {}
        for dir, l in genTaxDistCntDict.items():
            ttTaxDistCntDict[taxaDict[dir].type.key] = l
        UtilStore(ttTaxDistCntDict, TAXTYPE_TAX_DIST_CNT_DICT())
        sys.exit(0)

    print("WRONG COMMAND LINE")