Example #1
    def gb_tree_global(
        cls,
        objs,
        funcs,
        leaf_func=lambda l: l,
    ):
        obj_list = list(objs)
        n_funcs = len(funcs)
        if not n_funcs: return leaf_func(obj_list)

        keys_obj_list = [
            tuple([func(obj) for func in funcs]) + (obj, ) for obj in obj_list
        ]

        funcs_ig = [ig(i) for i in range(n_funcs)]
        keys_obj_list_sorted = reduce(
            lambda l, f_key: SortTool.sorted_by_key_index(
                l,
                f_key,
            ),
            reversed(funcs_ig),
            keys_obj_list,
        )
        gb_tree = cls.gb_tree_local(
            keys_obj_list_sorted,
            funcs_ig,
            leaf_func=lambda l: leaf_func(lmap(ig(n_funcs), l)),
        )
        return gb_tree
Example #2
    def xy_list2photoframe_point_list(cls, xy_list):
        c_X = Counter(map(ig(0), xy_list))
        x_list = [
            x for x, n in c_X.most_common() if n >= cls.PHOTOFRAME_LENGTH
        ]

        c_Y = Counter(map(ig(1), xy_list))
        y_list = [
            y for y, n in c_Y.most_common() if n >= cls.PHOTOFRAME_LENGTH
        ]

        def p2is_topleft(p_IN):
            x, y = p_IN
            p_set = set(
                p for i in range(cls.PHOTOFRAME_LENGTH)
                for p in [(x + i, y),
                          (x, y + i),
                          (x + cls.PHOTOFRAME_LENGTH - 1, y + i),
                          (x + i, y + cls.PHOTOFRAME_LENGTH - 1)]
            )
            return p_set <= set(xy_list)

        p_list = lfilter(p2is_topleft, product(x_list, y_list))
        p_list_OUT = sorted(p_list, key=lambda p: (p[0], p[1]))
        return p_list_OUT
Example #3
def recognize_recording(song):
    fs, samples = read_file(song)
    spec = get_spectrogram(fs, samples, plot=False)
    peaks = get_peaks(spec, plot=False)
    hashes = gen_hashes(peaks)
    matches, songs = myDB.search(hashes)

    f = lambda x: (x[0], x[1])
    counts = [(*k, len(list(g)))
              for k, g in groupby(sorted(matches, key=f), key=f)]
    matches = [max(list(g), key=ig(2)) for k, g in groupby(counts, key=ig(0))]
    matches = sorted(matches, key=ig(2), reverse=True)

    result = []
    for song, offset, _ in matches[:5]:
        time_offset = int(float(offset) / 44100 * 4096 * 0.5)
        result.append({
            "song_id": song,
            "total_hashes": myDB.song_table[song],
            "input_hashes": len(hashes),
            "matched_hashes": songs[song],
            "input_confidence": round(songs[song] / len(hashes), 2),
            "fingerprint_confidence": round(songs[song] / myDB.song_table[song], 2),
            "offset_seconds": time_offset,
        })
    return result
Example #4
def channelQuantilesFromImage(img, iles=(.5,), useSampling=False):  # one-element tuple; (.5) alone is just a float
    imageAsList = list(img.getdata())

    #    if useSample:
    #        print "*** !!!UNTESTED CODE. UNTESTED CODE!!! ***"
    #        indices = random.sample(xrange(numPoints), int(numPoints*samplePct))
    #        sampledlst = ig(*indices)(currentAsList) # a tuple
    #        evensize = int(math.sqrt(samplePct) * currentImage.size[0])
    #
    #        imageAsList = sampledlst[:evensize*evensize]

    pixelCount = len(imageAsList)

    # quantiles
    redq = quickQuantiles((ig(R)(c) for c in imageAsList),
                          pixelCount,
                          quantiles=iles)
    greenq = quickQuantiles((ig(G)(c) for c in imageAsList),
                            pixelCount,
                            quantiles=iles)
    blueq = quickQuantiles((ig(B)(c) for c in imageAsList),
                           pixelCount,
                           quantiles=iles)

    return redq, greenq, blueq
Example #5
 def reconstructQueue(self, people):
     people.sort(key=ig(1))
     people.sort(key=ig(0), reverse=True)
     ret = []
     for i in people:
         ret.insert(i[1], i)
     return ret
Example #6
def get_field(name, data_type, nullable):
    if isinstance(data_type, str):
        return T.StructField(name, ig(data_type)(mapper), bool(nullable))
    try:
        return get_field(name, ig(1)(data_type), bool(nullable))
    except:
        return get_field(name, ig('logicalType')(data_type), bool(nullable))
Example #7
 def baricentar(self):
   # Ignore the control if no
   # transformation has been chosen yet
   if not self.tr.get():
     return
   
   # Fill in the center of the transformation
   if self.centar.get() in ('centar platna', 'centar mase'):
     baricentar = lambda t: (sum(map(ig(0), t))/len(t),
                             sum(map(ig(1), t))/len(t)) \
                                 if t else (0, 0)
     
      # Choose the center of mass or the canvas center
     t1, t2 = baricentar(self.ttačke) if self.centar.get() \
                                   == 'centar mase' else (0, 0)
     
      # Enable the input fields
     self.t1_koord.config(state = 'normal')
     self.t2_koord.config(state = 'normal')
     
      # Clear the previous contents
     self.t1_koord.delete(0, 'end')
     self.t2_koord.delete(0, 'end')
     
      # Write in the newly computed values
     self.t1_koord.insert(0, '%.2f'%t1)
     self.t2_koord.insert(0, '%.2f'%t2)
     
      # Disable the input fields
     self.t1_koord.config(state = 'readonly')
     self.t2_koord.config(state = 'readonly')
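The centroid lambda above is just the arithmetic mean of the x and y coordinates. A minimal standalone check, on made-up points:

from operator import itemgetter as ig

points = [(0, 0), (2, 0), (1, 3)]
centroid = (sum(map(ig(0), points)) / len(points),
            sum(map(ig(1), points)) / len(points))
print(centroid)  # (1.0, 1.0)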
Example #8
    def maximumGap(self, A):
        if len(A) == 0: return -1
        #print (A)
        G = {}
        for e in range(len(A)):
            G[e] = A[e]

        #G= sorted(G.items(),key=ig(0))
        G = sorted(G.items(), key=ig(1))
        print(G)
        s = 0  #max sub
        for x in range(len(G) - 1):
            #m = max(G[x:],key=ig(1))
            m = max(G[x + 1:], key=ig(0))  #finding max
            cc = G.index(m) + 1  #needed when there are common elements
            st = 0
            con = 0
            if con == 0:
                st = m[0] - G[x][0]  #last value of GL - current value
            else:
                st = m[0] - G[x][0]
            if st >= s:
                print(st, cc)
                s = st
        return s
Example #9
def dict2pdfa(mapping, start: PA.State):
    """Convert nested dictionary into a PDFA.

    - mapping is a nested dictionary of the form:

       mapping = {
         <State>:  (<Label>, {
            <Action>: {
                <State>: <Probability>
            }
         })
       }
    """
    label_map = fn.walk_values(ig(0), mapping)
    transition_map = fn.walk_values(ig(1), mapping)

    outputs = set(bind(mapping).Values()[0].collect())
    inputs = set(bind(mapping).Values()[1].Keys().collect())

    return PA.pdfa(
        start=start,
        label=label_map.get,
        inputs=inputs,
        outputs=outputs,
        **_encode_two_player_game(transition_map)
    )
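For reference, a sketch of the mapping shape described in the docstring and of what the two walk_values calls pull out of it, using plain dict comprehensions in place of funcy and made-up states and labels:

from operator import itemgetter as ig

mapping = {
    's0': ('accept', {'a': {'s0': 0.9, 's1': 0.1}}),
    's1': ('reject', {'a': {'s1': 1.0}}),
}
label_map = {state: ig(0)(v) for state, v in mapping.items()}       # ~ fn.walk_values(ig(0), mapping)
transition_map = {state: ig(1)(v) for state, v in mapping.items()}  # ~ fn.walk_values(ig(1), mapping)
print(label_map)       # {'s0': 'accept', 's1': 'reject'}
print(transition_map)  # {'s0': {'a': {'s0': 0.9, 's1': 0.1}}, 's1': {'a': {'s1': 1.0}}}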
Example #10
    def maximumGap(self, A):
        if len(A)==0:   return -1
        #print (A)
        G= {}
        for e in range(len(A)):
            G[e] = A[e]

        #G= sorted(G.items(),key=ig(0))
        G= sorted(G.items(),key=ig(1))
        print (G)
        s = 0   #max sub 
        for x in range(len(G)-1):
            #m = max(G[x:],key=ig(1))
            m = max(G[x+1:],key=ig(0))  #finding max 
            cc = G.index(m) +1          #needed when there are common elements
            st = 0
            con = 0
            if con==0:
                st = m[0]-G[x][0]   #last value of GL - current value
            else:
                st = m[0]-G[x][0]
            if st >= s:
                print (st,cc)
                s = st
        return s
            
Example #11
    def port_tradegood_lists2blocks(cls, port_tradegood_list, price_dict, lang,
                                    groupby_parameter_type):
        logger = HenriqueLogger.func_level2logger(
            cls.port_tradegood_lists2blocks, logging.DEBUG)
        logger.debug({"port_tradegood_list": port_tradegood_list})

        if groupby_parameter_type == PriceSkillParameter.Type.PORTLIKE:
            from henrique.main.skill.price.by_port.price_by_port import PriceByPort

            blocks = [
                PriceByPort.port2text(port_codename, lmap(ig(1), l),
                                      price_dict, lang) for port_codename, l in
                gb_tree_global(port_tradegood_list, [ig(0)])
            ]
            return blocks

        if groupby_parameter_type == PriceSkillParameter.Type.TRADEGOOD:
            from henrique.main.skill.price.by_tradegood.price_by_tradegood import PriceByTradegood

            blocks = [
                PriceByTradegood.tradegood2text(tg_codename, lmap(ig(0), l),
                                                price_dict, lang) for
                tg_codename, l in gb_tree_global(port_tradegood_list, [ig(1)])
            ]
            return blocks

        raise Exception(groupby_parameter_type)
Example #12
    def str_spans2replace_all(cls, text_in, span_sub_list):
        span_sub_list_sorted = sorted(span_sub_list, key=ig(0))
        span_list = lmap(ig(0), span_sub_list_sorted)
        assert_false(SpanTool.overlaps_any(span_list), {"spans overlapping": span_list})

        text_out = reduce(lambda text, span_value: StringTool.str_span2sub(text, span_value[0], span_value[1]),
                          reversed(span_sub_list_sorted),
                          text_in
                          )
        return text_out
Example #13
def multikeysort(items, columns):
    comparers = [((ig(col[1:].strip()), -1) if col.startswith('-') else
                  (ig(col.strip()), 1)) for col in columns]

    def comparer(left, right):
        comparer_iter = (cmp(fn(left), fn(right)) * mult
                         for fn, mult in comparers)
        return next((result for result in comparer_iter if result), 0)

    return sorted(items, key=cmp_to_key(comparer))
Example #14
 def accuracy(self, patterns):                        
     guessable, correction = zip(*patterns)
     guess = [self.update(g) for g in guessable]        
     correctionRanks = [map(ig(0), sorted(enumerate(c), key=ig(1), reverse=True)) for c in correction]
     guessRanks = [map(ig(0), sorted(enumerate(c), key=ig(1), reverse=True)) for c in guess]        
     confusionMatrix = [[0 for i in xrange(7)] for i in xrange(7)]
     for c, g in zip(correctionRanks, guessRanks):
         confusionMatrix[c[0]][g[0]] += 1
     print sum(c[0] == g[0] for c, g in zip(correctionRanks, guessRanks)) / float(len(patterns))
     for mat in confusionMatrix:
         print mat
Example #15
    def port_tradegood_iter2price_dict(cls, server_codename, port_tradegood_iter):
        port_tradegood_set = set(port_tradegood_iter)

        port_codenames = smap(ig(0), port_tradegood_set)
        tradegood_codenames = smap(ig(1), port_tradegood_set)

        prices_latest = MarketpriceDoc.ports_tradegoods2price_list_latest(server_codename, port_codenames, tradegood_codenames)
        # raise Exception({"prices_latest":prices_latest})

        price_dict = cls.prices2price_dict(prices_latest)
        return price_dict
Example #16
    def table2beamed(cls, table, i_pivot, beam):
        ij_list = list(cls._str_ll_i2ij_iter(table, i_pivot, beam))
        i2j_list = gb_tree_global(
            ij_list,
            [ig(0)],
            leaf_func=lambda l: lmap(ig(1), l),
        )

        table_filtered = lchain(
            [table[0]],
            [cls._i2j_list2l_out(table[i], j_list) for i, j_list in i2j_list],
        )
        return table_filtered
Example #17
def multikeysort(items, columns):
    comparers = [
        ((ig(col[1:].strip()), -1) if col.startswith('-') else (ig(col.strip()), 1))
        for col in columns
    ]

    def comparer(left, right):
        comparer_iter = (
            cmp(fn(left), fn(right)) * mult
            for fn, mult in comparers
        )
        return next((result for result in comparer_iter if result), 0)
    return sorted(items, key=cmp_to_key(comparer))
Example #18
    def test_03(self):
        l = [
            "asdf", "ade", "abe", "bde", "bed", "bdgef", "bdwege", "bedfwef",
            "bdasdf", "csdfe", "defdad", "cdsfe"
        ]

        hyp = dict_groupby_tree(
            l,
            [ig(0), ig(1), ig(2)],
        )
        ref = {
            'a': {
                'b': {
                    'e': ['abe']
                },
                'd': {
                    'e': ['ade']
                },
                's': {
                    'd': ['asdf']
                }
            },
            'b': {
                'd': {
                    'a': ['bdasdf'],
                    'e': ['bde'],
                    'g': ['bdgef'],
                    'w': ['bdwege']
                },
                'e': {
                    'd': ['bed', 'bedfwef']
                }
            },
            'c': {
                'd': {
                    's': ['cdsfe']
                },
                's': {
                    'd': ['csdfe']
                }
            },
            'd': {
                'e': {
                    'f': ['defdad']
                }
            }
        }

        # pprint(hyp)
        self.assertEqual(hyp, ref)
Example #19
def get_score_timeline(player, since, smooth=True):
    '''
    This will return a pandas timeseries of scores from `since` until the present.
    Pray
    '''
    initial_score = get_most_recent_score(player, before=since)
    recent_scorings = list(reversed(get_recent_scorings(player, since)))
    scores = [initial_score] + map(ig('score'), recent_scorings)
    timestamps = [since] + map(ig('ts'), recent_scorings)
    time_index = pd.DatetimeIndex(np.array(timestamps, dtype='M8[s]'))
    series = pd.TimeSeries(scores, time_index)
    resampled = series.resample('1h')
    interp = resampled.interpolate('time')
    smoothed = pd.ewma(interp, span=24)
    return smoothed if smooth else interp
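This snippet targets Python 2 and an old pandas API (pd.TimeSeries and pd.ewma no longer exist). A rough modern equivalent of the resample/smooth tail, assuming scores and timestamps are plain lists:

import numpy as np
import pandas as pd

scores = [10.0, 12.0, 15.0]                          # hypothetical data
timestamps = [1700000000, 1700003600, 1700010800]    # unix seconds

time_index = pd.DatetimeIndex(np.array(timestamps, dtype='M8[s]'))
series = pd.Series(scores, index=time_index)
interp = series.resample('1h').mean().interpolate(method='time')  # replaces pd.TimeSeries + interpolate
smoothed = interp.ewm(span=24).mean()                             # replaces pd.ewma(interp, span=24)
print(smoothed.tail())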
Example #21
    def class2child_classes(cls, clazz):
        members = inspect.getmembers(clazz, inspect.isclass)
        children = lfilter(lambda x: x != type, map(ig(1), members))

        for child in children:
            yield child
            yield from ClassTool.class2child_classes(child)
Example #22
def avg_compr(trip_dict, get="avg"):
    """Produces the ategory VP/NP average length from of all triples given a
    sentence. Probably the general (noun) phrase average length is near to 5. In
    the case there are many phrases, I chose the nearest one to the average in
    length. A possible reference is: Temperley D. (2005) 'The Dependency
    Structure of Coordinate Phrases: A Corpus Approach'."""
    from operator import itemgetter as ig

    for t in trip_dict:
        #trip_dict[t]=(trip_dict[t], max(trip_dict[t],key=len))
        if get == "max":
            trip_dict[t] = max(trip_dict[t], key=len)
        elif get == "avg":  # {'VP': [['activate']], 'NPa': [['ability']], 'NPb': [['fimb', 'expression']]}
            length = round(mean([len(f) for f in trip_dict[t]]))
            rank = sorted([(i, abs(len(f) - length))
                           for i, f in enumerate(trip_dict[t])],
                          key=lambda tup: tup[1])
            filt = [j[0] for j in rank if j[1] <= 1]
            phrases_meet = ig(*filt)(trip_dict[t])  # phrases within 1 of the average length
            trip_dict[t] = list(trip_dict[t])[rank[0][0]]  # pick the phrase nearest the average
        elif get == "med":
            length = int(median([len(f) for f in trip_dict[t]]))
            i = sorted([(i, abs(len(f) - length))
                        for i, f in enumerate(trip_dict[t])],
                       key=lambda tup: tup[1])[0][0]
            trip_dict[t] = list(trip_dict[t])[i]
            #print i
            #print length
            #print trip_dict
    return trip_dict
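A minimal sketch of the "avg" selection rule, on a made-up list of phrases:

from statistics import mean

phrases = [['fimb'], ['fimb', 'expression'], ['ability', 'to', 'activate']]
length = round(mean(len(f) for f in phrases))                        # average phrase length -> 2
rank = sorted(enumerate(phrases), key=lambda t: abs(len(t[1]) - length))
print(rank[0][1])  # ['fimb', 'expression'], the phrase closest to the average length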
Example #23
 def get_split_ranks(self, split_feat=None, feature_id=None, do_print=False):
     '''Creates the rankings for each team based on the splits. Stores the result in a pandas data frame in self.ranks
         Arguments:
             split_feat - This is the result from split_features
             feature_id - the features that we are using. This will give us a column name for the table
     '''
     if split_feat == None:
         split_feat = dc(self.split)
     if feature_id == None:
         feature_id = dc(self.feature_ids)
     feature_ranks = []
     f_len = len(feature_id)
     i = 0
     for feature in split_feat:
         rank = []
         if do_print:
             print ''
             print '***'
             print feature_id[i]
             print '***'
         for team in sorted(feature.items(), key=ig(1), reverse=True):
             rank.append(team[0])
             if do_print:
                 print team
         i += 1
         feature_ranks.append(rank)
     feature_ranks = np.array(feature_ranks)
     t_ranks = dc(feature_ranks).transpose()
     self.ranks = t_ranks
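The core of each ranking above is a descending sort of (team, value) pairs for one feature. A standalone sketch with made-up split values:

from operator import itemgetter as ig

feature = {'TeamA': 0.61, 'TeamB': 0.74, 'TeamC': 0.55}   # hypothetical split values
rank = [team for team, _ in sorted(feature.items(), key=ig(1), reverse=True)]
print(rank)  # ['TeamB', 'TeamA', 'TeamC']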
Example #24
    def countingsorted(cls, iterable, f_key=None,
                       ):
        l = list(iterable)
        if not l: return l
        if f_key is None: f_key = lambda x: x

        n = len(l)
        key_obj_list = [(f_key(x), x)
                        for x in l]  # O(n)

        key_list = lmap(ig(0), key_obj_list)  # O(n)
        for k in key_list:  # O(n)
            if k < 0: raise Exception()

        m = max(key_list)  # O(n)
        counter = [0] * (m + 1)
        for key in key_list:  # O(n)
            counter[key] += 1

        index_list = [0] * (m + 1)
        total = 0
        for i, v in enumerate(counter):  # O(m)
            total += v
            index_list[i] = total

        l_result = [None] * n
        for key, obj in reversed(key_obj_list):  # O(n)
            i = index_list[key]
            l_result[i - 1] = obj
            index_list[key] -= 1

        #     for i,x in enumerate(l_result):
        #         if x is None: raise Exception(i)

        return l_result
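countingsorted assumes non-negative integer keys and uses this codebase's lmap/ig helpers; the same stable counting-sort placement, sketched with the standard library only and made-up data:

from operator import itemgetter as ig

items = [('b', 2), ('a', 0), ('c', 2), ('d', 1)]
keys = list(map(ig(1), items))                       # non-negative integer keys
counter = [0] * (max(keys) + 1)
for k in keys:
    counter[k] += 1
index_list = []                                      # cumulative counts = end position per key
total = 0
for v in counter:
    total += v
    index_list.append(total)
result = [None] * len(items)
for obj in reversed(items):                          # reversed pass keeps equal keys stable
    k = obj[1]
    result[index_list[k] - 1] = obj
    index_list[k] -= 1
print(result)  # [('a', 0), ('d', 1), ('b', 2), ('c', 2)]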
Example #25
def merge(ls):
    from operator import itemgetter as ig
    from copy import deepcopy
    lReturn = []
    lTemp = []
    lFrames = []
    for l in ls:
        lFrames.append(round(l[0]['count'] / l[0]['countPercentage']))
    for l in ls:
        if len(lTemp) < 1:
            for res in l:
                lTemp.append(deepcopy(res))
        else:
            for res in l:
                check = 0
                for x in range(len(lTemp)):
                    if res['resid'] == lTemp[x]['resid']:
                        check += 1
                        lTemp[x]['count'] += res['count']
                if check == 0:
                    lTemp.append(res)
    lReturn = deepcopy(lTemp)
    for res in lReturn:
        res['countPercentage'] = res['count'] / sum(lFrames)
    lReturn.sort(key=ig('count'), reverse=True)
    return lReturn
Example #26
 def bazinga(self, b):
     b = c(b)
     b = sorted(b.items(), key=ig(0), reverse=True)
     for i in b:
         if i[1] % 2 != 0: return 'Conan'
     else:
         return 'Agasa'
Example #27
def multikeysort(items, columns):
    """Sort a ``list`` of ``dicts`` by multiple keys in ascending or descending
    order. To sort in descending order, prepend a '-' (minus sign) on the
    column name.

    Pulled from: https://stackoverflow.com/questions/1143671/python-sorting-list-of-dictionaries-by-multiple-keys

    Examples:
        >>> my_list = [
            {'name': 'apple', 'count': 10, 'price': 1.00},
            {'name': 'banana', 'count': 5, 'price': 1.00},
            {'name': 'orange', 'count': 20, 'price': 2.00},
        ]

        >>> multikeysort(my_list, ['-name', 'count'])
        [{'count': 20, 'name': 'orange', 'price': 2.0},
         {'count': 5, 'name': 'banana', 'price': 1.0},
         {'count': 10, 'name': 'apple', 'price': 1.0}]

        >>> multikeysort(my_list, ['-price', 'count'])
        [{'count': 20, 'name': 'orange', 'price': 2.0},
         {'count': 5, 'name': 'banana', 'price': 1.0},
         {'count': 10, 'name': 'apple', 'price': 1.0}]

    Args:
        items (list): The ``list`` of ``dict`` objects.
        columns (list): A ``list`` of columns names to sort `items`.

    Returns:
        list: The sorted ``list``.

    """
    comparers = [
        ((ig(col[1:].strip()), -1) if col.startswith('-') else (
        ig(col.strip()), 1))
        for col in columns
    ]

    def comparer(left, right):
        comparer_iter = (
            cmp(fn(left), fn(right)) * mult
            for fn, mult in comparers
        )
        return next((result for result in comparer_iter if result), 0)

    return sorted(items, key=cmp_to_key(comparer))
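multikeysort relies on cmp, which no longer exists in Python 3; with a two-line shim, and assuming the function above is in scope, the docstring example runs as shown:

from functools import cmp_to_key
from operator import itemgetter as ig

def cmp(a, b):  # Python 3 replacement for the removed built-in
    return (a > b) - (a < b)

my_list = [
    {'name': 'apple', 'count': 10, 'price': 1.00},
    {'name': 'banana', 'count': 5, 'price': 1.00},
    {'name': 'orange', 'count': 20, 'price': 2.00},
]
print(multikeysort(my_list, ['-price', 'count']))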
Example #28
        def buffer2is_call_triggered(buffer):
            if len(buffer) >= buffer_size:
                return True

            if IterTool.count(filter(ig(0), buffer)) >= chunk_size:
                return True

            return False
Example #29
    def test_02(self):
        l = [
            "asdf", "ade", "abe", "bde", "bed", "bdgef", "bdwege", "bedfwef",
            "bdasdf", "csdfe", "defdad", "cdsfe"
        ]

        hyp = gb_tree_global(l, [ig(0), ig(1), ig(2)],
                             leaf_func=lambda _l: lmap(lambda s: s[3:], _l))
        ref = [('a', [('s', [('d', ['f'])]), ('d', [('e', [''])]),
                      ('b', [('e', [''])])]),
               ('b', [('d', [('e', ['']), ('g', ['ef']), ('w', ['ege']),
                             ('a', ['sdf'])]), ('e', [('d', ['', 'fwef'])])]),
               ('c', [('s', [('d', ['fe'])]), ('d', [('s', ['fe'])])]),
               ('d', [('e', [('f', ['dad'])])])]

        # pprint(hyp)
        self.assertEqual(hyp, ref)
Example #30
 def findLongestChain(self, pairs):
     pairs.sort(key=ig(1))
     ret = 0
     p = pairs[0][0] - 1
     for a, b in pairs:
         if a > p:
             p = b
             ret += 1
     return ret
Example #31
 def findKNeighbours(self):
     distances = []
     for fp in session.query(Fingerprint).all():
         dist = abs(self.R1 - fp.vec.RSSI1) \
              + abs(self.R1 - fp.vec.RSSI2) \
              + abs(self.R1 - fp.vec.RSSI3)
         distances.append((dist, fp))
     distances = sorted(distances, key=ig(0))
     return [x[0] for x in distances][:4], [x[1] for x in distances][:4]
Example #32
    def groupby_tree_global(
        cls,
        objs,
        funcs,
        leaf_func=None,
    ):
        if leaf_func is None:
            leaf_func = lambda l: l

        obj_list, func_list = list(objs), list(funcs)
        if not obj_list:
            return []

        if not func_list:
            return leaf_func(obj_list)

        n, p = len(obj_list), len(func_list)  # index: i, j
        keys_index_obj_list = [
            tuple(chain([func(obj) for func in funcs], [i, obj]))
            for i, obj in enumerate(obj_list)
        ]

        dict_value2first_index_list = [
            IterTool.iter2dict_value2first_index(
                map(ig(j), keys_index_obj_list)) for j in range(p)
        ]

        def keys_index_obj2key_sort(keys_index_obj_list):
            assert_equal(len(keys_index_obj_list), p + 2)
            keys, i = keys_index_obj_list[:-2], keys_index_obj_list[-2]

            indexes = [
                dict_value2first_index_list[j][key]
                for j, key in enumerate(keys)
            ]
            return tuple(chain(indexes, [i]))

        funcs_ig = [ig(i) for i in range(p)]
        gb_tree = cls.groupby_tree_local(
            sorted(keys_index_obj_list, key=keys_index_obj2key_sort),
            funcs_ig,
            leaf_func=lambda l: leaf_func(lmap(ig(p + 1), l)),
        )
        return gb_tree
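The sort key built above maps each grouping value to the index of its first appearance, so groups come out in encounter order rather than sorted order. The same idea with the standard library only, on made-up rows:

from itertools import groupby

rows = [('beta', 1), ('alpha', 2), ('beta', 3), ('alpha', 4)]
first_index = {}
for i, row in enumerate(rows):                 # value -> index of its first appearance
    first_index.setdefault(row[0], i)
rows_sorted = sorted(enumerate(rows), key=lambda p: (first_index[p[1][0]], p[0]))
tree = [(key, [row for _, row in grp])
        for key, grp in groupby(rows_sorted, key=lambda p: p[1][0])]
print(tree)  # [('beta', [('beta', 1), ('beta', 3)]), ('alpha', [('alpha', 2), ('alpha', 4)])]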
Example #33
    def module2classes_within(cls, module):
        from foxylib.tools.native.clazz.class_tool import ClassTool

        member_list = inspect.getmembers(module, inspect.isclass)
        clazzes = filter(lambda x: x.__module__ == module.__name__,
                         map(ig(1), member_list))

        for clazz in clazzes:
            yield from ClassTool.class2child_classes(clazz)
            yield clazz
Example #34
    def depth_func_pairlist2f_list(cls, depth_func_pairlist):
        if not depth_func_pairlist: raise Exception()

        depth_list = lmap(ig(0), depth_func_pairlist)
        DuplicateException.chk_n_raise(depth_list)

        maxdepth = max(map(ig(0), depth_func_pairlist))
        l = [
            None,
        ] * (maxdepth + 1)

        for depth, func in depth_func_pairlist:
            l[depth] = func

        for i in range(len(l)):
            if l[i] is not None: continue
            l[i] = idfun

        return l
Example #35
def channelQuantilesFromImage(img, iles=(.5,), useSampling=False):  # one-element tuple; (.5) alone is just a float
    imageAsList = list(img.getdata())

#    if useSample:
#        print "*** !!!UNTESTED CODE. UNTESTED CODE!!! ***"
#        indices = random.sample(xrange(numPoints), int(numPoints*samplePct))
#        sampledlst = ig(*indices)(currentAsList) # a tuple
#        evensize = int(math.sqrt(samplePct) * currentImage.size[0])
#        
#        imageAsList = sampledlst[:evensize*evensize]

    
    pixelCount = len(imageAsList)

    # quantiles
    redq   =  quickQuantiles((ig(R)(c) for c in imageAsList),
                             pixelCount, quantiles=iles)
    greenq =  quickQuantiles((ig(G)(c) for c in imageAsList),
                             pixelCount, quantiles=iles)
    blueq  =  quickQuantiles((ig(B)(c) for c in imageAsList),
                             pixelCount, quantiles=iles)

    return redq, greenq, blueq
Example #36
def sort(data, reverse=False, item=None):
    logging.debug(u"filtro 'sort': data='{0}', reverse='{1}', item='{2}'"
                  "".format(data, reverse, item))
    res = unicode("", "utf8")
    try:
        if item:
            try:
                res = sorted(data, reverse=reverse, key=ig(int(item)))
                if type(res) is str:
                    res = unicode(res, "utf8")
#                logging.debug(u"filtro 'sort': risultato='{0}'".format(res))
#                return res
            except:
                pass
            try:
                res = sorted(data, reverse=reverse, key=ig(item.encode("utf8")))
                if type(res) is str:
                    res = unicode(res, "utf8")
#                logging.debug(u"filtro 'sort': risultato='{0}'".format(res))
#                return res
            except:
                pass
            try:
                res = sorted(data, reverse=reverse, key=ig(item.decode("utf8")))
                if type(res) is str:
                    res = unicode(res, "utf8")
#                logging.debug(u"filtro 'sort': risultato='{0}'".format(res))
#                return res
            except:
                pass
        else:
            res = sorted(data, reverse=reverse)
    except:
        logging.warning(u"filtro 'sort': {0}".format(trb.format_exc()))
        
    logging.debug(u"filtro 'sort': risultato='{0}'".format(res))
    return res
 def majorityElement(self,A):
     N = C(A)
     print (max(N.items(),key=ig(1))[0])
from operator import itemgetter as ig
n = int(input())
d = {}  #dictionary
l = []  #list
for i in range(n):
    l = input().split()
    new_l =[]       #new list
    for j in range(1,len(l)):
        new_l.append(l[j])
    d[l[0]] = list(map(float,new_l))  # making dictionary and converting 
#print (d)                           # string list to int list    
for key, values in d.items():
    avg = round(sum(values)/len(values),2) # to round up to 2 digits
    #print (avg)
    d[key] = avg
sort_d = dict(sorted(d.items(), key=ig(1))) #sorting dictionary
print ("%.2f" %sort_d[input()])
#print (sort_d, d)



'''
by lambda
l = [15, 18, 2, 36, 12, 78, 5, 6, 9]
print reduce(lambda x, y: x + y, l) / len(l)

by numpy
import numpy as np
print np.mean(l)

by statistics
Ids = manager.dict()
Q = mp.Queue()

# build queue
for n in range(10):
	Q.put(n)

# mirror my getid() function
def setid(d, key, val): d[key] = val

def worker(my_id, q, ids):
	while True:
		try:
			k = q.get(timeout=1)
			setid(ids, k, my_id)
			sleep(0.01) # let other worker have a chance
		except Queue.Empty:
			return

W = [ mp.Process(target=worker, args=(i, Q, Ids))
	for i in range(2) ]
for w in W: w.start()
for w in W: w.join()

# figure out who did what
from operator import itemgetter as ig
from itertools import groupby
for k,g in groupby(sorted(Ids.items(), key=ig(1)), key=ig(1)):
	print 'worker', k, 'wrote keys', [x[0] for x in g]

# Roman Numerals
# A library to read and write Roman numerals
# Programming Praxis Exercise 13
# http://programmingpraxis.com/2009/03/06/roman-numerals/

from collections import OrderedDict
from operator import itemgetter as ig


_romans = dict(M=1000, CM=900, D=500, CD=400, C=100, XC=90,
               L=50, XL=40, X=10, IX=9, V=5, IV=4, I=1)


# ordered dict from numerals to decimals {'M': 1000}
roman_numerals = OrderedDict(sorted(_romans.items(), key=ig(1), reverse=True))
# ordered dict from decimals to numerals {1000: 'M'}
roman_decimals = OrderedDict((d, n) for n, d in roman_numerals.items())


def roman_to_int(roman):
    "Convert a roman numeral to decimal."
    number = 0
    while roman:
        prefixes = [p for p in roman_numerals if roman.startswith(p)]
        if prefixes:
            number += roman_numerals[prefixes[0]]
            roman = roman[len(prefixes[0]):]
        else:
            raise Exception("Invalid roman numeral")
    return number
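# A quick check of roman_to_int above: roman_numerals is ordered by descending value,
# so the highest-valued matching prefix (e.g. 'CM' before 'C') is consumed first.
print(roman_to_int('MCMXCIV'))  # 1994
print(roman_to_int('XLII'))     # 42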
from operator import itemgetter as ig
from sys import stdin as Si
from itertools import chain


    
if __name__=='__main__':

    n = int(Si.readline())
    for j in range(n):
        A = Si.readline().strip()
        h = {}
        for i in range(ord('z'),ord('a')-1,-1):
            p=A.count(chr(i))+1
            h[p] = sorted(h.get(p,[])+[i],reverse=True)
        ans = [x[1] for x in sorted(h.items(),key=ig(0))]
        h = [chr(x) for x in chain.from_iterable(ans)]
        print(' '.join(h))
    

'''
Difficult Characters
Attempted by: 624
/
Accuracy: 40%
/
Tag(s):
Ad-Hoc, Easy, Implementation, Sorting
Problem
Editorial
My Submissions
from sys import stdin as Si
from operator import itemgetter as ig 
if __name__=='__main__':
    n,k,q = map(int,Si.readline().split())
    T = tuple(map(int,Si.readline().split()))
    stack = {}
    for i in range(q):
        a,b = map(int,Si.readline().split())
        #print(a,b,stack)
        if a==1:
            if len(stack)<k:    stack[b]=T[b-1]
            else:
                stack[b]=T[b-1]
                while len(stack)>k:
                    x,y = min(stack.items(),key=ig(1))
                    del stack[x]
                #stack[b] = T[b-1]
        elif a==2:
            if b in stack:  print('YES')
            else:   print('NO')
            
    
'''
B. Bear and Displayed Friends
time limit per test
2 seconds
memory limit per test
256 megabytes
input
standard input
output
from sys import stdin as Si
from collections import defaultdict as dt
from operator import itemgetter as ig 
if __name__=='__main__':
    n,m = map(int,Si.readline().split())
    h = dt(dict)

    for i in range(n):
        s,r,p = map(str,Si.readline().split())
        r,p = map(int,(r,p))
        if p in h[r]:   h[r][p]+=[s]
        else:   h[r][p]=[s]
    for da in h:
        d = sorted(h[da].items(),key=ig(0),reverse=True)
        #print(d)
        ns,i = [],0
        while len(ns)<2:
            if len(d[i][1])+len(ns)>=3:
                ns='?'
                break
            else:
                ns+= d[i][1][:2-len(ns)]
            i+=1
            if i>=len(d):   break
        print(' '.join(ns))
'''
Very soon Berland will hold a School Team Programming Olympiad. From each of the m Berland regions a team of two people is invited to participate in the olympiad. The qualifying contest to form teams was held and it was attended by n Berland students. There were at least two schoolboys participating from each of the m regions of Berland. The result of each of the participants of the qualifying competition is an integer score from 0 to 800 inclusive.

The team of each region is formed from two such members of the qualifying competition of the region, that none of them can be replaced by a schoolboy of the same region, not included in the team and who received a greater number of points. There may be a situation where a team of some region can not be formed uniquely, that is, there is more than one school team that meets the properties described above. In this case, the region needs to undertake an additional contest. The two teams in the region are considered to be different if there is at least one schoolboy who is included in one team and is not included in the other team. It is guaranteed that for each region at least two its representatives participated in the qualifying contest.

Your task is, given the results of the qualifying competition, to identify the team from each region, or to announce that in this region its formation requires additional contests.
        if hn in g:
            if urls not in g[hn]:  #to add only unique 
                g[hn] += (urls,)
        else:
            g[hn] = (urls,)
    res = {}        #grouping on number of hostnames queries
    for k,v in g.items():
        q = len(v)  #num of queries
        
        if q in res:    res[q]+=('http://'+k,)
        else:           res[q]=('http://'+k,)
    Del = []
    for k,v in res.items():
        if len(v)<=1:   Del.append(k)
    for i in range(len(Del)):   del res[Del[i]] 
    ans = sorted(res.items(), key=ig(1))
    print(len(ans))
    for a in ans:
        print(' '.join(a[1]))
    
'''
C. Hostname Aliases
time limit per test
5 seconds
memory limit per test
256 megabytes
input
standard input
output
standard output
from operator import itemgetter as ig 
if __name__=='__main__':

    n = int(input())
    for i in range(n):
        E = input()
        m = int(input())
        g = {}
        for j in range(m):
            w = input()
            g[w]= [str(E.index(x)) for x in w]
        g = sorted(g.items(),key=ig(1))
        print(g)
        for t in g:
            print(t[0])
            
        
'''
Adriana was playing with the English alphabet. When she was done playing with the alphabet, she realised that she had jumbled up the positions of the letters. Now, given a set of words, she wondered what would be the dictionary ordering of these words based on the new alphabet ordering which she made.

In other words, given a permutation of the English alphabet, E and a set of words S, you need to output the lexicographical ordering of the words in the set S based on the new alphabet, E.

Input

The first line will contain a single integer, T, denoting the number of test cases. T lines follow.
For each test case:
The first line will contain a string, E, the new alphabet ordering, which will be a permutation of 'abcdefghijklmnopqrstuvwxyz'
The next line will contain a single integer M, the size of the set S. S lines follow, each containing a single word, containing lowercase latin characters.

Output
            #   but avoid infinite loops
            pid_status = [os.waitpid(child, os.WNOHANG) \
                                        for child in children]

            # Only get children who have exited 
            pid_status = [pidt for pidt in pid_status \
                                            if pidt != (0, 0)]

            # Save successes
            success = [pidt for pidt in pid_status \
                                            if pidt[-1] == 0]
            successes += len(success)

            # Reset the number of children PIDs
            children = [pid for pid in children \
                        if pid in map(ig(0), pid_status)]

            # Save a list of failed processes
            for pidt in [pidt for pidt in pid_status \
                                    if pidt not in success]:
                failed.add(pid_to_link[pidt[0]])
                

        # If not too many workers, fork and download
        pid = os.fork()

        # Check for the parent or child
        if pid:                         # Parent process
            # Store child's PID
            children.append(pid)
            pid_to_link[pid] = link
Example #47
File: des.py  Project: oberix/star
def main():
    import random as rnd
    import pandas as pnd
    import star.share.bag as bag
    from operator import itemgetter as ig
    logging.basicConfig(level=logging.DEBUG)
    
    # macroeconomic overview
    cod = ""
    
    aree = ["AreaPIndustr", "AreaPTransizione", "AreaPSviluppo"]
    nome = "pàèsè {0}".format(rnd.randrange(0, 100))
    area = aree[rnd.randrange(3)]
    info = {"ISO3": nome, "nome": nome, "area": area}
    cagrPIL = dict(("pàèsè {0}".format(i), rnd.uniform(-20,20)) for i in xrange(100))
    cagrPIL["AreaPIndustr"] = rnd.uniform(-20,20)
    cagrPIL["AreaPTransizione"] = rnd.uniform(-20,20)
    cagrPIL["AreaPSviluppo"] = rnd.uniform(-20,20)
    exp = rnd.uniform(1, 5000)
    imp = rnd.uniform(1, 5000)
    exp0 = rnd.uniform(1, 5000)
    imp0 = rnd.uniform(1, 5000)
    sc = exp - imp
    sc0 = exp0 - imp0
    vsc = (sc / sc0) * 100 - 100
    vexp = (exp / exp0) * 100 - 100
    vimp = (imp / imp0) * 100 - 100
    var = {"vexp": vexp, "vimp": vimp, "vsc": vsc}
    bp = rnd.uniform(-50,50)
    idx_lavoro = (rnd.uniform(0,200), rnd.uniform(0,200))
    df = pnd.DataFrame([[info, cagrPIL, sc, var, bp, idx_lavoro]],
                       columns=["paese", "cagr", "sc", "var", "bil", "idx"])
    
    lm = {"paese": "paese", "cagr": "cagr", "sc": "saldo commerciale", 
          "var": "variazione saldo", "bil": "bilancia pagamenti", 
          "idx": "indice relativo costo lavoro"}
    
    b = bag.Bag(df, cod=cod, stype="desc", lm=lm)
    with open(os.path.join("/home/lcurzi/testi_auto/main_quadro_macroeconomico.xml"), "rb") as fd:
        main_file =  fd.read()
    
    with open(os.path.join("/home/lcurzi/testi_auto/portfolio_quadro_macroeconomico.xml"), "rb") as fd:
        portfolio_file =  fd.read()    
    
    b.main = main_file
    b.portfolio = portfolio_file
    b.engine = "xml"
    
    print("----------")
    logging.info(u"dataframe: {0}".format(b.df.T))
    print("\n")
    
    td = TexDes(b)
    o = td.out()
    
    print("----------")
    logging.info(u"output: {0}".format(o))
    print("\n")
    print("\n")
    
    # flows
    cod = ""
        
    list_pos = [{"nome": "merceologià {0}".format(i), 
                 "saldo": rnd.uniform(0, +10000)} 
                for i in xrange(20)]
    list_pos.sort(reverse=True, key=ig("saldo"))
    list_pos.append({"nome": "totale", "saldo": sum([s["saldo"] for s in list_pos])})
    list_neg = [{"nome": "merceologià {0}".format(20 + i), 
                 "saldo": rnd.uniform(-10000, 0)} 
                for i in xrange(20)]
    list_neg.sort(key=ig("saldo"))
    list_neg.append({"nome": "totale", "saldo": sum([s["saldo"] for s in list_neg])})
    
    df = pnd.DataFrame({"lpos": [list_pos], "lneg": [list_neg]})
    
    lm = {"lpos": "lista merceologie positive", 
          "lneg": "lista merceologie negative",}
    
    b = bag.Bag(df, cod=cod, stype="desc", lm=lm)
    with open(os.path.join("/home/lcurzi/testi_auto/main_flussi.xml"), "rb") as fd:
        main_file =  fd.read()
    
    with open(os.path.join("/home/lcurzi/testi_auto/portfolio_flussi.xml"), "rb") as fd:
        portfolio_file =  fd.read()    
    
    b.main = main_file
    b.portfolio = portfolio_file
    b.engine = "xml"
    
    print("----------")
    logging.info(u"lpos:")
    for e in list_pos:
        logging.info(u"{0}".format(e))
    print("\n")
    logging.info(u"lneg:")
    for e in list_neg:
        logging.info(u"{0}".format(e))
    print("\n")
    
    td = TexDes(b)
    o = td.out()
    
    print("----------")
    logging.info(u"output: {0}".format(o))
    print("\n")
    print("\n")
    
    # categories
    cod = ""
        
    list_pos = [{"nome": "categoria {0}".format(i), 
                 "saldo": rnd.uniform(0, +10000)} 
                for i in xrange(20)]
    list_pos.sort(reverse=True, key=ig("saldo"))
#    list_pos.append({"nome": "totale", 
#                     "saldo": sum([s["saldo"] for s in list_pos])})
    list_neg = [{"nome": "categoria {0}".format(20 + i), 
                 "saldo": rnd.uniform(-10000, 0)} 
                for i in xrange(20)]
    list_neg.sort(key=ig("saldo"))
    list_neg.append({"nome": "totale", "saldo": sum([s["saldo"] for s in list_neg])})
    
    df = pnd.DataFrame({"lpos": [list_pos], "lneg": [list_neg]})
    
    lm = {"lpos": "lista categorie positive", 
          "lneg": "lista categorie negative",}
    
    b = bag.Bag(df, cod=cod, stype="desc", lm=lm)
    with open(os.path.join("/home/lcurzi/testi_auto/main_categorie.xml"), "rb") as fd:
        main_file =  fd.read()
    
    with open(os.path.join("/home/lcurzi/testi_auto/portfolio_categorie.xml"), "rb") as fd:
        portfolio_file =  fd.read()    
    
    b.main = main_file
    b.portfolio = portfolio_file
    b.engine = "xml"
    
    print("----------")
    logging.info(u"lpos:")
    for e in list_pos:
        logging.info(u"{0}".format(e))
    print("\n")
    logging.info(u"lneg:")
    for e in list_neg:
        logging.info(u"{0}".format(e))
    print("\n")
    
    td = TexDes(b)
    o = td.out()
    
    print("----------")
    logging.info(u"output: {0}".format(o))
    print("\n")
if __name__== '__main__':
    n = int(Si.readline())
    #H = tuple(map(int,Si.readline().split()))
    h = {}
    for hh in tuple(map(int,Si.readline().split())):
        h[hh] = h.get(hh,0)+1
        
    m = int(Si.readline())
    lan = {}
    for l in tuple(map(int,Si.readline().split())):
        lan[l] = lan.get(l,0)+1
    sub = {}
    for s in tuple(map(int,Si.readline().split())):
        sub[s] = sub.get(s,0)+1
        
    Vp = max(h.items(), key=ig(1))
    VP = [k for k in h if h[k]==Vp[1]]
    print(VP)
    if len(VP)==1:  print(VP[0])
    else:
        '''
        M,Ap = -m,None
        for v in VP:
            if v not in sub:    continue
            if sub[v]>M: Ap=v;M=sub[v]
        print(Ap)
        '''
        tp = {v:lan[v] for v in VP if v in lan}
        ap = max(tp.items(),key=ig(1))
        AP = [k for k in tp if tp[k]==ap[1]]
        if len(AP)==1:  print(AP[0])