Code Example #1
File: source.py Project: kelvin13/Knockout
    def _grid_glyphs(self, glyphs):
        x = self._x
        y = self._y
        
        K = self._K
        leading = self._leading
        FMX = self.font['__gridfont__'].character_index
        
        colored_chars = list(chain.from_iterable(zip_longest([], text, fillvalue=self._palatte.get(token, (0, 0, 0, 1))) for token, text in xml_lexer.get_tokens(''.join(self._CHARS))))
#        print(set(token for token, text in xml_lexer.get_tokens(''.join(self._CHARS))))
        lines = list(_linebreak(colored_chars, self._charlength))
        self._IJ = [0] + list(accumulate(len(l) for l, br in lines))
        self.y_bottom = y + leading * len(lines)
        
        y += leading
        xd = x + 30
        
        colored_text = {color: [] for color in self._palatte.values()}
        for l, line in enumerate(lines):
            for color, G in groupby(((FMX(character), xd + i*K, y + l*leading, color) for i, (color, character) in enumerate(line[0]) if character != '\n'),
                    key = lambda k: k[3]):
                try:
                    colored_text[color].extend((g, h, k) for g, h, k, c in G)
                except KeyError:
                    colored_text[color] = [(g, h, k) for g, h, k, c in G]
        
        N = zip(accumulate(line[1] for line in lines), enumerate(lines))
        numbers = chain.from_iterable(((FMX(character), x + i*K, y + l*leading) for i, character in enumerate(str(int(N)))) for N, (l, line) in N if line[1])
        colored_text[(0.7, 0.7, 0.7, 1)] = list(numbers)
        self._rows = len(lines)
        self._colored_text = colored_text
        
Code Example #2
	def random(self):

		types = ["one", "diff", "sum", "ratio"]

		if self.random_weight is None:

			t = random.choice(types)
			Channel1 = random.randint(0, self.nb_channels - 1)
			Channel2 = random.randint(0, self.nb_channels - 1)

		else:

			acc = self.random_weight
			weights = acc['RQE']['type']
			cumdist = list(itertools.accumulate(weights))
			x = random.random() * cumdist[-1]
			t = types[bisect.bisect(cumdist, x)]

			weights = acc['RQE']['channel']
			cumdist = list(itertools.accumulate(weights))
			x = random.random() * cumdist[-1]
			Channel1 = bisect.bisect(cumdist, x)
			x = random.random() * cumdist[-1]
			Channel2 = bisect.bisect(cumdist, x)


		option={ 'type' : 'RQE', 'RQE' : { 'type' :  t,
					'windows' : [
						{ 'Channel' : Channel1},
						{ 'Channel' : Channel2},
					]
				}
			}
		self.option = option
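The cumulative-weights-plus-bisect pattern above is the standard recipe for weighted random choice; a minimal self-contained sketch of just that part (illustrative names, not from this project):

import bisect
import itertools
import random

def weighted_choice(items, weights):
    # Build the cumulative distribution once, then binary-search into it.
    cumdist = list(itertools.accumulate(weights))
    x = random.random() * cumdist[-1]
    return items[bisect.bisect(cumdist, x)]

# weighted_choice(["one", "diff", "sum", "ratio"], [5, 2, 2, 1])
# returns "one" with probability 5/10.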
Code Example #3
File: test-itertools.py Project: nbeney/tools
 def test_accumulate(self):
     res = accumulate([3, 4, 6, 2, 1, 9, 0, 7, 5, 8])
     self.assertEqual([3, 7, 13, 15, 16, 25, 25, 32, 37, 45], list(res))
     res = accumulate([3, 4, 6, 2, 1, 9, 0, 7, 5, 8], add)
     self.assertEqual([3, 7, 13, 15, 16, 25, 25, 32, 37, 45], list(res))
     res = accumulate([3, 4, 6, 2, 1, 9, 0, 7, 5, 8], max)
     self.assertEqual([3, 4, 6, 6, 6, 9, 9, 9, 9, 9], list(res))
Code Example #4
import itertools as it

def solve(xs, n):
    max_upto = list(it.accumulate(xs, max))
    min_after = list(it.accumulate(reversed(xs), min))
    min_after.reverse()
    cnt = 1 + sum(1 for (left_max, right_min) in zip(max_upto, min_after[1:])
                    if left_max <= right_min)
    return cnt
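# A hypothetical check: solve([1, 3, 2, 4], 4) -> 3, since each position
# where max(prefix) <= min(suffix) starts a new independently sortable chunk.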
Code Example #5
File: finite_iterators.py Project: tri2sing/PyFP
def running_stats(data_iterator):
    ''' Returns iterators for the running min, max, product, and sum.'''
    # Replicate the input iterator as an iterator can be used only once.
    i1, i2, i3, i4 = tee(data_iterator, 4)
    rmin = accumulate(i1, min)
    rmax = accumulate(i2, max)
    rprd = accumulate(i3, mul)
    rsum  = accumulate(i4)
    return rmin, rmax, rprd, rsum
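A minimal usage sketch for the helper above (assuming the source module's imports: tee and accumulate from itertools, mul from operator):

rmin, rmax, rprd, rsum = running_stats([3, 1, 4, 1, 5])
print(list(rmin))  # [3, 1, 1, 1, 1]
print(list(rmax))  # [3, 3, 4, 4, 5]
print(list(rprd))  # [3, 3, 12, 12, 60]
print(list(rsum))  # [3, 4, 8, 9, 14]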
Code Example #6
File: 01intertools.py Project: dreammis/simpleApp
def itaccumulate():
    """
    accumulate(iterable[, func])
    note: available in Python 3 only!
    """
    list(accumulate(range(10)))
    # running total: addition by default; a custom binary function can be supplied
    # [0, 1, 3, 6, 10, 15, 21, 28, 36, 45]

    list(accumulate(range(1,5),operator.mul))
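    # [1, 2, 6, 24]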
Code Example #7
File: votable.py Project: zackmdavis/Finetooth
 def scored_plaintext(self, for_voter=None):
     plaintext = Tagnostic(self.content).plaintext()
     score_increments = [0] * (len(plaintext) + 1)
     mark_increments = [0] * (len(plaintext) + 1)
     for vote in self.vote_set.all():
         score_increments[vote.start_index] += vote.value
         score_increments[vote.end_index] -= vote.value
         if for_voter and vote.voter == for_voter:
             mark_increments[vote.start_index] += vote.value
             mark_increments[vote.end_index] -= vote.value
     return tuple(zip(plaintext,
                      itertools.accumulate(score_increments),
                      itertools.accumulate(mark_increments)))
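Both accumulate calls above implement the difference-array trick: add +v at a vote's start index and -v at its end index, then a running sum recovers the per-character totals. A minimal sketch of the idea in isolation (illustrative names, not from this project):

import itertools

def range_add(length, updates):
    # updates: iterable of (start, end, value); value is added over [start, end)
    inc = [0] * (length + 1)
    for start, end, value in updates:
        inc[start] += value
        inc[end] -= value
    return list(itertools.accumulate(inc))[:-1]

# range_add(5, [(1, 4, 2), (0, 2, 1)]) -> [1, 3, 2, 2, 0]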
Code Example #8
File: groups.py Project: PsyMar/FutbolRatings
def sim_game(homeo, homed, awayo, awayd, HFAmult):
    import random
    from itertools import accumulate
    import bisect
    from math import fsum
    homeodds,awayodds = single_match_odds(homeo, homed, awayo, awayd, HFAmult)
    hometotalodds = list(accumulate(homeodds))
    awaytotalodds = list(accumulate(awayodds))
    homerand = random.uniform(0,hometotalodds[-1])
    awayrand = random.uniform(0,awaytotalodds[-1])
    home_goals = bisect.bisect(hometotalodds, homerand)
    away_goals = bisect.bisect(awaytotalodds, awayrand)
    return home_goals, away_goals
Code Example #9
File: iters.py Project: uamana/splist
def random(iterators, weights):
    if weights is None:
        weights = [1 for x in iterators]
    if len(weights) != len(iterators):
        raise ValueError("len of iterators and weights must be same")
    cumdist = list(itertools.accumulate(weights))
    while iterators:
        it_pos = bisect.bisect(cumdist, rnd.random() * cumdist[-1])
        try:
            yield next(iterators[it_pos])
        except StopIteration:
            del iterators[it_pos]
            del weights[it_pos]
            cumdist = list(itertools.accumulate(weights))
Code Example #10
File: generate_text.py Project: summerlight/anlp
        def __init__(self, lang):
            total = sum(lang_cnt_dict[i] + 1 for i in lang)
            #avg = total / len(lang)
            weights = (int(total / (lang_cnt_dict[i] + 1)) for i in lang)

            self.pop = list(lang)
            self.weights = list(itertools.accumulate(weights))
Code Example #11
import itertools

def find_maximum_subarray(A):

    min_sum = max_sum = 0
    for running_sum in itertools.accumulate(A):
        min_sum = min(min_sum, running_sum)
        max_sum = max(max_sum, running_sum - min_sum)
    return max_sum
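# e.g. find_maximum_subarray([-2, 1, -3, 4, -1, 2, 1, -5, 4]) -> 6;
# running_sum - min_sum is the best sum of a subarray ending at the current index.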
Code Example #12
File: jelly.py Project: SandSnip3r/jelly
def reduce_cumulative(links, outmost_links, index):
	ret = [attrdict(arity = 1)]
	if len(links) == 1:
		ret[0].call = lambda t: list(itertools.accumulate(iterable(t), lambda x, y: dyadic_link(links[0], (x, y))))
	else:
		ret[0].call = lambda z: [reduce_simple(t, links[0]) for t in split_rolling(iterable(z), links[1].call())]
	return ret
Code Example #13
def test_camera_stream_frames_trunc_left():
    camera = mock.Mock()
    encoder = mock.Mock()
    camera._encoders = {1: encoder}
    stream = PiCameraCircularIO(camera, size=10)
    frames = []
    for data, frame in generate_frames('hkffkffhkff'):
        encoder.frame = frame
        if frame.complete:
            frames.append(frame)
        stream.write(data)
    del frames[:3]
    # As we've gotten rid of the start of the stream we need to re-calc the
    # video and split sizes in the comparison meta-data
    sizes = accumulate(f.frame_size for f in frames)
    frames = [
        PiVideoFrame(
            f.index,
            f.frame_type,
            f.frame_size,
            size,
            size,
            f.timestamp,
            f.complete
            )
        for f, size in zip(frames, sizes)
        ]
    assert stream.getvalue() == b'fkkffhkkff'
    assert list(stream.frames) == frames
    assert list(reversed(stream.frames)) == frames[::-1]
Code Example #14
File: euler_lib.py Project: sminez/Euler
def scanl(func=op.add, acc=0, col=[]):
    '''
    Scan a collection from the left using a binary function
    and an accumulator, yielding the running values:
    [acc, f(acc, x0), f(f(acc, x0), x1), ...]
    '''
    with_acc = itools.chain([acc], col)
    return itools.accumulate(with_acc, func)
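For example (assuming the module's aliases, import operator as op and import itertools as itools):

list(scanl(op.add, 0, [1, 2, 3]))  # [0, 1, 3, 6]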
Code Example #15
def main():
    # cycle iterator can be used to cycle over a collection
    seq1 = ["Joe", "John", "Mike"]
    cycle1 = itertools.cycle(seq1)
    print(next(cycle1))
    print(next(cycle1))
    print(next(cycle1))
    print(next(cycle1))

    # use count to create a simple counter
    count1 = itertools.count(100, 10)
    print(next(count1))
    print(next(count1))
    print(next(count1))

    # accumulate creates an iterator that accumulates values
    vals = [10,20,30,40,50,40,30]
    acc = itertools.accumulate(vals, max)
    print(list(acc))
        
    # use chain to connect sequences together
    x = itertools.chain("ABCD", "1234")
    print(list(x))
    
    # dropwhile skips values while the predicate holds, then yields the rest;
    # takewhile yields values while the predicate holds, then stops
    print(list(itertools.dropwhile(testFunction, vals)))
    print(list(itertools.takewhile(testFunction, vals)))
Code Example #16
File: views.py Project: middlet/anello
def compute_actual_burndown(cards):
  dd = create_done_dict(cards)
  number_days = days_in_month(next(iter(dd.values()))[0][0])
  done_hist = create_done_histogram(dd)
  dates = sorted([dateparser.parse(cards[ci]['history'][0][1]) for ci in cards if cards[ci]['history'][0][0]!='someday'])
  newitems = [0]*number_days
  for di in dates:
    newitems[di.day-1] += 1
  # cumulative frequency of new items
  newitems_cumfreq = [si for si in accumulate(newitems)]
  # cumulative freq of completed items
  done_cumfreq = [si for si in accumulate(done_hist)]
  # burndown is the difference
  actual_bdown = [newitems_cumfreq[ii]-val for ii,val in enumerate(done_cumfreq)]
  #
  return actual_bdown
Code Example #17
File: solution.py Project: aragar/Python_2013
    def move(self, direction):
        if direction == self.OPPOSITE[self.direction]:
            raise ValueError

        new_head = PythonHead(self._head.coords + direction)

        if new_head.coords.x < 0 or new_head.coords.x >= len(self.world):
            raise Death
        if new_head.coords.y < 0 or new_head.coords.y >= len(self.world):
            raise Death

        new_head_cell = self.world[new_head.coords.x][new_head.coords.y]
        if isinstance(new_head_cell, PythonPart):
            raise Death

        if isinstance(new_head_cell, Food):
            self.energy += new_head_cell.energy
        new_head_cell.contents = new_head

        last_part_offset = list(accumulate(
            part.direction for part in self._body))[-1]
        last_part_coords = self._head.coords + last_part_offset
        self.world[last_part_coords.x][last_part_coords.y].contents = None

        new_part = PythonPart(self.OPPOSITE[self.direction])
        self.world[self._head.coords.x][
            self._head.coords.y].contents = new_part

        self.direction = direction
        self._body = [new_part] + self._body[:-1]
        self._head = new_head
Code Example #18
from itertools import accumulate
from bisect import bisect

def fracKnapsack(vl, wt, W, n):
    # Sort items by value-to-weight ratio, descending.
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))  # cumulative weights
    k = bisect(acc, W)          # number of whole items that fit
    if k == 0:
        return 0
    if k == n:
        return sum(vl[:k])
    # whole items plus a fraction of the next item
    return sum(vl[:k]) + (W - acc[k - 1]) * vl[k] / wt[k]
Code Example #19
import functools
import itertools

def find_longest_subarray_less_equal_k(A, k):
    # Builds the prefix sum according to A.
    prefix_sum = list(itertools.accumulate(A))

    # Early returns if the sum of A is smaller than or equal to k.
    if prefix_sum[-1] <= k:
        return len(A)

    # Builds min_prefix_sum.
    min_prefix_sum = list(
        reversed(
            functools.reduce(lambda s, v: s + [min(v, s[-1])],
                             reversed(prefix_sum[:-1]), [prefix_sum[-1]])))
    a = b = max_length = 0
    while a < len(A) and b < len(A):
        min_curr_sum = (min_prefix_sum[b] - prefix_sum[a - 1]
                        if a > 0 else min_prefix_sum[b])
        if min_curr_sum <= k:
            curr_length = b - a + 1
            if curr_length > max_length:
                max_length = curr_length
            b += 1
        else:  # min_curr_sum > k.
            a += 1
    return max_length
Code Example #20
File: 497.py Project: viing937/leetcode
 def __init__(self, rects):
     """
     :type rects: List[List[int]]
     """
     self.rects = rects
     self.sizes = list(map(lambda x: (x[2]-x[0]+1) * (x[3]-x[1]+1), rects))
     self.weights = list(accumulate(self.sizes))
Code Example #21
File: texter.py Project: ChillarAnand/chamanti_ocr
def get_next_char_decay(char):  # Nearly six times slower!
    try:
        followers, accumulated_counts = bi_acc_cache_np[char]
    except KeyError:
        followers, counts = zip(*bigram_counts[char].items())
        accumulated_counts = np.fromiter(accumulate(counts), dtype=np.int32)
        bi_acc_cache_np[char] = followers, accumulated_counts

    loc = np.int32(accumulated_counts[-1] * random())
    follower = bisect.bisect(accumulated_counts, loc)
    current_count = accumulated_counts[follower]
    if follower:
        current_count -= accumulated_counts[follower-1]

    if current_count > 1:
        if current_count < REDUCE_COUNT_BY_nTH:
            decrement = 1
        else:
            decrement = current_count//REDUCE_COUNT_BY_nTH

        accumulated_counts[follower:] -= decrement

    # if len(accumulated_counts) == 5 and char == 'ణ్ని':
    #     print("{}) {:5d} < {:5d} picks {:2d} removed {:4d}//{} = {:3d} from {} {}".format(
    #         len(accumulated_counts), loc, accumulated_counts[-1], follower,
    #         current_count, REDUCE_COUNT_BY_nTH, decrement, accumulated_counts, char))

    return followers[follower]
Code Example #22
File: stochastic.py Project: skurmedel/ulsys
 def impl(curr_axiom, curr_n):
     if curr_n < 1:
         return curr_axiom
     newaxiom = []
     # Replace each symbol in the axiom
     for S in curr_axiom:
         if S in s_to_rules:
             # Todo: These steps could be moved out of the loop!
             # Sort by probability
             current_rules = sorted(s_to_rules[S], key=lambda x: x[0])
             # The total probability for all the rules for the given S
             prob_total = sum((R for R, Abc in current_rules))
             
             # Construct a list of weights, ordered and scaled by total
             weights = [R/prob_total for R, Abc in current_rules]
             weights_acc = list(itertools.accumulate(weights))
             p = rng()
             
             selected_rule = None
             last_w = 0
             for i, w in enumerate(weights_acc):
                 if p >= last_w and p <= w:
                     selected_rule = current_rules[i]
                     break
                 else:
                     last_w = w
             _, Abc = selected_rule
             newaxiom.extend(Abc)
         else:
             newaxiom.append(S)
             continue
     return impl(newaxiom, curr_n - 1)
Code Example #23
File: utils.py Project: kittttttan/pe
 def triangle_numbers():
     """
     >>> import itertools
     >>> [i for i in itertools.islice(triangle_numbers(), 5)]
     [1, 3, 6, 10, 15]
     """
     return itertools.accumulate(itertools.count(1))
Code Example #24
File: indexing.py Project: kwohlfahrt/GLPy
def flatOffset(idxs, shape, base=1):
	idxs = chain(idxs, repeat(0, len(shape) - len(idxs)))
	idxs = [idx.indices(s)[0] if isinstance(idx, slice) else idx % s
	        for idx, s in zip(idxs, shape)]
	shape = chain((base,), reversed(shape))
	shapes = accumulate(shape, operator.mul)
	return sum( idx * s for idx, s in zip(reversed(idxs), shapes) )
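A quick sanity check (the function needs chain and repeat from itertools, accumulate, and operator, presumably imported by the module): element (1, 2) of a row-major 3x4 array sits at offset 1*4 + 2.

flatOffset((1, 2), (3, 4))  # -> 6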
Code Example #25
def transform_labels_with_representation(labels):

    representation = find_representation(labels)
    labels_count = labels.shape[0]
    new_labels = np.zeros((labels_count, 1))
    labels_sums = np.array(list(map(lambda x: list(itertools.accumulate(x)), labels))).T
    starting_from = 0

    for class_size, class_k in representation:
        sums = labels_sums[starting_from + class_size - 1]
        if starting_from != 0:
            sums -= labels_sums[starting_from - 1]
        #print(sums)
        extra = [[(class_k - sums[label_id])*1.0/class_k for val_id in range(class_k)] for label_id in range(labels_count)]
        extra = np.array(extra)
        #print(new_labels.shape)
        #print(labels.shape)
        new_labels = np.append(new_labels, labels[:, starting_from: (starting_from + class_size)], axis=1)
        new_labels = np.append(new_labels, extra, axis=1)
        starting_from += class_size
    K = count_max_number_of_ones_in_matrix(new_labels)
    for label_id in range(labels_count):
        for char_id in range(len(new_labels[label_id])):
            new_labels[label_id, char_id]*=1.0/K
    print('Representation : ', representation)
    return new_labels[:, 1:], representation
Code Example #26
import bisect
import itertools as it

def solve(xs, d):
    xs.sort()
    ms, fs = list(zip(*xs))
    n = len(ms)

    fsacc = list(it.accumulate(it.chain((0,), fs)))

    out = []
    for i, money in enumerate(ms):
        k = bisect.bisect_right(ms, money + d - 1)
        # print_stderr('----------')
        # print_stderr('i =', i)
        # print_stderr('k =', k)
        # print_stderr('d =', d)
        # print_stderr('ms=', ms)
        # print_stderr('ms[i] =', ms[i])
        # print_stderr('ms[k-1] =', ms[k-1])
        # print_stderr('fs[i] =', fs[i])
        # print_stderr('fs[k-1] =', fs[k-1])

        # out.append(sum(fs[i:k]))
        out.append(fsacc[k] - fsacc[i])
        if k == n:
            break

    return max(out)
Code Example #27
File: views.py Project: chrismedrela/amy
    def instructors_over_time(self, request, format=None):
        """Cumulative number of instructor appearances on workshops over
        time."""

        badges = Badge.objects.instructor_badges()

        qs = Person.objects.filter(badges__in=badges)
        filter = InstructorsOverTimeFilter(request.GET, queryset=qs)
        qs = filter.qs.annotate(
            date=Min('award__awarded'),
            count=Value(1, output_field=IntegerField())
        ).order_by('date')

        serializer = InstructorsOverTimeSerializer(qs, many=True)

        # run a cumulative generator over the data
        data = accumulate(serializer.data, self._add_counts)

        # drop data for the same days by showing the last record for
        # particular date
        data = self._only_latest_date(data)

        data = self.listify(data, request, format)

        return Response(data)
Code Example #28
File: harmony.py Project: juliensiebert/harmony
def chord_notes(root = 'C', ch_name = 'M', mode = 'ionian'):
  """
  returns a list of notes that compose a given chord
  @param root (str) the root note
  @param ch_name (str) the modifier (M for major, m for minor...)
  @param mode (str) the mode
  
  >>> chord_notes()
  ['C', 'E', 'G']
  
  >>> chord_notes(root='F')
  ['F', 'A', 'C']
  
  >>> chord_notes(root='G', ch_name='7')
  ['G', 'B', 'D', 'F']
  """
  assert root in Notes and ch_name in Chords and mode in Modes
  intervals_cumsum = [0] + list(accumulate(Modes[mode]))
  return [
    Notes[i] for i in 
    [
      (Notes.index(root) + intervals_cumsum[deg-1] + ChordsModifiers[mod])  % len(Notes)
      for deg, mod in Chords[ch_name]
    ]
  ]
Code Example #29
def bwtPartialMatch(pattern, d, sarr):
    # divide the pattern into d+1 parts
    patternLen = len(pattern)
    divLen = int(patternLen/(d+1))
    extra = patternLen % (d+1)
    divLen = [divLen]*(d+1-extra) + [divLen+1]*extra
    divIndex = list(itertools.accumulate([0]+divLen))

    # find a pattern that has exact match
    smallPatterns = [pattern[divIndex[pi]:divIndex[pi+1]] for pi in range(d+1)]
    smallMatchIndexes = [bwtMatch(p) for p in smallPatterns]

    # extend the non empty patterns
    nonEmptyPatternMatchIndexes = [pi for pi in range(d+1) if len(smallMatchIndexes[pi]) > 0]
    allPartialMatchIndexes = set()
    for pi in range(d+1):
        if len(smallMatchIndexes[pi]) > 0:
            # if there are exact matches
            exactMatchIndexes = smallMatchIndexes[pi]
            extendedStartIndexes = [sarr[mi] - divIndex[pi] \
                                    for mi in exactMatchIndexes\
                                    if sarr[mi] - divIndex[pi] >= 0]
            
            partialMatchIndexes = [si for si in extendedStartIndexes \
                                   if hammingDistance(genome[si:si+patternLen], pattern) <= d]

            for i in partialMatchIndexes:
                allPartialMatchIndexes.add(i)
            
        
    return allPartialMatchIndexes
Code Example #30
    def encode(self, values):
        self.validate_value(values)

        raw_head_chunks = []
        tail_chunks = []
        for value, encoder in zip(values, self.encoders):
            if getattr(encoder, 'is_dynamic', False):
                raw_head_chunks.append(None)
                tail_chunks.append(encoder(value))
            else:
                raw_head_chunks.append(encoder(value))
                tail_chunks.append(b'')

        head_length = sum(
            32 if item is None else len(item)
            for item in raw_head_chunks
        )
        tail_offsets = (0,) + tuple(accumulate(map(len, tail_chunks[:-1])))
        head_chunks = tuple(
            encode_uint_256(head_length + offset) if chunk is None else chunk
            for chunk, offset in zip(raw_head_chunks, tail_offsets)
        )

        encoded_value = b''.join(head_chunks + tuple(tail_chunks))

        return encoded_value
Code Example #31
for item in items:
    w, v = item
    if w == w0:
        w0_list.append(v)
    if w == w0 + 1:
        w1_list.append(v)
    if w == w0 + 2:
        w2_list.append(v)
    if w == w0 + 3:
        w3_list.append(v)
w0_list = sorted(w0_list, reverse=True)
w1_list = sorted(w1_list, reverse=True)
w2_list = sorted(w2_list, reverse=True)
w3_list = sorted(w3_list, reverse=True)

w0_accum = tuple(accumulate(w0_list))
w1_accum = tuple(accumulate(w1_list))
w2_accum = tuple(accumulate(w2_list))
w3_accum = tuple(accumulate(w3_list))

ans = 0
for i in range(len(w0_list) + 1):
    for j in range(len(w1_list) + 1):
        for k in range(len(w2_list) + 1):
            for m in range(len(w3_list) + 1):
                if w0 * i + (w0 + 1) * j + (w0 + 2) * k + (w0 + 3) * m > W:
                    continue
                else:
                    tmp = 0
                    if i > 0:
                        tmp += w0_accum[i - 1]
Code Example #32
import math
from itertools import accumulate


def is_prime(n):
    if n == 1: return 0

    for k in range(2, int(math.sqrt(n)) + 1):
        if n % k == 0:
            return 0
    return 1


Q = int(input())
L = [0] * Q
R = [0] * Q

for j in range(Q):
    L[j], R[j] = [int(i) for i in input().split()]

min_L = min(L)
max_R = max(R)

li = []
for i in range(min_L, max_R + 1, 1):
    if i % 2 == 1:
        li.append(is_prime(i) * is_prime((i + 1) // 2))
    else:
        li.append(0)

ans = [0]
ans += list(accumulate(li))

for i, j in zip(L, R):
    print(ans[j - min_L + 1] - ans[i - min_L])
Code Example #33
 def select_mutation(self):
     # Implementation from https://docs.python.org/3/library/random.html -- Ctrl+F "weights"
     choices, weights = zip(*self.mutations.items())
     cumdist = list(accumulate(weights))
     x = random.random() * cumdist[-1]
     self.selected_mutation = choices[bisect(cumdist, x)]
Code Example #34
def visualizeTransactions(resultDir, transactionFilePath):
    if transactionFilePath:
        # read transactions file
        with open(transactionFilePath) as transactions_file:
            transactions = json.loads(transactions_file.read())

        # prepare transaction data
        data_by_day = groupBy(filter(transactionIsBtc, transactions["data"]),
                              transactionToIsoDate)
        spent_data_by_day_last_month = groupBy(
            filter(
                lambda t: transactionIsEurSpent(t) and transactionIsLastMonth(
                    t),
                transactions["data"],
            ),
            transactionToYearMonthDay,
        )
        spent_data_by_year_month = groupBy(
            filter(transactionIsEurSpent, transactions["data"]),
            transactionToYearMonth)
        spent_data_by_user = groupBy(
            filter(transactionIsEurSpent, transactions["data"]),
            transactionToUserEmail)

        wallet_balance_btc_by_day_keys = list(data_by_day.keys())
        wallet_balance_btc_by_day_keys.sort()
        wallet_balance_btc_by_day_keys_datetimes = list(
            map(
                lambda d: np.datetime64(isoDateToDatetime(d)),
                wallet_balance_btc_by_day_keys,
            ))
        wallet_balance_btc_by_day_values = list(
            accumulate([
                sum(map(transactionToBtc, data_by_day[k]))
                for k in wallet_balance_btc_by_day_keys
            ]))
        eur_by_day_last_month = {
            k: -1 * sum(map(transactionToEur, v))
            for k, v in spent_data_by_day_last_month.items()
        }
        eur_by_year_month = {
            k: -1 * sum(map(transactionToEur, v))
            for k, v in spent_data_by_year_month.items()
        }
        eur_by_user = {
            k: -1 * sum(map(transactionToEur, v))
            for k, v in spent_data_by_user.items()
        }

        # draw diagrams
        plt.rcdefaults()

        drawBarChart(
            "Transactions per day over the last month",
            "EUR",
            eur_by_day_last_month.keys(),
            eur_by_day_last_month.values(),
        )

        plt.savefig(os.path.join(resultDir, "transactions_per_day.png"),
                    bbox_inches="tight")

        drawBarChart(
            "Transactions per month",
            "EUR",
            eur_by_year_month.keys(),
            eur_by_year_month.values(),
        )
        plt.savefig(os.path.join(resultDir, "transactions_per_month.png"),
                    bbox_inches="tight")

        drawEurPerUser("Transactions per user", "EUR", eur_by_user.keys(),
                       eur_by_user.values())
        plt.savefig(os.path.join(resultDir, "transactions_per_user.png"),
                    bbox_inches="tight")

        drawTimeSeries(
            "BTC wallet per day in last month",
            "BTC",
            wallet_balance_btc_by_day_keys_datetimes,
            wallet_balance_btc_by_day_values,
        )
        plt.savefig(os.path.join(resultDir, "wallet_balance_per_day.png"),
                    bbox_inches="tight")
Code Example #35
    # 3, 7, 11
    list1.append(next(t))

print(list1)

list2 = [1, 8, 2, 4]
t1 = itertools.cycle(list2)
print(next(t1))
print(next(t1))
print(next(t1))
print(next(t1))
print(next(t1))
print(next(t1))

# add all previous values to current value
t2 = itertools.accumulate(list2)
print(list(t2))

# running maximum: each output is the largest value seen so far
t2 = itertools.accumulate(list2, max)
print(list(t2))

t3 = itertools.chain("Red", "Hat")
print(list(t3))

numbers = [0, 1, 2, 4, 5, 7]
t4 = itertools.dropwhile(functionA, numbers)
print(list(t4))

t5 = itertools.takewhile(functionA, numbers)
print(list(t5))
Code Example #36
from itertools import accumulate


def PrefixSum(arr):
    return list(accumulate(arr))
Code Example #37
File: 60_pref_sum.py Project: nazomeku/codefights
def pref_sum(a):
    return list(accumulate(a))
Code Example #38
 def accumulate(self, iterable, func=operator.add):
     return itertools.accumulate(iterable, func)
Code Example #39
File: abc035_c_imos.py Project: masakiaota/kyoupuro
 def get_result(self):
     '''
     Computes the result of the range additions in O(N).
     '''
     from itertools import accumulate
     return list(accumulate(self.ls[:-1]))
Code Example #40
 def _handle_cumulative(self, entity_metrics: List) -> List[Dict[str, Any]]:
     for metrics in entity_metrics:
         metrics.update(data=list(accumulate(metrics["data"])))
     return entity_metrics
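# e.g. given [{"data": [1, 2, 3]}], "data" becomes [1, 3, 6] (running totals).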
Code Example #41
from itertools import accumulate

INF = float('inf')

N, *A = map(int, open(0).read().split())

l = list(accumulate(A, min))
r = list(accumulate(A[::-1], min))[::-1]

result = INF

# case: "convex" kadomatsu pattern (middle element is the largest)
for i in range(1, N - 1):
    a = l[i - 1]
    b = A[i]
    c = r[i + 1]
    if a <= b and c <= b:
        result = min(result, a + b + c)

# case: "concave" kadomatsu pattern (middle element is the smallest)
for i in range(1, N - 1):
    a = l[i - 1]
    b = A[i]
    c = r[i + 1]
    if b <= a and b <= c:
        result = min(result, a + b + c)

if result == INF:
    print(-1)
else:
    print(result)
Code Example #42
File: main2.py Project: keijak/comp-pub
import itertools
import collections

N = int(input())
A = list(map(int, input().split()))
acum = list(itertools.accumulate(A, initial=0))
c = collections.Counter(acum)
print(sum(n * (n - 1) // 2 for n in c.values()))  # nC2
Code Example #43
def calc_peptide_prefix_masses(peptide):
    return list(it.accumulate(peptide.masses, operator.add, initial=0))
Code Example #44
File: plackett_luce.py Project: erdman/plackett-luce
def plackett_luce(rankings,
                  tolerance=1e-9,
                  check_assumption=True,
                  normalize=True,
                  verbose=False):
    '''This algorithm returns the MLE of the Plackett-Luce ranking parameters
    over a given set of rankings.  It requires that the set of players is unable
    to be split into two disjoint sets where nobody from set A has beaten anyone from
    set B.  If this assumption fails, the algorithm will diverge.  If the
    assumption is checked and fails, the algorithm will short-circuit and
    return None.

    Input is a list of dictionaries, where each dictionary corresponds to an
    individual ranking and contains the player : finish for that ranking.

    Output is a dictionary containing player : plackett_luce_parameter keys
    and values.
    '''
    players = set(key for ranking in rankings for key in ranking.keys())
    rankings = [
        sorted(ranking.keys(), key=ranking.get) for ranking in rankings
    ]
    if verbose:
        print('Using native Python implementation of Plackett-Luce.')
        print('{:,} unique players found.'.format(len(players)))
        print('{:,} rankings found.'.format(len(rankings)))
    if check_assumption:
        edges = [(source, dest) for ranking in rankings
                 for source, dest in combinations(ranking, 2)]
        scc_count = len(set(scc(edges).values()))
        if verbose:
            if scc_count == 1:
                print(
                    'No disjoint sets found.  Algorithm convergence conditions are met.'
                )
            else:
                print('{:,} disjoint sets found.  Algorithm will diverge.'.
                      format(scc_count))
        if scc_count != 1:
            return None

    ws = Counter(name for ranking in rankings for name in ranking[:-1])
    gammas = {player: 1.0 / len(players) for player in players}
    gdiff = float('inf')
    iteration = 0
    start = time.perf_counter()
    while gdiff > tolerance:
        _gammas = gammas
        gamma_sums = [
            list(
                accumulate(1 / s for s in reversed(
                    list(
                        accumulate(gammas[finisher]
                                   for finisher in reversed(ranking))))))
            for ranking in rankings
        ]
        gammas = {
            player: ws[player] /
            sum(gamma_sum[min(ranking.index(player),
                              len(ranking) - 2)]
                for ranking, gamma_sum in zip(rankings, gamma_sums)
                if player in ranking)
            for player in players
        }
        if normalize:
            gammas = {
                player: gamma / sum(gammas.values())
                for player, gamma in gammas.items()
            }
        pgdiff = gdiff
        gdiff = sqrt(
            sum((gamma - _gammas[player])**2
                for player, gamma in gammas.items()))
        iteration += 1
        if verbose:
            now = time.perf_counter()
            print("%d %.2f seconds L2=%.2e" % (iteration, now - start, gdiff))
            if gdiff > pgdiff:
                print("Gamma difference increased, %.4e %.4e" %
                      (gdiff, pgdiff))
            start = now
    return gammas
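A minimal usage sketch with hypothetical rankings (the function also depends on the module's own imports, e.g. Counter, accumulate, combinations, sqrt, time, and its scc helper):

rankings = [
    {'alice': 1, 'bob': 2, 'carol': 3},
    {'bob': 1, 'alice': 2},
    {'carol': 1, 'alice': 2, 'bob': 3},
]
gammas = plackett_luce(rankings)
# gammas maps each player to an MLE strength parameter; higher is stronger.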
Code Example #45
File: 2212444.py Project: qifanyyy/CLCDSA
from heapq import heapify, heappushpop
from itertools import accumulate

x, y, z = map(int, input().split())
persons = [list(map(int, input().split())) for _ in range(x + y + z)]
persons.sort(key=lambda abc: abc[0] - abc[1])

ans_g = sum(x[0] for x in persons[-x:])
ans_s = sum(x[1] for x in persons[:y])
ans_c = sum(x[2] for x in persons[y:-x])

gold_pq = [a - c for a, b, c in persons[-x:]]
silver_pq = [b - c for a, b, c in persons[:y]]
heapify(gold_pq)
heapify(silver_pq)

ans_f = [0]
for a, b, c in persons[y:-x]:
    np = b - c
    rp = heappushpop(silver_pq, np)
    ans_f.append(np - rp)

ans_b = [0]
for a, b, c in persons[-x - 1:y - 1:-1]:
    np = a - c
    rp = heappushpop(gold_pq, np)
    ans_b.append(np - rp)

ans_f = list(accumulate(ans_f))
ans_b = list(accumulate(ans_b))
print(ans_g + ans_s + ans_c + max(sum(z) for z in zip(ans_f, reversed(ans_b))))
Code Example #46
def getheightprofile():
    dataset = get_dataset(tenant_handler.tenant())
    query = request.json

    if not isinstance(
            query, dict
    ) or not "projection" in query or not "coordinates" in query or not "distances" in query or not "samples" in query:
        return jsonify({"error": "Bad query"})

    if not isinstance(query["coordinates"],
                      list) or len(query["coordinates"]) < 2:
        return jsonify(
            {"error": "Insufficient number of coordinates specified"})

    if not isinstance(query["distances"], list) or len(
            query["distances"]) != len(query["coordinates"]) - 1:
        return jsonify({"error": "Invalid distances specified"})

    try:
        epsg = int(
            re.match(r'epsg:(\d+)', query["projection"],
                     re.IGNORECASE).group(1))
    except:
        return jsonify({"error": "Invalid projection specified"})

    try:
        numSamples = int(query["samples"])
    except:
        return jsonify({"error": "Invalid sample count specified"})

    inputSpatialRef = osr.SpatialReference()
    if inputSpatialRef.ImportFromEPSG(epsg) != 0:
        return jsonify({"error": "Failed to parse projection"})

    crsTransform = osr.CoordinateTransformation(inputSpatialRef,
                                                dataset["spatialRef"])
    gtrans = dataset["geoTransform"]

    elevations = []

    x = 0
    i = 0
    p1 = query["coordinates"][i]
    p2 = query["coordinates"][i + 1]
    dr = (p2[0] - p1[0], p2[1] - p1[1])
    cumDistances = list(accumulate(query["distances"]))
    cumDistances.insert(0, 0)
    totDistance = sum(query["distances"])
    for s in range(0, numSamples):
        while i + 2 < len(cumDistances) and x > cumDistances[i + 1]:
            i += 1
            p1 = query["coordinates"][i]
            p2 = query["coordinates"][i + 1]
            dr = (p2[0] - p1[0], p2[1] - p1[1])

        mu = (x - cumDistances[i]) / (cumDistances[i + 1] - cumDistances[i])
        pRaster = crsTransform.TransformPoint(p1[0] + mu * dr[0],
                                              p1[1] + mu * dr[1])

        # Geographic coordinates to pixel coordinates
        col = (-gtrans[0] * gtrans[5] + gtrans[2] * gtrans[3] -
               gtrans[2] * pRaster[1] + gtrans[5] * pRaster[0]) / (
                   gtrans[1] * gtrans[5] - gtrans[2] * gtrans[4])
        row = (-gtrans[0] * gtrans[4] + gtrans[1] * gtrans[3] -
               gtrans[1] * pRaster[1] + gtrans[4] * pRaster[0]) / (
                   gtrans[2] * gtrans[4] - gtrans[1] * gtrans[5])

        data = dataset["band"].ReadRaster(math.floor(col), math.floor(row), 2,
                                          2, 2, 2, gdal.GDT_Float64)
        if not data or len(data) != 32:
            elevations.append(0.)
        else:
            values = struct.unpack('d' * 4, data)
            kRow = row - math.floor(row)
            kCol = col - math.floor(col)
            value = (values[0] * (1. - kCol) + values[1] * kCol) * (
                1. - kRow) + (values[2] *
                              (1. - kCol) + values[3] * kCol) * (kRow)
            elevations.append(value * dataset["unitsToMeters"])

        x += totDistance / (numSamples - 1)

    return jsonify({"elevations": elevations})
Code Example #47
from itertools import accumulate

import numpy as np

with open('A-large.in') as f:
    cases = int(f.readline())
    
    for i in range(cases):
        ringers = 0
        
        max_shyness, audience = f.readline().split()
        audience = [int(shyness) for shyness in audience]
        
        cumulative = np.array(list(accumulate(audience)))
        threshold = np.arange(int(max_shyness) + 1)

        diff = threshold - cumulative
        
        if np.max(diff) >= 0:
            ringers = np.max(diff) + 1
            
        print("Case #{0}: {1}".format(i + 1, ringers))
Code Example #48
#! python3
"""The sequence of triangle numbers is generated by adding the natural numbers.
What is the value of the first triangle
number to have over five hundred divisors?"""
from itertools import accumulate
from itertools import count
from math import sqrt
import sys
from os.path import dirname
sys.path.insert(0, dirname(dirname(__file__)))
from utils import sixn


def factors_n(n):
    fac = 1
    for i in sixn(int(sqrt(n)) + 1):
        x = 1
        while not n % i:
            x += 1
            n //= i
        fac *= x
    if n != 1:
        fac *= 2
    return fac


for i in accumulate(count()):
    if factors_n(i) > 500:
        print(i)
        break
Code Example #49
 def __init__(self, w):
     self.w = list(itertools.accumulate(w))
Code Example #50
File: 2010793.py Project: qifanyyy/CLCDSA
# -*- coding: utf-8 -*-
from itertools import accumulate


def inpl():
    return tuple(map(int, input().split()))


N = int(input())
S = [0 for _ in range(10**5 + 1)]
P = [[0] * 3 for _ in range(10**5 + 1)]
A = []

for _ in range(N):
    R, H = inpl()
    P[R][H - 1] += 1
    S[R] += 1
    A.append([R, H])
S = list(accumulate(S))

for R, H in A:
    w = S[R - 1] + P[R][H % 3]
    d = P[R][H - 1] - 1
    l = N - w - d - 1

    print("{} {} {}".format(w, l, d))
Code Example #51
File: solution.py Project: markopuzav/aoc-2020
 def find_break(pos, ns):
     cumsum = list(accumulate(xmas[pos:]))
     return cumsum.index(weakness) if weakness in cumsum else None
Code Example #52
 def MakeBinNcum(self):
     self.binned.update(
         ncum=list(it.accumulate(reversed(self.binned['n']))))
     self.binned.update(
         ncum_event=list(it.accumulate(reversed(self.binned['n_event']))))
Code Example #53
        def _compute_evas(self, ts: int) -> None:
            # Update durable counters and compute per-class and total hit rates

            total_hits: int = 0
            total_events: int = 0

            class_hit_rates: Dict[Hashable, array[float]] = {}

            for clas, info in self._class_infos.items():
                info.durable_hit_counters.update(info.hit_counters,
                                                 self._ewma_factor)
                info.durable_eviction_counters.update(info.eviction_counters,
                                                      self._ewma_factor)
                info.hit_counters.reset()
                info.eviction_counters.reset()

                class_hit_rates[clas] = reversed_array(
                    'd',
                    map(
                        lambda x: lenient_div(x[0], (x[0] + x[1])),
                        itertools.accumulate(
                            zip_longest_reversed_arrays(
                                info.durable_hit_counters.bin_data,
                                info.durable_eviction_counters.bin_data,
                            ),
                            lambda a, b: (a[0] + b[0], a[1] + b[
                                1]),  # functools.partial(map, operator.plus)
                        ),
                    ))

                total_hits += info.durable_hit_counters.total
                total_events += info.durable_hit_counters.total + info.durable_eviction_counters.total

            total_hit_rate: float = lenient_div(
                total_hits, total_events
            )  # TODO: might be zero because EWMA leads to events disappearing

            # per_access_gain is the expected hit rate for a single item in the cache (i.e. a line or a byte).
            per_access_gain = total_hit_rate / self._count_of_items_in_cache()

            time_interval = ts - self._last_eva_computation_ts
            if time_interval == 0:
                # If the last eva computation happened in the same second, pretend it was one second ago
                time_interval = 1
            per_age_bin_width_avg_accesses = self._age_bin_width * total_events / time_interval  # TODO: this is a very rough "estimate"
            # average gain of an item in the cache during a duration of age_bin_width
            per_age_bin_width_avg_gain = per_access_gain * per_age_bin_width_avg_accesses

            # Calculate per-class EVAs

            print()
            pprint({
                'time_interval': time_interval,
                'ts': ts,
                'self._last_eva_computation_ts': self._last_eva_computation_ts,
                'self._age_bin_width': self._age_bin_width,
                'per_age_bin_width_avg_accesses':
                per_age_bin_width_avg_accesses,
                'per_age_bin_width_avg_gain': per_age_bin_width_avg_gain,
                'total_events': total_events,
                'total_hit_rate': total_hit_rate,
            })

            for clas, info in self._class_infos.items():
                max_counters_length = max(
                    len(info.durable_hit_counters.bin_data),
                    len(info.durable_eviction_counters.bin_data),
                )
                last_bin_events = (
                    array_get(info.durable_hit_counters.bin_data,
                              max_counters_length - 1) +
                    array_get(info.durable_eviction_counters.bin_data,
                              max_counters_length - 1))

                # TODO: use centre of the bin for calculations.

                info.evas.set_bin_data(
                    reversed_array(
                        'd',
                        map(
                            # (cumulative_hits - per_age_bin_width_avg_gain * cumulative_lifetimes) / (cumulative_hits + cumulative_evictions)
                            lambda x: lenient_div(
                                (x[1] - per_age_bin_width_avg_gain * x[0]),
                                (x[1] + x[2])),
                            # input x: (cumulative_lifetimes, cumulative_hits, cumulative_evictions) in reverse order of bins.
                            # cumulative_lifetimes is the sum of all future lifetimes. divide this by the number of future
                            # events to get the expected lifetime (in units of age bin width).
                            # cumulative_hits is the number of all future hits.
                            # cumulative_evictions is the number of all future evictions.
                            map(
                                # interpolate: centre of the age bin
                                lambda x: (x[0] + x[5] / 2, x[1] + x[3] / 2, x[
                                    2] + x[4] / 2),
                                itertools.accumulate(
                                    zip_longest_reversed_arrays(
                                        array(
                                            'q',
                                            itertools.repeat(
                                                0, max_counters_length)),
                                        array(
                                            'q',
                                            itertools.repeat(
                                                0, max_counters_length)),
                                        array(
                                            'q',
                                            itertools.repeat(
                                                0, max_counters_length)),
                                        info.durable_hit_counters.bin_data,
                                        info.durable_eviction_counters.
                                        bin_data,
                                        # accumulate yields the first input item (the last item here) as is, so
                                        # setting it to the correct value is necessary:
                                        array('q',
                                              [0] * (max_counters_length - 1) +
                                              [last_bin_events]),
                                    ),
                                    lambda acc, inp: (
                                        acc[0] + acc[
                                            5
                                        ],  # last cumulative lifetime counter + increase
                                        acc[1] + acc[
                                            3
                                        ],  # increase by previous hit counter value
                                        acc[2] + acc[
                                            4
                                        ],  # increase by previous eviction counter value
                                        inp[
                                            3
                                        ],  # next hit counter value (carried forward from input)
                                        inp[
                                            4
                                        ],  # next eviction counter value (carried forward from input)
                                        (acc[1] + acc[3] + inp[3]) +
                                        (acc[2] + acc[4] + inp[4]
                                         ),  # next increase of
                                        # the cumulative life time counter: the next value of cumulative hits + next
                                        # value of cumulative evictions
                                    ),
                                ),
                            ),
                        )))

                pprint({
                    'clas':
                    clas,
                    'per_age_bin_width_avg_gain':
                    per_age_bin_width_avg_gain,
                    'hits':
                    info.durable_hit_counters.bin_data,
                    'evictions':
                    info.durable_eviction_counters.bin_data,
                    'max_counters_length':
                    max_counters_length,
                    'last_bin_events':
                    last_bin_events,
                    'accumulation':
                    list(
                        itertools.accumulate(
                            zip_longest_reversed_arrays(
                                array('q',
                                      itertools.repeat(0,
                                                       max_counters_length)),
                                array('q',
                                      itertools.repeat(0,
                                                       max_counters_length)),
                                array('q',
                                      itertools.repeat(0,
                                                       max_counters_length)),
                                info.durable_hit_counters.bin_data,
                                info.durable_eviction_counters.bin_data,
                                # accumulate yields the first input item (the last item here) as is, so
                                # setting it to the correct value is necessary:
                                array('q', [0] * (max_counters_length - 1) +
                                      [last_bin_events]),
                            ),
                            lambda acc, inp: (
                                acc[0] + acc[
                                    5
                                ],  # last cumulative lifetime counter + increase
                                acc[1] + acc[
                                    3
                                ],  # increase by previous hit counter value
                                acc[2] + acc[
                                    4
                                ],  # increase by previous eviction counter value
                                inp[
                                    3
                                ],  # next hit counter value (carried forward from input)
                                inp[
                                    4
                                ],  # next eviction counter value (carried forward from input)
                                (acc[1] + acc[3] + inp[3]) +
                                (acc[2] + acc[4] + inp[4]),  # next increase of
                                # the cumulative life time counter: the next value of cumulative hits + next
                                # value of cumulative evictions
                            ),
                        )),
                    'evas':
                    info.evas.bin_data,
                })

            # Apply 'reused' bias

            for clas, info in self._class_infos.items():
                reused_class = _ReusedClassifier.to_reused(clas)
                bias: float
                if (reused_class in self._class_infos
                        and len(class_hit_rates[reused_class]) > 0
                        and class_hit_rates[reused_class][0] != 1.0):
                    bias = (self._class_infos[reused_class].evas[0] /
                            (1.0 - class_hit_rates[reused_class][0]))
                else:
                    continue

                for bin_edge, class_hit_rate in zip(info.evas,
                                                    class_hit_rates[clas]):
                    info.evas[bin_edge] += (class_hit_rate -
                                            total_hit_rate) * bias

                pprint({
                    'clas': clas,
                    'evas (bias applied)': info.evas.bin_data,
                })

            # Reset counters

            self._accesses_since_eva_computation = 0
            self._last_eva_computation_ts = ts
Code Example #54
from itertools import accumulate
n = int(input())
a = list(map(int, input().rstrip().split()))
b = list(accumulate(a))
q = int(input())
for i in range(q):
    l, r = list(map(int, input().split()))
    if l == r:
        print(a[l])
    else:
        print(b[r] - b[l] + a[l])
Code Example #55
        m.agent.play_round()
        m.record_metrics()

x = range(1, rounds + 1)

optimal_regret = [0] * rounds
pylab.plot(x, optimal_regret, label='optimal', linestyle='--')

for m in agents:
    y = m.pseudo_regret
    pylab.plot(x, y, label=m.agent.algorithm)

pylab.legend(loc='upper left')
pylab.xlabel('t')
pylab.ylabel('katumus')
save(f'regret_{rounds}')
pylab.show()

optimal_reward = list(accumulate([rewards.best_action().expected] * rounds))
pylab.plot(x, optimal_reward, label='optimal', linestyle='--')

for m in agents:
    y = m.reward
    pylab.plot(x, y, label=m.agent.algorithm)

pylab.xlabel('t')
pylab.ylabel('tuotto')
pylab.legend(loc='upper left')
save(f'reward_{rounds}')
pylab.show()
Code Example #56
def getStreaks(order, today):
    # First, get all the times the habit was repeated, from order.checkedList on the Order object.
    date_array = list(order.checkedList.all())
    # "list_of_days_since_first_repeat" saves the dates with all the dates from the very first one ever to today.
    list_of_days_since_first_repeat = []
    # "list_of_repeat_days" saves the dates which were checked
    list_of_repeat_days = []
    # "week_habit" appends all weeks which passed since the first time anything was checked.
    week_habit = []
    # the first week is initialized to today.
    week_habit_date = today
    # Below we get all the Repeats ever created.
    repeats = Repeats.objects.all()
    # first_time_stamp is first set to "None" so the system isn't confused when there are no Repeats objects
    # for the habit yet, because it hasn't been checked yet.
    first_time_stamp = None
    # Earliest throws an exception if there are no Repeats. This is why below there is the try-except block.
    try:
        first_time_stamp = repeats.earliest('dateAsString')
    except:
        pass
    # That is done below in order to avoid none in the queryset
    if first_time_stamp:
        # first_repeats is really just the first_time_stamp just parsed to be a date and not a string.
        first_repeats = parse_date(first_time_stamp.dateAsString)
        # If "today" is actually today's date then last_repeats stands for yesterday. We do this so that the current streak
        # is not automatically 0 if you haven't pressed it today
        last_repeats = today - timedelta(days=1)
        # time_stamp_deltas gives us a timedelta-object from yesterday to the first time anything was ever checked.
        time_stamp_deltas = last_repeats - first_repeats

        # In this the list_of_days_since_first_repeat is produced.
        for k in range(time_stamp_deltas.days + 1):
            time_stamp_day = first_repeats + timedelta(days=k)
            list_of_days_since_first_repeat.append(time_stamp_day)

        # I start today and walk backwards. Here I get all the weeks. Regardless if they were checked or not.
        while week_habit_date > first_repeats:
            #  7 days are subtracted from today
            week_habit_date -= timedelta(days=7)
            # The current week is not included, so the streak survives even if this week hasn't been checked yet.
            week_habit.append(week_habit_date)
        # Weekdays are received in reverse order.
        week_habit.reverse()

        # date_array is an array which has all the days which were checked
        for repeat in date_array:
            repeated_days = parse_date(repeat.dateAsString)
            list_of_repeat_days.append(repeated_days)
    # The data type "Set" gets rid of all duplicates.
    checked_days_array = set(list_of_repeat_days)

    # inCheckedDays is there in order not to need exact matches. The timedelta establishes the week in the future.
    def inCheckedDays(x, checked_days):
        # x is whatever day of the week today is.
        for i in checked_days:
            # The line below determines whether i is contained in the week starting at x.
            # That is determined by whether i is on or after x and before the week after x.
            if x <= i < x + timedelta(days=7):
                return True
        return False

    def tryingWeekly(a, x):
        count_current_before, longest_streak_before = a
        count_current_after = count_current_before + 1
        # x in this case is the same weekday of whatever weekday is "today". x will eventually take all the values of
        # each of the current weekday in each week from today to the date of the first repeat except for this week.
        if inCheckedDays(x, checked_days_array):
            return (count_current_after, count_current_after
                    if count_current_after > longest_streak_before else
                    longest_streak_before)
        else:
            return (0, longest_streak_before)

    def tryingDaily(a, x):
        # a is a tuple
        count_current_before, longest_streak_before = a
        # if x (one date in checked_days_array) is in the checked days the current streak will be updated by 1, else it will be set to 0
        count_current_after = count_current_before + 1 if x in checked_days_array else 0

        return (count_current_after, count_current_after
                if count_current_after > longest_streak_before else
                longest_streak_before)

    # Initial=(0,0) stands for the tuple a that is being initialized with 0,0
    result = list(
        accumulate(list_of_days_since_first_repeat,
                   tryingDaily,
                   initial=(0, 0))) if order.interval == "Daily" else list(
                       accumulate(week_habit, tryingWeekly, initial=(0, 0)))
    # [-1] is for the last tuple. The second [] stands for either the currentStreak [0] or the longestStreak[1].
    # I used accumulate instead of functools.reduce to help debug the result and to grasp the intermediate steps.
    order.longestStreak = result[-1][1]
    order.streak = result[-1][0]
    order.save()
Code Example #57
def idfn(sections_data):
    '''Returns the name of the test according to the parameters'''
    num_l = len(sections_data.splitlines())
    return 'sections_{}'.format(num_l)
import itertools
import operator
# list of mandatory sections
data = '''version: 1
lattice: asd
states: asd
excitations: asd
sensitizer_decay: asd
activator_decay: asd'''
# combinations of sections. At least 1 is missing
list_data = list(itertools.accumulate(data.splitlines(keepends=True)[:-1], operator.concat))
@pytest.mark.parametrize('sections_data', list_data, ids=idfn)
def test_sections_config(sections_data):
    with pytest.raises(SettingsFileError) as excinfo:
        with temp_config_filename(sections_data) as filename:
            settings.load(filename)
    assert excinfo.match(r"Those sections must be present")
    assert excinfo.match(r"Sections that are needed but not present in the file")
    assert excinfo.type == SettingsFileError

# should get a warning for an extra unrecognized section
def test_extra_sections_warning_config():
    data = data_all_mandatory_ok+'''extra_unknown_section: dsa'''
    with pytest.warns(SettingsExtraValueWarning) as warnings:
        with temp_config_filename(data) as filename:
            settings.load(filename)
Code Example #58
print()
print('Using num2 with Take While Function > return value if True')
fltr_t = itertools.takewhile(lt_2, num2)
for i in fltr_t:
    print(i)
# After hitting the first value that fails the condition, it discards the rest
# of the iterable, regardless of the values that follow.
print()

#######################
# Accumulate Function #
#######################
# Keeps a running total (sum by default) of the iterable
print('Accumulates Function')
print('default == Sum')
acc_default = itertools.accumulate(num2)
for i in acc_default:
    print(i)
print()
print('Specific == multiply')
num3 = [1, 2, 3, 2, 1, 0]
print("using num3 = [1, 2, 3, 2, 1, 0]")
acc_mul = itertools.accumulate(num3, operator.mul)
for i in acc_mul:
    print(i)
# Only meaningful if the sequence doesn't start with 0;
# otherwise every accumulated product will be 0
print()

Code Example #59
from itertools import accumulate

N, M, K = map(int, input().split())
a = [0] + list(accumulate(int(i) for i in input().split()))
b = [0] + list(accumulate(int(i) for i in input().split()))

cnt = 0
best0 = M
for i in range(N + 1):
    ai = a[i]
    for j in range(best0, -1, -1):
        bj = b[j]
        if ai + bj <= K:
            cnt = max(cnt, i + j)
            best0 = j
            break

print(cnt)
Code Example #60
from itertools import accumulate


def iterating(initial_array):
    return list(accumulate(initial_array))
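# e.g. iterating([1, 2, 3]) -> [1, 3, 6]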