def solve(data):
    grid = data
    base = grid
    # Tile the grid 5x vertically, then 5x horizontally; risk values above 9 wrap back to 1.
    for j in range(1, 5):
        appendix = base + j
        appendix[appendix > 9] -= 9
        grid = np.concatenate((grid, appendix), axis=0)
    base = grid
    for i in range(1, 5):
        appendix = base + i
        appendix[appendix > 9] -= 9
        grid = np.concatenate((grid, appendix), axis=1)
    h, w = np.shape(grid)
    graph = Graph()
    # Horizontal edges: entering a cell costs that cell's risk.
    for j in range(0, h):
        for a, b in pairwise(range(0, w)):
            graph.add_edge(j * h + a, j * h + b, grid[j][b])
            graph.add_edge(j * h + b, j * h + a, grid[j][a])
    # Vertical edges.
    for i in range(0, w):
        for a, b in pairwise(range(0, h)):
            graph.add_edge(a * h + i, b * h + i, grid[b][i])
            graph.add_edge(b * h + i, a * h + i, grid[a][i])
    return find_path(graph, 0, h * w - 1).total_cost
def insert(start, ruleSet, steps):
    occurences = Counter()
    pairOccurences = Counter()
    for x in start:
        occurences[x] += 1
    pairs = pairwise(start)
    for pair in pairs:
        pairOccurences[pair] += 1
    newPairOccurences = deepcopy(pairOccurences)
    for _ in range(steps):
        pairOccurences = deepcopy(newPairOccurences)
        for (x, y), quantity in pairOccurences.items():
            if quantity == 0:
                continue
            newElement = ruleSet[(x, y)]
            occurences[newElement] += quantity
            newPairOccurences[(x, newElement)] += quantity
            newPairOccurences[(newElement, y)] += quantity
            newPairOccurences[(x, y)] -= quantity
    return occurences
def parts1and2(template, repl, num):
    pairs = Counter(pairwise(template))
    for _ in range(num):
        pairs = _round2(pairs, repl)
    counts = _count_elements(template[0], template[-1], pairs)
    return max(counts.values()) - min(counts.values())
def insert(polymer, transformations):
    inserted_polymer = []
    for p in pairwise(polymer):
        inserted_polymer += transform(p, transformations)
    inserted_polymer.append(polymer[-1])
    return "".join(inserted_polymer)
def transform(scid, point):
    path = paths[(0, scid)]
    for v, w in reversed(list(itertools.pairwise(path))):
        po = lor(point)[oris[(v, w)]]
        origin = pos[(v, w)]
        point = tuple(ac + bc for ac, bc in zip(origin, po))
    return point
def magnitude(self):
    levels = self.levels
    numbers = self.numbers
    for level_curr in range(max(levels), 0, -1):
        numbers_new = []
        levels_new = []
        paired = False
        for inds in itertools.pairwise(range(len(numbers))):
            i, j = inds
            if levels[i] == levels[j] == level_curr:
                if paired:
                    paired = False
                else:
                    res = 3 * numbers[i] + 2 * numbers[j]
                    numbers_new.append(res)
                    levels_new.append(levels[i] - 1)
                    paired = True
            else:
                if not paired:
                    numbers_new.append(numbers[i])
                    levels_new.append(levels[i])
                else:
                    paired = False
                if j == len(numbers) - 1:
                    numbers_new.append(numbers[j])
                    levels_new.append(levels[j])
        numbers = numbers_new
        levels = levels_new
    return numbers[0]
def start_pair_insertion_process(template: List[str], rules: Dict[str, str]) -> int:
    recipe: DefaultDict[str, int] = defaultdict(int)
    polymer_counter = Counter("".join(template))
    for polymer_pair in pairwise(template):
        if "".join(polymer_pair) in recipe:
            recipe["".join(polymer_pair)] += 1
        else:
            recipe["".join(polymer_pair)] = recipe.get("".join(polymer_pair), 1)
    for _ in range(NUMBER_OF_STEPS):
        pairs = {}
        for pair, result in rules.items():
            if pair in recipe and recipe[pair] > 0:
                pairs.update({pair: result})
        new_pairs: DefaultDict[str, int] = defaultdict(int)
        for pair, result in pairs.items():
            polymer_counter[result] += recipe[pair]
            first_pair = pair[0] + result
            second_pair = result + pair[1]
            new_pairs[first_pair] += recipe[pair]
            new_pairs[second_pair] += recipe[pair]
            recipe[pair] = 0
        for pair, value in new_pairs.items():
            recipe[pair] = value
    most_commons = polymer_counter.most_common(len(polymer_counter))
    return most_commons[0][1] - most_commons[-1][1]
def generate_polymer(rules: Rules, template: str, steps: int = 40) -> int:
    """
    Solves part 2.

    Consider a rule AB -> C. If a polymer has 10 pairs AB, 10 new AC and CB
    pairs are created, and the 10 AB pairs are "broken". The resulting 10 C
    elements are added to the polymer.
    """
    counts = Counter(template)
    pairs = Counter(pairwise(template))
    step = 0
    while step < steps:
        for (left, right), count in Counter(pairs).items():
            result = rules[left, right]
            # These pairs are created in one step
            pairs[left, result] += count
            pairs[result, right] += count
            # The original pair is broken in one step
            pairs[left, right] -= count
            # Add to current count of the new element
            counts[result] += count
        step += 1
    least_common, *_, most_common = sorted(counts.items(), key=itemgetter(1))
    return most_common[1] - least_common[1]
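# A quick sanity check of generate_polymer, assuming Rules is simply a dict keyed by element
# pairs. The tiny rule set below is a hypothetical example, not from the original puzzle input:
# "AB" -> "ACB" -> "ABCAB", so the counts are A: 2, B: 2, C: 1 and the answer is 2 - 1 = 1.
example_rules = {("A", "B"): "C", ("A", "C"): "B", ("C", "B"): "A"}
assert generate_polymer(example_rules, "AB", steps=2) == 1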
def alienOrder(self, words: List[str]) -> str:
    graph, s = defaultdict(list), set()
    for w in words:
        s = s.union(set(w))
    d = [0] * 26
    # pairwise(x) returns an iterator of overlapping pairs taken from the input iterator: ABCD -> AB BC CD
    for a, b in pairwise(words):
        for ca, cb in zip(a, b):
            # ca sorts before cb, so the topological edge is ca -> cb: ca's out-degree +1, cb's in-degree +1
            if ca != cb:
                graph[ca].append(cb)
                d[ord(cb) - ord('a')] += 1
                break
        else:
            # Lengths differ: "abc" before "ab" breaks the rules; "ab" before "abc" gives no letter-order information
            if len(a) > len(b):
                return ""
    start = [k for k in s if d[ord(k) - ord('a')] == 0]
    # Topological traversal: repeatedly take the nodes whose in-degree has dropped to 0
    for ch in start:
        for nxt in graph[ch]:
            d[v := ord(nxt) - ord('a')] -= 1
            if not d[v]:
                start.append(nxt)
    return "".join(start) if len(start) == len(s) else ""
def part2(a):
    return len([
        i
        for i in range(*a)
        if any(len(list(group)) == 2 for _, group in groupby(str(i)))
        and all(x <= y for x, y in pairwise(str(i)))
    ])
def can_multiply(*matrices):
    """Does m0 @ m1 @ ... @ mn make sense?"""
    for a, b in itertools.pairwise(matrices):
        n, m = a.shape
        k, l = b.shape
        if m != k:
            return False
    return True
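# Hypothetical usage sketch: can_multiply only needs objects exposing a 2-tuple .shape,
# so plain NumPy arrays work (assuming numpy is available as np in this context).
a, b, c = np.zeros((2, 3)), np.zeros((3, 4)), np.zeros((4, 5))
assert can_multiply(a, b, c)   # (2x3) @ (3x4) @ (4x5) chains up
assert not can_multiply(a, c)  # inner dimensions 3 and 4 do not match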
def build_graph(input_data: list[list[int]]) -> tuple[nx.DiGraph, int]:
    size = len(input_data)
    graph = nx.DiGraph()
    for i, line in enumerate(input_data):
        for j, risk in enumerate(line):
            graph.add_node((i, j), risk=risk)
    for i1, i2 in pairwise(range(size)):
        for j in range(size):
            graph.add_edge((i1, j), (i2, j), risk=graph.nodes[i2, j]["risk"])
            graph.add_edge((i2, j), (i1, j), risk=graph.nodes[i1, j]["risk"])
    for i in range(size):
        for j1, j2 in pairwise(range(size)):
            graph.add_edge((i, j1), (i, j2), risk=graph.nodes[i, j2]["risk"])
            graph.add_edge((i, j2), (i, j1), risk=graph.nodes[i, j1]["risk"])
    return graph, size
def apply_pair_insertion(template, rules, *, steps):
    pairs = Counter(map("".join, pairwise(template)))
    for _ in range(steps):
        counts = pairs.items()
        pairs = Counter()
        for pair, count in counts:
            for new_pair in rules[pair]:
                pairs[new_pair] += count
    return pairs
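# Minimal usage sketch for apply_pair_insertion, assuming rules maps each pair to the two
# pairs it turns into (this one-rule set is a hypothetical example): "AB" with AB -> C
# produces the pairs AC and CB after a single step.
demo_rules = {"AB": ("AC", "CB")}
assert apply_pair_insertion("AB", demo_rules, steps=1) == Counter({"AC": 1, "CB": 1})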
def __init__(self, iterable):
    prev = None
    for prev_token, token in pairwise(iterable):
        if prev is None:
            self.head = Node(token=prev_token)
            prev = self.head
        v = Node(token=token)
        prev.next = v
        prev = v
def __call__(self, segment):
    """Add links between segment nodes to those previously collected."""
    try:
        nodes = [segment.downstream._nodes[-1]]
    except AttributeError:
        nodes = [segment._nodes[0]]
    nodes.extend(segment._nodes[1:])
    for head, tail in pairwise(nodes):
        self._links.append((head, tail))
def part1(inp):
    current = inp[0]
    for _ in range(10):
        s = ""
        for a, b in pairwise(current):
            s += a + inp[1][a + b]
        current = s + current[-1]
    v = sorted(Counter(current).values())
    return v[-1] - v[0]
def process_fact(self):
    _, fact = self.request.encoded_text.split("bbot:-")
    if "-is-" in fact:
        unknown_subject, description = fact.split("-is-")
    elif "-are-" in fact:
        unknown_subject, description = fact.split("-are-")
    else:
        return "yo"
    subject = None
    adjective = None
    for word in unknown_subject.split("-"):
        subject = Subject.objects.filter(name=word).first()
        if subject:
            unknown_subject = None
            self.request.bot.unknown_subject = None
    if not subject:
        for word_one, word_two in pairwise(unknown_subject.split("-")):
            possible_subject = f"{word_one}-{word_two}"
            subject = Subject.objects.filter(name=possible_subject).first()
            if subject:
                unknown_subject = None
                self.request.bot.unknown_subject = None
    # We have a known subject
    for word in description.split("-"):
        possible_adjective = Phrase.objects.filter(
            text=word, kind="ADJECTIVE").first()
        if possible_adjective:
            # we have a known adjective
            adjective = possible_adjective
    for word_one, word_two in pairwise(description.split("-")):
        possible_adjective = f"{word_one}-{word_two}"
        adjective = Phrase.objects.filter(text=possible_adjective,
                                          kind="ADJECTIVE").first()
    self.request.bot.save()
    return self.follow_up(subject=subject,
                          adjective=adjective,
                          unknown_subject=unknown_subject,
                          unknown_adjective=description)
def magnitude(num):
    num = num[:]
    while len(num) > 1:
        for i, ((n, d), (nn, dd)) in enumerate(pairwise(num)):
            if d == dd:
                num[i] = [3 * n + 2 * nn, d - 1]
                del num[i + 1]
                break
    assert num[0][1] == 0
    return num[0][0]
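# Worked example, assuming num is a flat list of [value, nesting-depth] pairs for a snailfish
# number: [[1,2],[[3,4],5]] flattens to the pairs below, and its magnitude is 143
# (the sample value from AoC 2021 day 18).
assert magnitude([[1, 2], [2, 2], [3, 3], [4, 3], [5, 2]]) == 143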
def process(rules, polymer, remaining_steps):
    if remaining_steps == 0:
        return polymer
    new_polymer = ""
    for first_elem, second_elem in pairwise(polymer):
        new_polymer += first_elem
        new_polymer += rules[first_elem + second_elem]
    new_polymer += polymer[-1]
    return process(rules, new_polymer, remaining_steps - 1)
def run_polymerase(polymer: str, rules: dict[str, str], steps: int = 1) -> int:
    pairs = dict(Counter(pairwise(polymer)).most_common())
    for s in range(steps):
        new_pairs = defaultdict(int)
        for (l, r), total in pairs.items():
            if i := rules.get(f"{l}{r}", ""):
                new_pairs[f"{l}{i}"] += total
                new_pairs[f"{i}{r}"] += total
            else:
                new_pairs[f"{l}{r}"] += total
        pairs = new_pairs
def calc_route_fitness(route: np.array, weights: np.array) -> float:
    edges = pairwise(route)

    def get_weight(edge) -> float:
        return weights[edge[0]][edge[1]]

    def accumulator(s, edge):
        return s + get_weight(edge)

    fitness_result = reduce(accumulator, edges, 0)
    return fitness_result
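# Hypothetical usage, assuming weights is a square matrix of edge costs indexed by node id
# (and that np, pairwise, and reduce are in scope as the function above already requires):
# the route 0 -> 1 -> 2 costs weights[0][1] + weights[1][2] = 2 + 4 = 6.
demo_weights = np.array([[0, 2, 9], [2, 0, 4], [9, 4, 0]])
assert calc_route_fitness(np.array([0, 1, 2]), demo_weights) == 6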
def alienOrder(self, words: List[str]) -> str:
    graph = collections.defaultdict(list)
    for s, t in itertools.pairwise(words):
        for u, v in zip(s, t):
            if u != v:
                graph[u].append(v)
                break
        else:
            if len(s) > len(t):
                return ''
    states = {}
    order = []
def sanity(c):
    if c[0] in ops:
        return False
    if c[5] in ops:
        return False
    for p in itertools.pairwise(c):
        if p in op_doubles:
            return False
    return True
def difference_minmax_frequency(input=parse(), remaining_steps=40):
    polymer, rules = input
    frequencies = [
        frequency(rules, first_elem, second_elem, remaining_steps)
        for first_elem, second_elem in pairwise(polymer)
    ]
    global_frequency = {polymer[-1]: 1}
    for f in frequencies:
        global_frequency = merge_frequency(global_frequency, f)
    return max(global_frequency.values()) - min(global_frequency.values())
def evolve_polymer(polymer, rules, iterations):
    for iteration in range(iterations):
        new_polymer = [polymer[0]]
        for pair in pairwise(polymer):
            if pair in rules:
                new_polymer.append(rules[pair])
            new_polymer.append(pair[1])
        polymer = "".join(new_polymer)
    return polymer
def counts(*, rules: str, text: str, steps: int) -> dict:
    translations = load_translations(rules)
    if steps == 0:
        return Counter(text)
    counter = Counter({text[0]: 1})
    for a, b in pairwise(text):
        c = translations.get(a + b, '')
        counter.update(counts(rules=rules, text=a + c + b, steps=steps - 1))
        counter[a] -= 1
    return counter
def grow_polymer(steps: int) -> int:
    counter = Counter(map(lambda p: "".join(p), pairwise(polymer)))
    for _ in range(steps):
        new_counter = defaultdict[str, int](int)
        for pair, count in counter.items():
            new_counter[replace_rules[pair][0]] += count
            new_counter[replace_rules[pair][1]] += count
        counter = new_counter
    letters = defaultdict[str, int](int)
    for pair, count in counter.items():
        letters[pair[0]] += count
    letters[polymer[-1]] += 1  # last char never gets replaced
    return max(letters.values()) - min(letters.values())
def part2():
    steps = read_input()
    # Coordinate compression: collect every distinct cuboid boundary on each axis.
    x_divisions = set()
    y_divisions = set()
    z_divisions = set()
    for step in steps:
        x_divisions.add(step.xmin)
        x_divisions.add(step.xmax + 1)
        y_divisions.add(step.ymin)
        y_divisions.add(step.ymax + 1)
        z_divisions.add(step.zmin)
        z_divisions.add(step.zmax + 1)
    x_divisions = sorted(x_divisions)
    y_divisions = sorted(y_divisions)
    z_divisions = sorted(z_divisions)
    shape = (len(x_divisions) - 1, len(y_divisions) - 1, len(z_divisions) - 1)
    cuboids = np.zeros(shape, bool)
    # Mark each compressed cell on or off in input order; later steps overwrite earlier ones.
    for step in steps:
        value = (step.action == 'on')
        cuboids[
            x_divisions.index(step.xmin):x_divisions.index(step.xmax + 1),
            y_divisions.index(step.ymin):y_divisions.index(step.ymax + 1),
            z_divisions.index(step.zmin):z_divisions.index(step.zmax + 1)
        ] = value
    # A compressed cell's volume is the product of its interval widths on the three axes;
    # the einsum sums the volumes of all cells that are on.
    x_weights = np.array([x2 - x1 for x1, x2 in pairwise(x_divisions)])
    y_weights = np.array([y2 - y1 for y1, y2 in pairwise(y_divisions)])
    z_weights = np.array([z2 - z1 for z1, z2 in pairwise(z_divisions)])
    return np.einsum('xyz,x,y,z->', cuboids, x_weights, y_weights, z_weights)
def parse_input(input_file: TextIO) -> tuple[dict[str, int], dict[str, str]]:
    polymer = next(input_file).strip()
    next(input_file)  # skip blank line
    rules: dict[str, str] = {}
    for line in input_file:
        k, v = line.strip().split(" -> ")
        rules[k] = v
    polymer_counts: defaultdict[str, int] = defaultdict(int)
    for element in polymer:
        polymer_counts[element] += 1
    for pair in pairwise(polymer):
        polymer_counts["".join(pair)] += 1
    return polymer_counts, rules
def get_polymer_element_frequencies(
    polymer: str, rules: Rules, generations: int
) -> dict[tuple[str, str], int]:
    counts = Counter(pairwise(polymer))
    for _ in range(1, generations + 1):
        new_count = defaultdict(lambda: 0)
        for (a, b), count in counts.items():
            insert = rules[a, b]
            new_count[a, insert] += count
            new_count[insert, b] += count
        counts = new_count
    return counts
def functional_ascending_impl(seq):
    return all(l < r for l, r in pairwise(seq))
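# Usage sketch: strictly increasing sequences only; empty or single-element input is
# vacuously true because pairwise yields no pairs.
assert functional_ascending_impl([1, 2, 3])
assert not functional_ascending_impl([1, 3, 2])
assert functional_ascending_impl([])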