def parse_input():
    node_dict = {}
    # for line in test_input().splitlines():
    for line in utils.read_input(12):
        start, others = re.match(r'(\d+) <-> (.*)', line).groups()
        nums = [int(n.strip()) for n in others.split(',')]
        node_dict[int(start)] = nums
    return node_dict
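# A quick illustration of the line format the regex above assumes
# ("<node> <-> <comma-separated neighbours>"), using a made-up sample line:
import re

sample = "2 <-> 0, 3, 4"
start, others = re.match(r'(\d+) <-> (.*)', sample).groups()
assert int(start) == 2
assert [int(n.strip()) for n in others.split(',')] == [0, 3, 4]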
def main(separator='\t'):
    """
    Choose the next node to be added to the subgraph.

    Take as input:
    - iterable of nodes (ordered) as key
    - iterable of iterables as value
    """
    data = read_input(sys.stdin)
    for nodes, neighbours_iterable in data:
        nodes = eval(nodes)
        neighbours_iterable = eval(neighbours_iterable)
        next_nodes = choose_nodes(nodes, neighbours_iterable)
        for n in next_nodes:
            print("{}\t{}".format(sorted(nodes + (n, )), neighbours_iterable))
def main(separator='\t'):
    """
    Join the neighbours_iterables from the previous step.

    Receives as key an ordered iterable of nodes (the subgraph).
    Builds the next neighbours_iterable, filtering duplicates.
    """
    data = read_input(sys.stdin, separator=separator)
    for key, value in groupby(data, itemgetter(0)):
        next_iterable = set()
        for i in value:
            for t in eval(i[1]):
                next_iterable.add(t)
        next_tuple = tuple(next_iterable)
        key = tuple(eval(key))
        # Check for the density!
        if density(key, next_tuple) >= RHO:
            with open("Output/FOUND", "w") as f:
                print("{}".format(key))
                print("{}".format(key), file=f)
            return
        print("{}\t{}".format(key, next_tuple))
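# density() and RHO are defined elsewhere in this project and are not shown
# here. For reference, the usual quasi-clique density is
# 2|E| / (|V| * (|V| - 1)); a self-contained illustration of that formula
# (hypothetical helper, not the original implementation):
def edge_density(nodes, edges):
    """Fraction of the possible edges between `nodes` present in `edges`,
    where `edges` is a set of ordered pairs (a, b) with a < b."""
    v = len(nodes)
    if v < 2:
        return 0.0
    present = sum(1 for a in nodes for b in nodes if a < b and (a, b) in edges)
    return 2.0 * present / (v * (v - 1))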
import sys

import utils
from dbscan import *

if __name__ == "__main__":
    filename = sys.argv[1]
    points = utils.read_input(filename)
    # dbscan() labels the clusters of elements in points in place.
    dbscan(points, 0.5)
    utils.visualize(points)
def spatial_pooler(images, shape, p_connect=0.15, connect_threshold=0.2,
                   p_inc=0.02, p_dec=0.02, b_inc=0.005, p_mult=0.01,
                   min_activity_threshold=0.01, desired_activity_mult=0.05,
                   b_max=4, max_iterations=10000, cycles_to_save=100,
                   output_file=None):
    """
    Implements the main BSP loop (p. 3). It goes continually through the
    images set until convergence.

    :param images: set of images to learn from. It is an array of shape
        (l, m, n), where each of the l images has shape
        (m, n) == (shape[2], shape[3]).
    :param shape: the shape of the output array. It must have 4 components,
        and (shape[0], shape[1]) == (shape[2], shape[3]).
    :param p_connect: probability of the columns mapped to [i, j] to have a
        potential synapse to coordinates [k, l], for all i in shape[0], j in
        shape[1], k in shape[2], and l in shape[3].
    :param connect_threshold: threshold over which a potential synapse is
        considered *connected*. All potential synapses start with a
        permanence value within 0.1 of this parameter.
    :param p_inc: the BSP's pInc parameter (p. 4). A float that indicates
        the amount by which a synapse's permanence must be incremented.
    :param p_dec: the BSP's pDec parameter (p. 4). A float that indicates
        the amount by which a synapse's permanence must be decremented.
    :param b_inc: the BSP's bInc parameter (p. 4). A float that indicates
        the amount by which a column's boost must be incremented.
    :param p_mult: the BSP's pMult parameter (p. 4). A float that indicates
        the amount by which a synapse's permanence must be multiplied.
    :param min_activity_threshold: the BSP's minActivityThreshold parameter
        (p. 4).
    :param desired_activity_mult: the BSP's desiredActivityMult parameter
        (p. 4).
    :param b_max: the ASP's bMax parameter (p. 6). A float that indicates
        the maximum value a column's boost can reach.
    :param max_iterations: an integer indicating the maximum number of runs
        through the set of images allowed. Pass None if no limit is desired.
    :param cycles_to_save: wait this number of iterations over the complete
        set of images before saving the columns to disk.
    :param output_file: file name used to save the pickled columns.
    :return: a matrix *columns* of shape *shape*, created and modified
        according to the BSP learning algorithm.
    """
    # Initialize boost matrix.
    boost = np.ones(shape=shape[:2])
    part_rbuffer = partial(RingBuffer,
                           input_array=np.zeros(1000, dtype=np.bool),
                           copy=True)
    # Initialize activity dictionary.
    activity = defaultdict(part_rbuffer)
    # Initialize columns and distances matrices.
    pprint("Initializing synapses ...")
    columns, distances = initialise_synapses(shape, p_connect,
                                             connect_threshold)
    pprint("Columns:")
    random_rows = np.random.randint(0, shape[0], 2)
    random_cols = np.random.randint(0, shape[1], 2)
    pprint(columns[random_rows, random_cols])
    pprint("Distances:")
    pprint(distances[random_rows, random_cols])
    # Calculate the inhibition_area parameter.
    pprint("Calculating inhibition area ...")
    inhibition_area = update_inhibition_area(columns, connect_threshold)
    pprint("Inhibition area: %s" % inhibition_area)
    # Calculate the desired activity in an inhibition zone.
    pprint("Calculating desired activity ...")
    desired_activity = desired_activity_mult * inhibition_area
    pprint("Desired activity: %s" % desired_activity)

    converged = False
    i = 0
    # While synapses are modified and the maximum number of iterations is not
    # overstepped, ...
pprint("Starting learning loop ...") start = datetime.now() while not converged and (max_iterations is None or i < max_iterations): # Initialize the synapses_modified array, assuming no synapses will be # modified. synapses_modified = np.zeros(shape=len(images), dtype=np.bool) # For each image *image*, with index *j* in the images set, ... for j, image, _ in read_input(images): # According to the paper (sic): # "minOverlap was dynamically set to be the product of the mean # pixel intensity of the current image and the mean number of # connected synapses for an individual column." # This leaves unclear exactly what is meant by "mean number of # connected synapses for an individual column"; it could be a # historical mean or a mean over all columns, here the latter was # chosen. mean_conn_synapses = (columns[columns > connect_threshold].size / (shape[2] * shape[3])) min_overlap = image.mean() * mean_conn_synapses # calculate the overlap of the columns with the image. # (this is a simple count of the number of its connected synapses # that are receiving active input (p. 3)), ... overlap = calculate_overlap(image, columns, min_overlap, connect_threshold, boost) # force sparsity by inhibiting columns, ... active, activity =\ inhibit_columns(columns, distances, inhibition_area, overlap, activity, desired_activity) # calculate the min_activity matrix, ... min_activity =\ calculate_min_activity(columns, active, distances, inhibition_area, activity, min_activity_threshold) # and finally, adapt the synapse's permanence values. columns, synapses_modified[j] =\ learn_synapse_connections(columns, active, image, p_inc, p_dec, activity, min_activity, boost, b_inc, p_mult, connect_threshold, distances, b_max) # Update the inhibition_area parameter. inhibition_area = update_inhibition_area(columns, connect_threshold) # Update the desired activity in a inhibition zone. desired_activity = desired_activity_mult * inhibition_area # Print a snapshot of the model state every 1000 images. if j % 1000 == 0: pprint("########## %sth image of %sth iteration ##########" % (j+1, i+1)) elapsed = datetime.now() - start elapsed_h = elapsed.total_seconds() // 3600 elapsed_m = (elapsed.total_seconds() // 60) % 60 elapsed_s = elapsed.seconds % 60 pprint("########## Elapsed time: %02d:%02d:%02d ##########" % (elapsed_h, elapsed_m, elapsed_s)) pprint("Overlap:") pprint(overlap[random_rows, random_cols]) pprint("Activity:") for l, key in enumerate(activity.iterkeys()): if l in random_rows: pprint(activity[key][-100:]) pprint("Active:") pprint(active[random_rows, random_cols]) pprint("Min activity:") pprint(min_activity[random_rows, random_cols]) pprint("Inhibition area: %s" % inhibition_area) pprint("Inhibition radius: %s" % (np.sqrt(inhibition_area/np.pi),)) pprint("Desired activity: %s" % desired_activity) pprint("Synapses modified: %s" % synapses_modified[j]) # Check if any synapses changed from connected to disconnected or # vice-versa in the last learning cycle. converged = test_for_convergence(synapses_modified) pprint("Iteration %s. Number of synapses modified: %s" % (i, synapses_modified.sum())) if i % cycles_to_save == 0 or converged: if output_file is not None: with open(output_file, 'wb') as fp: pickle.dump(columns, fp) # Increment iterations counter. i += 1 return columns
import utils
import re

specs, your, other = utils.read_input(delim='\n\n', test=False)
specs = specs.split('\n')
specs = {
    a: [int(b) for b in bs]
    for a, *bs in [
        re.match(r'^([a-z ]+): (\d+)-(\d+) or (\d+)-(\d+)', s).groups()
        for s in specs
    ]
}
intervals = sorted([
    a for b in [[[lo1, hi1], [lo2, hi2]]
                for lo1, hi1, lo2, hi2 in specs.values()]
    for a in b
])

sm = 0
valid_rows = []
for row in other.split('\n')[1:]:
    ns = [int(i) for i in row.split(',')]
    row_invalid = False
    for n in ns:
        invalid = True
        for (a, b) in intervals:
            if a <= n <= b:
                invalid = False
                break
        if invalid:
import numpy as np

from utils import read_input, update_grid, print_grid, score_grid

if __name__ == "__main__":
    grid = read_input()
    original_grid = np.array(grid)
    total = 1000000000
    visited = {}
    cycle_start = None
    cycle_end = None
    for i in range(total):
        grid = update_grid(grid)
        grid_str = ''.join(''.join(row) for row in grid)
        if grid_str in visited:
            cycle_start = visited[grid_str]
            cycle_end = i
            break
        visited[grid_str] = i
    period = cycle_end - cycle_start
    if total >= cycle_start:
        total = cycle_start + (total - cycle_start) % period
    print(
        f"Cycle starts at {cycle_start} and ends at {cycle_end} with a period of {period}"
    )
    print(f"Will run {total} cycles to obtain the desired result")
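    # Worked example of the cycle-skipping arithmetic above, with made-up
    # numbers rather than a real input: if the state first seen at step 100
    # recurs at step 128, the period is 28, and simulating 1_000_000_000
    # steps reduces to 100 + (1_000_000_000 - 100) % 28 == 104 steps.
    assert 100 + (1_000_000_000 - 100) % 28 == 104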
import utils
import annealing
import campos
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np

g = utils.read_input("generated_inputs/25.in")
g2 = utils.read_input("generated_inputs/50.in")
g3 = utils.read_input("generated_inputs/100.in")
gsmol = np.ones((5, 5)) - np.eye(5)
gsmol2 = np.ones((10, 10)) - np.eye(10)
gcomp = np.ones((25, 25)) - np.eye(25)
gcomp2 = np.ones((50, 50)) - np.eye(50)
gcomp3 = np.ones((100, 100)) - np.eye(100)


def test_initial():
    G = utils.read_input("inputs/large-14.in")
    print("Running")
    plt.figure()
    nx.draw(utils.mat_to_nx(G))
    plt.savefig("/tmp/G.png")
    state = annealing.initial_fn(G)
    cost = utils.cost_fn(state)
    print("Done with cost ", cost)
    plt.figure()
    nx.draw(utils.mat_to_nx(state))
    plt.savefig("/tmp/tree.png")
    utils.write_output(state, G, "/tmp/res.txt")
    assert utils.verify_in_out(G, "/tmp/res.txt")
from utils import operations, read_input

if __name__ == "__main__":
    bound, instructions = read_input()
    pointer = 0
    registry = [0] * 6
    registry[0] = 1
    while pointer < len(instructions):
        registry[bound] = pointer
        instr = instructions[pointer]
        operation = operations[instr[0]]
        registry = operation(registry, instr[1])
        if pointer == 3:
            # The elf-code loop starting here computes the sum of the
            # divisors of registry[2]; do it directly and stop.
            for x in range(1, registry[2] + 1):
                registry[1] = x
                if registry[2] % registry[1] == 0:
                    registry[0] += registry[2] // registry[1]
            break
        pointer = registry[bound]
        pointer += 1
    print(registry)
from utils import read_input
from math import cos, sin, radians

navigations = read_input('inputs/input12.txt')


def navigate(instruction, pos_x, pos_y, dir):
    nav = instruction[0]
    value = int(instruction[1:])
    if nav == 'N':
        pos_y += value
    elif nav == 'E':
        pos_x += value
    elif nav == 'S':
        pos_y -= value
    elif nav == 'W':
        pos_x -= value
    elif nav == 'R':
        dir = (dir - value) % 360
    elif nav == 'L':
        dir = (dir + value) % 360
    elif nav == 'F':
        if dir == 0:
            pos_x += value
        elif dir == 90:
            pos_y += value
        elif dir == 180:
            pos_x -= value
        elif dir == 270:
            pos_y -= value
        else:
#!/usr/bin/env python
"""..."""
import board, player, engine, validator, utils
from board import *
from validator import *
from player import *
from engine import *

print "some tests..."
print "is_winner tests:"
boards = utils.read_input("input")
for i in range(len(boards)):
    print str(boards[i])
    print "w=" + is_winner(boards[i]) + "\n"

# --------------------------------------
print('\n-----------------\n\n' + 'player.next_move tests:')
boards = utils.read_input("input-ai")
for i in range(len(boards)):
    p1 = Player(Board.CROSS, Engine.P1, True)
    p2 = Player(Board.CIRC, Engine.P2, True)
    turn = Board.CROSS
    print str(i + 1) + '.'
    print str(boards[i]) + "\n"
    while not boards[i].is_full() and is_winner(boards[i]) == Board.BLANK:
        if turn == p1.sym:
                found = y
                break
        if found is None:
            return x
    return False


def is_invalid(x, prev):
    found = False
    for y in set(prev):
        if x - y in prev:
            found = y
            break
    return found


def part2(data):
    VALUE = 57195069
    for size in range(2, len(data)):
        for i in range(len(data) - size):
            if sum(data[i:i + size]) == VALUE:
                print(data[i:i + size])
                return max(data[i:i + size]) + min(data[i:i + size])
    return False


if __name__ == '__main__':
    data = read_input('input.txt', by_line=True, fn=int)
    print(part1(data))
    print(part2(data))
from utils import read_input, dist

if __name__ == "__main__":
    radii = read_input()
    removed = [False] * len(radii)
    while True:
        noutside = [0] * len(radii)
        for removedi, (coordi, ri) in zip(removed, radii.items()):
            if removedi:
                continue
            for j, (coordj, rj) in enumerate(radii.items()):
                if removed[j]:
                    continue
                if dist(coordi, coordj) > ri + rj:
                    noutside[j] += 1
        if sum(noutside) == 0:
            break
        maxn = max(noutside)
        removed = [r or n == maxn for r, n in zip(removed, noutside)]
    dists = [dist(c, (0, 0, 0)) - r
             for removedi, (c, r) in zip(removed, radii.items())
             if not removedi]
    print(f"The distance to (0,0,0) is {max(dists)}")
def add_edges(grid, graph):
    for x in range(grid.shape[0]):
        for y in range(grid.shape[1]):
            target = grid[x, y]
            if x < grid.shape[0] - 1:
                source = grid[x + 1, y]
                for toola in tools_allowed[source]:
                    for toolb in tools_allowed[target]:
                        if toola == toolb:
                            graph.add_edge((x, y, toola), (x + 1, y, toolb),
                                           weight=1)
            if y < grid.shape[1] - 1:
                source = grid[x, y + 1]
                for toola in tools_allowed[source]:
                    for toolb in tools_allowed[target]:
                        if toola == toolb:
                            graph.add_edge((x, y, toola), (x, y + 1, toolb),
                                           weight=1)


if __name__ == "__main__":
    depth, targetx, targety = read_input()
    grid = define_grid(targetx, targety, depth, padding=10)
    graph = define_graph(grid)
    p = nx.shortest_path(graph, (0, 0, 0), (targetx, targety, 0),
                         weight='weight', method='bellman-ford')
    duration = sum(
        [graph.edges[v, u]['weight'] for v, u in zip(p[0:-1], p[1:])])
    print(f"The shortest duration is {duration}")
    commands = deque([
        'west', 'take mug', 'north', 'take easter egg', 'south', 'east',
        'south', 'east', 'north', 'take candy cane', 'south', 'west',
        'north', 'east', 'take coin', 'north', 'north', 'take hypercube',
        'south', 'east', 'take manifold', 'west', 'south', 'south', 'east',
        'take pointer', 'west', 'west', 'take astrolabe', 'north', 'east',
        'north', 'drop manifold', 'drop easter egg', 'drop pointer',
        'drop candy cane', 'east'
    ])
    while True:
        text = []
        try:
            while output := next(computer.generator):
                text.append(chr(output))
        except StopIteration:
            pass
        print(''.join(text))
        command = commands.popleft() if commands else input()
        if command == 'exit':
            return
        for c in command:
            output = computer.generator.send(ord(c))
            if output:
                print(chr(output), end='')
        computer.generator.send(ord('\n'))


def part2(raw_input):
    print("Part 2")
    return "Happy Holidays!"


raw_input = utils.read_input()
part1(raw_input)
print(part2(raw_input))
from itertools import permutations
from typing import Optional, Tuple

from utils import read_input

INPUT = read_input('day7')


def parse_instruction(instruction: int) -> Tuple[int, int, int, int]:
    return (instruction % 100,
            instruction % 1000 // 100,
            instruction % 10000 // 1000,
            instruction % 100000 // 10000)


class Amplifier:
    def __init__(self, code: str, phase_setting: int):
        self.i = 0
        self.mem = list(map(int, code.split(',')))
        self.inputs = [phase_setting]

    def run(self, input_signal: int) -> Optional[int]:
        self.inputs.append(input_signal)

        def param(num: int) -> int:
            return self.mem[self.i + num]

        def param_value(param_num: int) -> int:
            p = param(param_num)
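# Sanity check for parse_instruction above, computed by hand: the Intcode
# instruction 1002 has opcode 02 and parameter modes 0, 1, 0, so
# parse_instruction(1002) == (2, 0, 1, 0).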
from utils import read_input
from sympy.ntheory.modular import crt

data = read_input('inputs/input13.txt')


def part_1():
    departure_time = int(data[0])
    bus_lines = [int(x) for x in data[1].split(',') if x != 'x']
    diffs = []
    for t in bus_lines:
        r = departure_time % t
        d = t - r
        diffs.append(d)
    min_diff = min(diffs)
    bus_id = bus_lines[diffs.index(min_diff)]
    print(min_diff * bus_id)


def part_2():
    times = data[1].split(',')
    bus_lines = [int(x) for x in data[1].split(',') if x != 'x']
    time_diff = [times.index(str(x)) for x in bus_lines]
    # t mod b1 = r1
    # t mod b2 = r2
    # t mod b3 = r3
    # ...
    remainders = [n1 - n2 for (n1, n2) in zip(bus_lines, time_diff)]
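    # The snippet ends here. Given the otherwise unused sympy import above,
    # the missing tail presumably feeds the congruences into the Chinese
    # Remainder Theorem, along the lines of:
    #     t, _ = crt(bus_lines, remainders)
    #     print(t)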
def reconstruct_images(alg, images, columns, connect_threshold,
                       desired_activity_mult, min_overlap, img_shape,
                       out_dir=None):
    cols_shape = columns.shape
    # Initialize boost matrix.
    boost = np.ones(shape=cols_shape[:2])
    part_rbuffer = partial(RingBuffer,
                           input_array=np.zeros(1, dtype=np.bool),
                           copy=True)
    # Initialize activity dictionary.
    activity = defaultdict(part_rbuffer)

    if alg == 'bsp':
        # Initialize overlap_sum dictionary.
        overlap_sum = defaultdict(part_rbuffer)

    distances = calculate_distances(cols_shape)
    # Calculate the inhibition_area parameter.
    inhibition_area = update_inhibition_area(columns, connect_threshold)
    # Calculate the desired activity in an inhibition zone.
    desired_activity = desired_activity_mult * inhibition_area

    reconstructions = np.zeros_like(images)
    # Initialize the activations matrix. This will be used to calculate the
    # population and lifetime kurtoses.
    activations = np.zeros(shape=(images.shape[0], cols_shape[0],
                                  cols_shape[1]), dtype=np.int)

    for i, image, _ in read_input(images):
        if alg == 'bsp':
            overlap, _ = bsp_overlap(image, columns, min_overlap,
                                     connect_threshold, boost, overlap_sum)
        elif alg == 'asp':
            # calculate the overlap of the columns with the image
            # (this is a simple count of the number of its connected synapses
            # that are receiving active input (p. 3)), ...
            overlap = asp_overlap(image, columns, min_overlap,
                                  connect_threshold, boost)
        # force sparsity by inhibiting columns, ...
        active, _ = inhibit_columns(columns, distances, inhibition_area,
                                    overlap, activity, desired_activity)
        # set reconstructions[i][y, x] to (sic, p. 7):
        # "[...] the linear superposition of the
        #  connected synapses of the columns that become active
        #  when the input is present [...]"
        # first, generate a copy of the columns array, where all the synapses
        # of all inactive columns are set to 0, ...
        active_cols = np.where(active, columns,
                               np.zeros(shape=(cols_shape[2],
                                               cols_shape[3])))
        # and then set reconstructions[i] to the linear superposition of the
        # synapses of the active columns.
        reconstructions[i] = np.nansum(active_cols, axis=(0, 1))
        # Store the post-inhibition overlap activity of each column as the
        # sum of the overlap of the active columns.
        activations[i] = np.nansum(columns, axis=(2, 3))

    if out_dir is not None:
        if not os.path.exists(out_dir):
            os.mkdir(out_dir)
        # Rebuild the 256x256 images from the 16x16 patches.
        reconstructions = rebuild_imgs_from_patches(reconstructions,
                                                    img_shape)
        # Scale the pixel values to the range [0, 1].
        reconstructions = reconstructions - reconstructions.min()
        reconstructions /= reconstructions.max() - reconstructions.min()
        # Scale the pixel values to the range [0, 255].
        reconstructions *= 255
        for i, reconstruct_img in enumerate(reconstructions):
            with open(os.path.join(out_dir, 'rec_img_%d.png' % i),
                      'wb') as fp:
                plt.imsave(fname=fp, arr=reconstruct_img, cmap='gray')

    return activations
    def test_with_input(self):
        boarding_passes = utils.read_input(5)
        self.assertEqual(day5.part_2(boarding_passes), 597)
            seen, doors = navigate(p, opt + line[end + 1:], seen, doors)
    return seen, doors


def p1(lines):
    global distances
    start = (0, 0)
    _, doors = navigate(start, lines[0].strip(), set(), defaultdict(set))
    distances = {start: 0}
    q = Queue()
    q.put(start)
    while not q.empty():
        src = q.get()
        for dst in doors[src]:
            if dst not in distances:
                distances[dst] = distances[src] + 1
                q.put(dst)
    return max(distances.values())


def p2(_):
    global distances
    return len([k for k in distances if distances[k] >= 1000])


if __name__ == "__main__":
    solve(read_input(), lambda x: x, p1, p2)
            or name in _optionnal_fields)


def is_passport_valid(passport):
    return all(field in passport and is_field_valid(field, passport[field])
               for field in _mandatory_fields)


def part01(passports):
    valid_passports = list(
        filter(lambda x: all(field in x for field in _mandatory_fields),
               passports))
    return len(valid_passports)


def part02(passports):
    valid_passports = list(filter(is_passport_valid, passports))
    return len(valid_passports)


if __name__ == "__main__":
    input = [
        line.strip("\n")
        for line in read_input("04", ignore_blank_lines=False)
    ]
    passports = format_passports(input)
    # print(passports)
    print(part01(passports))
    print(part02(passports))
from utils import read_input, count, selection_phase, attacking_phase

if __name__ == "__main__":
    groups, _ = read_input()
    nimmunes, ninfections = count(groups)
    while nimmunes > 0 and ninfections > 0:
        selection_phase(groups)
        attacking_phase(groups)
        groups = [g for g in groups if g.units > 0]
        nimmunes, ninfections = count(groups)
    print(
        f"There are {nimmunes} immune system units and {ninfections} infection units"
    )
    elif pancake == len(stack) - 2:
        if revStack[pancake + 1] == '-':
            toFlip = revStack[pancake:]
            stayTheSame = revStack[:pancake]
        else:
            toFlip = revStack[pancake + 1:]
            stayTheSame = revStack[:pancake + 1]
    else:
        toFlip = revStack[pancake:]
        stayTheSame = revStack[:pancake]
    flipped = flip(toFlip)
    stayTheSame.extend(flipped)
    return list(reversed(stayTheSame))


def flip(arr):
    rev = list(reversed(arr))
    ans = []
    for index in range(0, len(rev)):
        if rev[index] == '-':
            ans.append('+')
        elif rev[index] == '+':
            ans.append('-')
    return ans


filename = utils.getFilename()
input = utils.read_input(filename)
output = process(input)
utils.print_output(filename, output)
        if node in traversed:
            continue
        last = False
        for edge in edges:
            if edge[1] == node and edge[0] not in traversed:
                last = True
                break
        if not last:
            candidates.append(node)
    candidates.sort()
    traversed.append(candidates[0])
    return traversed[-1]


if __name__ == "__main__":
    nodes, edges = read_input()
    traversed = []
    start = find_start(nodes, edges, traversed)
    while len(traversed) < len(nodes):
        # Find edges starting with start
        candidates = [edge[1] for edge in edges if edge[0] == start]
        # Remove edges with a start that has not been traversed yet
        remove = []
        for edge in edges:
            if edge[0] != start and edge[1] in candidates \
                    and edge[0] not in traversed:
                remove.append(edge[1])
        candidates = [c for c in candidates if c not in remove]
        # Traverse an edge
        if len(candidates) > 0:
            candidates.sort()
from collections import Counter
from itertools import chain
from typing import List, Optional, Tuple

from utils import read_input

Point = Tuple[int, int]

INPUT = read_input(6)
INPUT_TEST = """
1, 1
1, 6
8, 3
3, 4
5, 5
8, 9
""".strip().splitlines()


def day6(input_: List[str], safe_range: int = 10000) -> None:
    def parse_line(line: str) -> Point:
        a, b = line.split(', ')
        return int(a), int(b)

    points = list(map(parse_line, input_))

    def distance(a: Point, b: Point) -> int:
        return abs(b[0] - a[0]) + abs(b[1] - a[1])

    def closest(a: Point) -> Optional[Point]:
        min_dist = 100000000
import utils

if __name__ == '__main__':
    lines = utils.read_input(5)
    nums = [int(n) for n in lines]
    num_steps = 0
    idx = 0
    while idx < len(nums):
        old_idx = idx
        step = nums[idx]
        idx += step
        nums[old_idx] = (nums[old_idx] + 1 if nums[old_idx] < 3
                         else nums[old_idx] - 1)
        num_steps += 1
    print(num_steps)
def parse_input():
    for line in read_input(7):
        splits = line.split()
        prereq = splits[1]
        action = splits[7]
        yield Instruction(prereq, action)
(opt, args) = parser.parse_args()

# general setup
maxNPF = 4500
n_features_pf = 8
n_features_pf_cat = 3
normFac = 50.
epochs = 100
batch_size = 64
preprocessed = True
emb_out_dim = 8

##
## read input and do preprocessing
##
Xorg, Y = read_input(opt.input)
Y = Y / -normFac

Xi, Xc1, Xc2, Xc3 = preProcessing(Xorg)
print(Xc1.dtype)
Xc = [Xc1, Xc2, Xc3]

emb_input_dim = {
    i: int(np.max(Xc[i][0:1000])) + 1
    for i in range(n_features_pf_cat)
}
print('Embedding input dimensions', emb_input_dim)

# prepare training/val data
Yr = Y
Xr = [Xi] + Xc
indices = np.array([i for i in range(len(Yr))])
#!/usr/bin/env python3
from utils import read_input


def part1_solution(numbers):
    for i, num1 in enumerate(numbers):
        for num2 in numbers[i + 1:]:
            if num1 + num2 == 2020:
                return num1 * num2


def part2_solution(numbers):
    for i, num1 in enumerate(numbers):
        for j, num2 in enumerate(numbers[i + 1:], start=i + 1):
            for num3 in numbers[j + 1:]:
                if num1 + num2 + num3 == 2020:
                    return num1 * num2 * num3


if __name__ == '__main__':
    # print(part1_solution(read_input('day1_input')))
    print(part2_solution(read_input('day1_input')))
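# An equivalent formulation of the triple search using
# itertools.combinations; a sketch, not part of the original file:
from itertools import combinations


def part2_combinations(numbers):
    for a, b, c in combinations(numbers, 3):
        if a + b + c == 2020:
            return a * b * c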
import utils import numpy as np from PIL import Image input = utils.read_input(delim='\n', generator=int) def a(input): for i in range(25, len(input)): num = input[i] seeds = input[i - 25:i] found = False for x in range(0, len(seeds)): for y in range(x, len(seeds)): if num == seeds[x] + seeds[y]: found = True break if not found: print(num) return num def b(inp, num): lo, hi, val = 0, 1, inp[0] + inp[1] while True: if val == num: rang = inp[lo:hi + 1] return max(rang) + min(rang) elif val < num: hi += 1 val += inp[hi]
from utils import read_input

passwords = read_input('inputs/input02.txt')

valid_passwords_part_1 = 0
valid_passwords_part_2 = 0

for line in passwords:
    # parse input
    [interval, letter, password] = line.split(' ')
    letter = letter[0]
    [lower, upper] = interval.split('-')
    lower, upper = int(lower), int(upper)

    # part 1
    count = password.count(letter)
    if lower <= count <= upper:
        valid_passwords_part_1 += 1

    # part 2
    lower_position = password[lower - 1] == letter
    upper_position = password[upper - 1] == letter
    if lower_position != upper_position:
        valid_passwords_part_2 += 1

print(valid_passwords_part_1)
print(valid_passwords_part_2)
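# For reference, each input line is assumed to follow the "1-3 a: abcde"
# format; a worked check of the parsing above on that sample line:
sample = "1-3 a: abcde"
interval, letter, password = sample.split(' ')
assert (interval, letter[0], password) == ('1-3', 'a', 'abcde')
# Part 1: "a" occurs once, within 1..3, so the sample is valid; part 2:
# position 1 is "a" and position 3 is not, so exactly one position matches.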
            for y2 in range(0, len(values)):
                for x2 in range(0, len(values[0])):
                    value2 = values[y2][x2]
                    south = y2 - y
                    east = x2 - x
                    if value2 == '.' or (south == 0 and east == 0):
                        continue
                    gcd = math.gcd(south, east)
                    asteroid = (int(south / gcd), int(east / gcd))
                    if asteroid not in asteroids:
                        asteroids.append(asteroid)
            if len(asteroids) > len(most_asteroids):
                most_asteroids = asteroids
                loc = (x, y)
    print(loc)
    result = len(most_asteroids)
    return result


if __name__ == '__main__':
    lines = read_input()
    result = str(calc(lines))
    write_output(result)
    check_result(result)
import numpy as np

from utils import read_input, score_gen, grow

if __name__ == "__main__":
    low_boundary = -10
    gen, trans = read_input(abs(low_boundary))
    scores = grow(gen, trans, low_boundary, 200, verbose="score")
    scores = np.asarray(scores)
    # The score is assumed to grow linearly after ~100 generations, so fit
    # a line to the tail and extrapolate.
    fit = np.polyfit(scores[100:, 0], scores[100:, 1], 1)
    print(fit, scores[-1, :], fit[1] + scores[-1, 0] * fit[0])
    n = 50000000000
    fit_score = fit[1] + n * fit[0]
    print(f"Fitted score after {n} generations is {fit_score}")
def main(separator='\t'):
    """Emit each edge in both directions (node and its neighbour)."""
    data = read_input(sys.stdin)
    for u, v in data:
        print("{}\t{}".format(u, v))
        print("{}\t{}".format(v, u))
            self.direction = turn_left(self.direction)
        elif self.num_cross == 1:
            pass
        else:
            self.direction = turn_right(self.direction)
        self.num_cross = (self.num_cross + 1) % 3

    def __eq__(self, other):
        # Needed so the `in` operator works on collections of carts.
        return (isinstance(other, Cart)
                and self.x == other.x and self.y == other.y)

    def __repr__(self):
        return f"Cart({self.x}, {self.y}, {self.direction})"


raw = read_input(__file__)
road_map = []
carts: Deque[Cart] = deque([])
for y, line in enumerate(raw.splitlines()):
    row = []
    for x, char in enumerate(line):
        if char in (Direction.UP, Direction.DOWN):
            row.append("|")
            carts.append(Cart(x, y, char))
        elif char in (Direction.LEFT, Direction.RIGHT):
            row.append("-")
            carts.append(Cart(x, y, char))
        else:
            row.append(char)
    road_map.append(row)
import utils
from collections import Counter


def east(stepcount):
    return ((stepcount['ne'] - stepcount['sw']) +
            (stepcount['se'] - stepcount['nw']))


if __name__ == '__main__':
    inp = utils.read_input(11)
    steps = inp[0].split(',')
    stepcount = Counter(steps)
    print('north/south dist away:')
    print(stepcount['n'] - stepcount['s'])
    eastdist = 366 + 284
    # I guess this is the answer...
    # north/south is less, so only east matters
    print(eastdist)
    maxfar = 0
    for idx in range(len(steps)):
        maxfar = max(east(Counter(steps[:idx])), maxfar)
    print(maxfar)
from collections import Counter, deque

from utils import read_input

INPUT = read_input('day8')


def part1(inp: str) -> None:
    digits = list(map(int, inp))
    layers = []
    size = 25 * 6
    for i in range(len(digits) // size):
        layer = digits[i * size:(i + 1) * size]
        layers.append(Counter(layer))
    layer_min_0 = min(layers, key=lambda c: c[0])
    print(layer_min_0[1] * layer_min_0[2])


def part2(inp: str) -> None:
    digits = deque(map(int, inp))
    img = [[deque() for _ in range(25)] for _ in range(6)]
    size = 25 * 6
    for layer in range(len(digits) // size):
        for r in range(6):
            for c in range(25):
import re
import utils
from collections import Counter

inp_lines = utils.read_input(8)
registers = Counter()
cur_max = 0
for line in inp_lines:
    reg, command, value, condition = \
        re.match(r'([A-Za-z]+) (inc|dec) (-?\d+) if (.+)', line).groups()
    value = int(value)
    check_reg = condition.split(' ')[0]
    reg_value = registers[check_reg]
    condition = condition.replace(check_reg, str(reg_value))
    result = eval(condition)
    if not result:
        continue
    if command == 'inc':
        registers[reg] += value
    else:
        registers[reg] -= value
    cur_max = max(cur_max, registers.most_common(1)[0][1])
print(registers.most_common(1))
print(cur_max)
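# eval() on puzzle input works, but is easy to avoid; a sketch of the same
# condition check using a lookup of comparison operators (the names below
# are illustrative, not from the original file):
import operator

COMPARISONS = {'<': operator.lt, '>': operator.gt, '<=': operator.le,
               '>=': operator.ge, '==': operator.eq, '!=': operator.ne}


def check(registers, condition):
    # A condition looks like "xyz >= 10": register, operator, literal.
    reg, op, rhs = condition.split(' ')
    return COMPARISONS[op](registers[reg], int(rhs))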
def main() -> Tuple[int, int]:
    s = read_input(__file__)
    result_1 = part1(s)
    result_2 = part2(s)
    return result_1, result_2
import utils

inp = utils.read_input(24)
testinp = """0/2
2/2
2/3
3/4
3/5
0/1
10/1
9/10""".splitlines()


def parse_input(inp):
    return [tuple(map(int, line.split('/'))) for line in inp]


def possible(domino):
    return [domino, (domino[1], domino[0])]


def score(domino_list):
    return sum((dom[0] + dom[1]) for dom in domino_list)


def last_entry(domino_list):
    return domino_list[-1][1]


def find_chains(cur_list, dominos_to_pick):
    if not dominos_to_pick:
parser.add_argument("--visualize", default="2d", help="2d/3d")
args = parser.parse_args()

if args.algorithm == "ucs":
    algorithm = ucs
elif args.algorithm == "greedy":
    algorithm = greedy
elif args.algorithm == "a_star":
    algorithm = a_star
elif args.algorithm == "t3_greedy":
    algorithm = t3_greedy
elif args.algorithm == "t3_dp":
    algorithm = t3_dp
else:
    raise ValueError(args.algorithm + " is not an acceptable algorithm")

if args.visualize == "2d":
    visualize = "2d"
elif args.visualize == "3d":
    visualize = "3d"
else:
    raise ValueError(args.visualize +
                     " is not an acceptable visualisation option")

bound_x, bound_y, s_point, t_point, polygon_list, extra_point_list = \
    read_input(args.input)
visualize_input(polygon_list, s_point, t_point, extra_point_list,
                bound_x, bound_y)
graph = Graph(polygon_list, s_point, t_point, extra_point_list)
graph.visualize_with(algorithm, visualize, bound_x, bound_y)
                                 mode='max')
    early = EarlyStopping(monitor="val_acc", mode="max", patience=5)
    ra_val = RocAucEvaluation(validation_data=(input_val, output_val),
                              interval=1)
    callbacks_list = [ra_val, checkpoint, early]
    model.fit(input_train, out_train, batch_size=batch_size,
              validation_data=(input_val, output_val), epochs=epochs,
              callbacks=callbacks_list, verbose=1)
    model.summary()
    model.save('biLSTM.h5')


if __name__ == "__main__":
    # Input data files are available in the "../input/" directory.
    input_path = 'input/'
    sentenceLength = 150
    input_train, topic_train, out_train, input_val, topic_val, out_val = \
        read_input(input_path)
    (input_train, input_val, topic_train,
     topic_val, word_index) = text_precocess(input_train, input_val,
                                             topic_train, topic_val)
    model = build_model()
    train_model(model, input_train, out_train, input_val, out_val)
    count += 1
    cur_config = tuple(banks)
    # print(cur_config)

print(count)
print(loop_ctr)


# ## Day 8

# In[24]:

inp = read_input(7)
inp = """pbga (66)
xhth (57)
ebii (61)
havc (66)
ktlj (57)
fwft (72) -> ktlj, cntj, xhth
qoyq (66)
padx (45) -> pbga, havc, qoyq
tknk (41) -> ugml, padx, fwft
jptl (61)
ugml (68) -> gyxo, ebii, jptl
gyxo (61)
cntj (57)"""

words = Counter()
for line in inp: