        if last_index >= 0 and letters[last_index] == y:
            letters.pop()
        else:
            letters.append(x)
    return "".join(letters)


def smallest(polymer):
    # Try removing each unit type (both cases), reduce what is left, and keep
    # the length of the shortest result.
    reduced_size = len(polymer)
    for char in string.ascii_lowercase:
        replaced = polymer.replace(char, "").replace(char.swapcase(), "")
        reduced = reduce(replaced)
        if len(reduced) < reduced_size:
            reduced_size = len(reduced)
    return reduced_size


lines = read_file("../inputs/input5.txt")
polymers = lines[0].rstrip()
print("input length:", len(polymers))

reduced = reduce(polymers)
print("reduced polymer:", len(reduced), "'" + reduced + "'")
print("smallest after removing one:", smallest(polymers))

if __name__ == "__main__":
    import doctest
    doctest.testmod()
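# A minimal, self-contained sketch of the stack-based reduction that the tail
# of reduce() above relies on (the top of reduce() is not shown here, so its
# exact shape is an assumption; the name reduce_polymer is hypothetical).
def reduce_polymer(polymer):
    letters = []
    for x in polymer:
        # A unit reacts with the previous one when it is the same letter in
        # the opposite case, e.g. "a" next to "A".
        if letters and letters[-1] == x.swapcase():
            letters.pop()
        else:
            letters.append(x)
    return "".join(letters)


# Worked example from the puzzle text: "dabAcCaCBAcCcaDA" reduces to
# "dabCBAcaDA" (length 10).
assert reduce_polymer("dabAcCaCBAcCcaDA") == "dabCBAcaDA"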
        frequency += change
        frequencies.append(frequency)
    return frequencies


def find_first_repeated(input_changes):
    # Keep applying the changes in a loop until a running total repeats.
    changes = [int(x) for x in input_changes]
    frequency = 0
    seen = {frequency}
    while True:
        for change in changes:
            frequency += change
            if frequency in seen:
                return frequency
            seen.add(frequency)


input_changes = read_file("../inputs/input1.txt")
frequency_results = calc_frequency(input_changes)
print("Final frequency is", frequency_results[-1])

first_repeated = find_first_repeated(input_changes)
print("First repeated frequency is", first_repeated)

if __name__ == "__main__":
    import doctest
    doctest.testmod()
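# Quick check of the repeated-frequency logic, using the example from the
# puzzle text: the changes +1, -2, +3, +1 first repeat the running total 2.
# first_repeat is a hypothetical standalone copy so the example runs on its
# own; the script itself calls find_first_repeated on the lines of input1.txt.
def first_repeat(changes):
    frequency = 0
    seen = {frequency}
    while True:
        for change in changes:
            frequency += change
            if frequency in seen:
                return frequency
            seen.add(frequency)


assert first_repeat([+1, -2, +3, +1]) == 2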
def parse_claim(claim_string):
    # A claim looks like "#123 @ 3,2: 5x4": id, left/top offsets, then size.
    [claim_id, details] = claim_string.split(" @ ")
    [position, size] = details.split(": ")
    [left, top] = [int(x) for x in position.split(",")]
    [width, height] = [int(x) for x in size.split("x")]
    return {
        "id": claim_id,
        "left": left,
        "top": top,
        "width": width,
        "height": height,
    }


claim_strings = read_file("../inputs/input3.txt")
claims = [parse_claim(x) for x in claim_strings]
canvas = count_claims(claims)

count = overlapping_count(canvas)
print("number of overlapping cells is", count)

clean_claim = find_clean_claim(canvas, claims)
print("non-overlapping claim is", clean_claim)

if __name__ == "__main__":
    import doctest
    doctest.testmod()
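# Usage sketch for parse_claim() above, using the claim format from the puzzle
# text: "#123 @ 3,2: 5x4" means claim #123, 3 inches from the left edge,
# 2 from the top, 5 wide and 4 tall.
assert parse_claim("#123 @ 3,2: 5x4") == {
    "id": "#123",
    "left": 3,
    "top": 2,
    "width": 5,
    "height": 4,
}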
    for row in matrix:
        print(row)


# Example coordinates from the puzzle description:
# inputs = {'A': (1, 1), 'B': (1, 6), 'C': (8, 3), 'D': (3, 4), 'E': (5, 5), 'F': (8, 9)}


def parse_input(inputs):
    # Number each "x, y" line and store its coordinates under that id.
    ins = {}
    i = 0
    for row in inputs:
        i += 1
        coords = [int(x) for x in row.split(", ")]
        ins[i] = coords
    return ins


in_list = read_file("../inputs/input6.txt")
inputs = parse_input(in_list)
print(inputs)

matrix = bounds([x for x in inputs.values()])

# Part one (largest finite area) is kept here for reference:
# matrix = closest(inputs, matrix)
# ids = sizes(matrix)
# print("size by id", ids)
# largest = biggest(matrix, ids)
# print("largest area is", largest)

score(inputs, matrix)
area_size = count_score_below(matrix, 10000)
print("area with scores below 10000:", area_size)

if __name__ == "__main__":
    import doctest
    doctest.testmod()
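# score() and count_score_below() are defined earlier in the file and are not
# shown above. A minimal sketch of the part-two idea they are assumed to
# implement: for every cell, sum the Manhattan distances to all coordinates
# and count the cells whose total stays below the threshold. The helper name
# safe_region_size is hypothetical.
def safe_region_size(coords, width, height, threshold):
    count = 0
    for x in range(width):
        for y in range(height):
            total = sum(abs(x - cx) + abs(y - cy) for cx, cy in coords)
            if total < threshold:
                count += 1
    return count


# Puzzle example: with a threshold of 32, the six sample coordinates enclose
# a safe region of 16 cells.
sample = [(1, 1), (1, 6), (8, 3), (3, 4), (5, 5), (8, 9)]
assert safe_region_size(sample, 10, 10, 32) == 16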
                    diff_count += 1
                    if diff_count > 1:
                        break
            if diff_count == 1:
                return [id1, id2]


def same(id1, id2):
    # Collect the characters that match at the same position in both ids.
    same = ""
    for i in range(len(id1)):
        if id1[i] == id2[i]:
            same += id1[i]
    return same


ids = read_file("../inputs/input2.txt")

two_count = 0
three_count = 0
for id in ids:
    char_map = char_count(id)
    if 2 in char_map.values():
        two_count += 1
    if 3 in char_map.values():
        three_count += 1
print("Checksum is", two_count * three_count)

similar_ids = similar(ids)
print("Similar ids are", similar_ids)
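# Usage sketch for same() above, taken from the puzzle text: "fghij" and
# "fguij" differ by exactly one character, and the letters they share in the
# same positions spell "fgij".
assert same("fghij", "fguij") == "fgij"

# char_count() is defined earlier in the file and not shown above; it is
# assumed to map each character of an id to how often it occurs, which
# collections.Counter does directly. char_count_sketch is hypothetical.
from collections import Counter


def char_count_sketch(box_id):
    return dict(Counter(box_id))


# Puzzle example: "bababc" contains two a's and three b's, so it counts
# towards both totals of the checksum.
assert 2 in char_count_sketch("bababc").values()
assert 3 in char_count_sketch("bababc").values()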
        if not found:
            keys.append(key)
    return keys


def graph(inputs):
    # Build both directions of the dependency graph from lines like
    # "Step C must be finished before step E can begin."
    dependencies = {}
    dependants = {}
    for line in inputs:
        words = line.strip().split(" ")
        before = words[1]
        after = words[7]
        if after in dependencies:
            dependencies[after].append(before)
        else:
            dependencies[after] = [before]
        if before in dependants:
            dependants[before].append(after)
        else:
            dependants[before] = [after]
    return [dependants, dependencies]


inputs = read_file("../inputs/input7.txt")
[dependants, dependencies] = graph(inputs)

start = find_start(dependants)
order = traverse(start, dependants, dependencies)
print("Step order:", "".join(order))

if __name__ == "__main__":
    import doctest
    doctest.testmod()
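# find_start() and traverse() are defined earlier in the file and not shown
# above. A minimal sketch of the ordering they are assumed to produce:
# repeatedly pick the alphabetically first step whose prerequisites are all
# done (a topological sort with alphabetical tie-breaking). The helper name
# step_order is hypothetical.
def step_order(pairs):
    # pairs is a list of (before, after) tuples.
    steps = sorted({step for pair in pairs for step in pair})
    prereqs = {step: set() for step in steps}
    for before, after in pairs:
        prereqs[after].add(before)

    done = []
    while len(done) < len(steps):
        ready = [s for s in steps if s not in done and prereqs[s] <= set(done)]
        done.append(min(ready))
    return "".join(done)


# The puzzle example resolves to "CABDFE".
example_pairs = [
    ("C", "A"), ("C", "F"), ("A", "B"), ("A", "D"),
    ("B", "E"), ("D", "E"), ("F", "E"),
]
assert step_order(example_pairs) == "CABDFE"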