# Advent of Code 2018 - Day 3
# Author: Rachael Judy
# Date: 12/15/2020
# Purpose: Count squares of grid that are used multiple times and find the claim that is used only once

import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import parseMod

# get input
recommendations = parseMod.readCSV_rowEl('data/3cuts.csv')

# populate fabric grid
fabric = [[0 for j in range(1200)] for i in range(1200)]
for suggestion in recommendations:
    position = suggestion[2].strip(':').split(',')  # third argument is position of top left
    dimensions = suggestion[3].split('x')  # fourth argument is dimension

    # increment the count on each section being covered
    for y in range(int(position[1]), int(position[1]) + int(dimensions[1])):
        for x in range(int(position[0]), int(position[0]) + int(dimensions[0])):
            fabric[y][x] += 1

# count how many squares have more than one claim
count_overlaps = 0
for y in range(len(fabric)):
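# Hedged continuation (not in the original fragment): a minimal sketch of the
# overlap tally, assuming every square claimed more than once should be counted.
    for x in range(len(fabric[y])):
        if fabric[y][x] > 1:
            count_overlaps += 1
print("Squares claimed more than once:", count_overlaps)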
            else:
                return -1  # indicates termination due to an infinite loop
            if self.pc >= len(self.instructions):
                return 0
            elif instruction[0] == 'acc':  # add to accumulator
                self.acc += int(instruction[1])
                self.pc += 1
            elif instruction[0] == 'jmp':  # jump by the argument
                self.pc += int(instruction[1])
            elif instruction[0] == 'nop':  # go on, no operation
                self.pc += 1


# read input instructions
instr = parseMod.readCSV_rowEl('data/8instructions.csv', ' ')

# part 1 - find accumulator at infinite loop
print("PART 1")
console = GameConsole(instr)
code = console.compute(check_loop=True)
print("Acc at first loop: ", console.acc)
print()

# part 2 - find accumulator on correct termination, presuming a single incorrect jmp or nop to switch
print("PART 2")
for i in range(len(instr)):
    # copy instructions, fix one error
    copyInstructions = copy.deepcopy(instr)
    if copyInstructions[i][0] == 'jmp':
        copyInstructions[i][0] = 'nop'
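# Hedged continuation (assumption, not in the original fragment): also flip
# nop to jmp, then rerun; a clean exit (assumed to return 0 above) is the fix.
    elif copyInstructions[i][0] == 'nop':
        copyInstructions[i][0] = 'jmp'
    console = GameConsole(copyInstructions)
    if console.compute(check_loop=True) == 0:
        print("Acc at termination: ", console.acc)
        break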
    reactionsNeeded = math.ceil((target[1] - amountsNeeded[target[0]][1]) / outputDict[target[0]])
    generated = reactionsNeeded * outputDict[target[0]]
    spare = generated - target[1] + amountsNeeded[target[0]][1]

    # assign amount needed and spare for the target
    amountsNeeded[target[0]][0] += generated  # total number of target produced
    amountsNeeded[target[0]][1] = spare  # spares

    # check the needs of all children elements
    for element in reactionDict[target[0]]:
        elementsNeeded = reactionsNeeded * element[1]  # number of element needed to produce target
        calculateCost((element[0], elementsNeeded))  # call on element to find out if extra generated and get children


# parse input
reactions = parseMod.readCSV_rowEl('data/14reactions.csv')
global reactionDict, amountsNeeded, outputDict  # dictionaries of equation representations and costs
reactionDict = dict()
outputDict = dict()
amountsNeeded = dict()

# parse into output/input dictionaries
for reaction in reactions:
    inputs = []
    for i in range(0, len(reaction) - 3, 2):
        inputs.append((reaction[i + 1].strip(','), int(reaction[i])))
    reactionDict[reaction[-1]] = inputs
    outputDict[reaction[-1]] = int(reaction[-2])
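# Hedged continuation (assumption, not the original code): seed the bookkeeping
# dictionaries, treat ORE as a freely available input, and request 1 FUEL; the
# ORE total accumulated by calculateCost is then the part 1 answer.
for chemical in outputDict:
    amountsNeeded[chemical] = [0, 0]  # [total produced, spare]
reactionDict['ORE'] = []
outputDict['ORE'] = 1
amountsNeeded['ORE'] = [0, 0]
calculateCost(('FUEL', 1))
print("ORE required for 1 FUEL:", amountsNeeded['ORE'][0])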
# Author: Rachael Judy
# Date: 12/13/2020
# Purpose: Find the soonest train that is after the start time
#          Find first time with all trains departing at time + index in original read in that matches

import os
import sys
import math
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import parseMod

# get input
information = parseMod.readCSV_rowEl('data/13loop.csv', ',')
target = int(information[0][0])  # initialize the target value
buses, indices = [], []  # track the spots where a bus actually is
min_bus_departure = 10_000_000  # used to store minimum departure time available
bus_id = 0  # bus to take

# Part 1, setup for part 2
# go through each bus and find first time it can be taken
for element in information[1]:
    if element.isnumeric():  # if not x, store bus
        buses.append(int(element))
        indices.append(information[1].index(element))

        # save closest possible departure time for this bus - first time it can be taken
        closest_possible_departure = math.ceil(target / float(element)) * int(element)
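# Hedged continuation (assumption, not in the original fragment): keep the bus
# with the smallest wait; part 1 is the wait time multiplied by the bus id.
        if closest_possible_departure < min_bus_departure:
            min_bus_departure = closest_possible_departure
            bus_id = int(element)

print("Part 1:", (min_bus_departure - target) * bus_id)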
# Advent of Code 2019 - Day 22
# Author: Rachael Judy
# Date: 12/12/2020
# Purpose: Implement shuffles on massive deck

import os
import sys
import copy
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import parseMod

dealing = parseMod.readCSV_rowEl('data/22dealer.csv')

# part 1 - kind of brute force
# create deck
deck = [i for i in range(10007)]
for instruction in dealing:
    if instruction[-2] == 'increment':
        new_deck = [0 for i in range(len(deck))]
        index = 0
        for card in deck:
            new_deck[index % len(deck)] = card
            index += int(instruction[-1])
        deck = copy.deepcopy(new_deck)
    elif instruction[-2] == 'cut':
        deck = deck[int(instruction[-1]):len(deck)] + deck[0:int(instruction[-1])]
    elif instruction[-2] == 'new':
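# Hedged continuation (assumption, not in the original fragment): dealing into a
# new stack reverses the deck; part 1 then asks where card 2019 ends up.
        deck.reverse()

print("Position of card 2019:", deck.index(2019))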
# Author: Rachael Judy
# Date: 12/16/2020
# Purpose: Parse the list of conditions, my ticket, and other tickets to determine the sum of the invalid numbers on
#          the tickets (part 1). Remove invalid tickets and use what's left to determine what field each number is.
#          Find the product of departure fields on my ticket (part 2)

import copy
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import parseMod

# read input
tickets = parseMod.readCSV_rowEl('data/16tickets.csv')

# get the conditions by reading to empty line
condition_list = []
for condition_i, ticket in enumerate(tickets):
    if not len(ticket):
        break

    # store bounds of condition as int
    bounds = [ticket[-3].split('-'), ticket[-1].split('-')]
    for x, c in enumerate(bounds):
        for i, n in enumerate(c):
            bounds[x][i] = int(n)

    # determine name
    if len(ticket) > 4:
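# Hedged continuation (assumption about the token layout, not the original code):
# two-word field names such as 'departure location' span the first two elements.
        name = ticket[0] + ' ' + ticket[1].strip(':')
    else:
        name = ticket[0].strip(':')
    condition_list.append((name, bounds))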
# Advent of Code Day 2
# Author: Rachael Judy
# Date: 12/2/2020
# Purpose: Check password validity by min and max count of a certain character (part 1)
#          Check XOR of occurrence of the character at the specified indices; 1-indexing for the non-programmers making bad passwords

import parseMod

stage = 2

# get array of each line, split at spaces
pwd_list = parseMod.readCSV_rowEl('data/2pswd.csv', ' ')
numValid = 0
for pswd in pwd_list:
    bounds = pswd[0].split('-')
    mini = int(bounds[0])  # min count or index
    maxi = int(bounds[1])  # max count or index
    c = pswd[1][0]  # character to look for
    count = 0

    # For Part 2 - checking character position
    if stage == 2:
        # XOR of occurrence at the two spots
        if pswd[2][mini - 1] == c and pswd[2][maxi - 1] != c or \
           pswd[2][mini - 1] != c and pswd[2][maxi - 1] == c:
            numValid += 1
    else:
        # For Part 1 - checking the character count
        for letter in pswd[2]:
            if letter == c:
                count += 1
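# Hedged continuation (assumption, not in the original fragment): part 1 accepts
# the password when the character count lies within the inclusive bounds.
        if mini <= count <= maxi:
            numValid += 1

print("Valid passwords:", numValid)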
    14380, 14380, 14380, 13737, 13657, 13547, 13539, 13525, 13525, 13525, 13108, 12903, 12862, 12862,
    12836, 12830, 12830, 12836, 12862, 12903, 13108, 13188, 13108, 12903, 12862, 12836, 12830, 12723,
    12479, 12410, 12383, 12207, 12036, 11822, 11679, 11679, 11679, 11679, 11679, 11679, 11679, 11783,
    11895, 12001, 12169, 12402, 12444, 12444, 12444, 12444, 12444, 12444, 12410, 12383, 12324, 12207,
    12036, 11776, 11776, 11776, 11776, 11793, 11855, 12101, 11500, 11730, 11812, 11855, 12003, 11812,
    11730, 11676, 11500, 11459, 11459, 11459, 11151, 11077, 10994, 10971, 10969, 10956, 10847, 10689,
    10607, 10517, 10517, 10760, 11055, 11679, 11724, 11728, 11783, 11725, 11724, 11679, 11590, 11055,
    10930, 10796, 10787, 10063, 10063, 10063, 10517, 10760, 10932, 10954, 10954, 10690, 10690, 10690,
    10690, 10689, 10607, 10270, 10130, 10517, 10690]

# read file in - an array of two arrays, one for each wire
paths = parseMod.readCSV_rowEl('data/3steps.csv', ',')

# parse based on R, D, L, U etc, update grid
grid = numpy.zeros((20000, 20000))
ind = 0
sol = 100000  # set initial high
center = 10000

# count steps for each wire
stepCount = [0, 0]

# on second pass keep minimum
steps = [[500000 for i in xs], [500000 for j in ys]]
marked = [0 for k in range(len(xs) + 1)]

# populate the close spot options
while ind < len(paths):  # do both wires
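# Hedged partial sketch (assumption, not the original continuation): walk the
# current wire one cell at a time from the center; the original grid-marking and
# intersection bookkeeping for each visited cell is not shown in this fragment.
    x, y = center, center
    for move in paths[ind]:
        dx, dy = {'R': (1, 0), 'L': (-1, 0), 'U': (0, -1), 'D': (0, 1)}[move[0]]
        for _ in range(int(move[1:])):
            x, y = x + dx, y + dy
            stepCount[ind] += 1
            # mark grid[y][x] for the first wire / check intersections for the second wire here
    ind += 1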
    # check each possible container inside the source
    for bag in content_dict[source]:
        # if contains nothing, path done
        if bag[0] == 'other bags. ':
            return

        # number of this bag inside the source, plus whatever it could contain
        # print(bag[0])
        amount = bag[1] * number
        countContains(bag[0], amount)
        count += amount


# get input
rules = parseMod.readCSV_rowEl('data/7bags.csv', ' ')
global content_dict
content_dict = dict()
global count
count = 0
global bags_collected
bags_collected = []

# go through each rule, placing outputs and inputs in content_dict
for rule in rules:
    # skip blank rules
    if rule != '':
        bag_type = arrayToString(rule[0:2])
        content_dict[bag_type] = []
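# Hedged continuation (assumption about the rule layout, not the original code):
# after 'contain', contents usually come in groups of four tokens (a count, two
# name words, and 'bag(s)'), so parse each group into a (name, count) pair.
        for i in range(4, len(rule), 4):
            if rule[i].isnumeric():
                content_dict[bag_type].append((arrayToString(rule[i + 1:i + 3]), int(rule[i])))
            else:
                # 'no other bags.' line; matches the 'other bags. ' sentinel checked above
                content_dict[bag_type].append((arrayToString(rule[i + 1:i + 3]), 0))

# A count could then start from the shiny gold entry, e.g.:
# countContains(arrayToString(['shiny', 'gold']), 1); print(count)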