Example #1
def part1():
    directions = [Direction(s) for s in read_lines_as_list('data/day12.txt')]
    facing = EAST
    x, y = (0, 0)
    for d in directions:
        x, y, facing = d.apply(x, y, facing)
    return abs(x) + abs(y)
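Every example below loads its input through the same helper. Its implementation is not shown here; a minimal sketch consistent with how it is called in the other examples (a split_pattern keyword and a skip keyword appear further down) could look like this — an assumption, not the original util module:

def read_lines_as_list(path, split_pattern='\n', skip=0):
    # Hypothetical sketch: read the whole file, drop a trailing newline,
    # split on split_pattern, and optionally skip leading entries.
    with open(path) as f:
        return f.read().rstrip('\n').split(split_pattern)[skip:]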
Example #2
def part2():
    def convert_clockwise(direction):
        # Express a turn as the equivalent number of clockwise (right) degrees.
        if direction.direction == 'R':
            return direction.movement
        # A left turn of 90/180/270 equals a right turn of 270/180/90.
        if direction.movement == 90:
            return 270
        elif direction.movement == 180:
            return 180
        else:
            return 90

    def turn_clockwise(waypoint, direction):
        degrees = convert_clockwise(direction)
        for _ in range(degrees // 90):
            wx, wy = waypoint
            waypoint = (wy, -wx)
        return waypoint
    
    directions = [Direction(s) for s in read_lines_as_list('data/day12.txt')]
    waypoint = (10, 1)
    ship_location = (0, 0)
    for d in directions:
        if d.direction == 'F':
            ship_x, ship_y = ship_location
            wp_x, wp_y = waypoint
            ship_location = (ship_x + d.movement * wp_x, ship_y + d.movement * wp_y)
        elif d.direction in {'L', 'R'}:
            waypoint = turn_clockwise(waypoint, d)
        else:
            wx, wy, _ = d.apply(waypoint[0], waypoint[1], d.direction)
            waypoint = (wx, wy)
    return abs(ship_location[0]) + abs(ship_location[1])
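The waypoint turn relies on the identity that one clockwise quarter turn maps (x, y) to (y, -x), so a left turn is just the complementary number of right turns. A quick sanity check, for illustration only:

def rotate_cw(p):
    x, y = p
    return (y, -x)

# Three clockwise quarter turns equal one counter-clockwise quarter turn,
# which is why convert_clockwise maps L90 -> 270, L180 -> 180, L270 -> 90.
assert rotate_cw(rotate_cw(rotate_cw((10, 1)))) == (-1, 10)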
Example #3
def part1():
    input_data = read_lines_as_list('data/day1.txt')
    num_set = set([int(n) for n in input_data])
    for n in num_set:
        if 2020 - n in num_set:
            return n * (2020 - n)
    raise Exception("Looks like I'm on the naughty list :x")
Example #4
def build_graph():
    input_data = read_lines_as_list('data/day7.txt')
    bags = [Bag(data) for data in input_data]
    graph = {}
    for b in bags:
        graph[b.bag_type] = list(b.contains_bag.items())
    return graph
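The resulting structure maps each bag type to a list of (inner bag type, count) pairs, so containment queries reduce to a recursive walk. A hypothetical usage sketch with illustrative bag names, not taken from the puzzle input:

graph = {
    'light red': [('bright white', 1), ('muted yellow', 2)],  # illustrative data
    'bright white': [('shiny gold', 1)],
    'muted yellow': [],
    'shiny gold': [],
}

def contains(graph, outer, target):
    # True if `outer` can eventually hold a `target` bag.
    return any(inner == target or contains(graph, inner, target)
               for inner, _ in graph[outer])

assert contains(graph, 'light red', 'shiny gold')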
Example #5
def part1():
    earliest, buses = read_lines_as_list('data/day13.txt')
    earliest = int(earliest)
    buses = [int(b) for b in buses.split(',') if b != 'x']
    closest_buses = sorted([(find_closest(b, earliest), b) for b in buses],
                           key=lambda t: t[0])
    dist, bus_stop = closest_buses[0]
    return dist * bus_stop
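find_closest is not shown. Given how its result is used (smallest wait wins, then multiplied by the bus id), it presumably returns the wait time from earliest until bus b next departs; a sketch under that assumption:

def find_closest(b, earliest):
    # Hypothetical: minutes to wait from `earliest` until the next departure
    # of a bus that leaves every `b` minutes (0 if one departs right now).
    return (b - earliest % b) % b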
Example #6
def calculate_trees_hit(right, down):
    input_data = read_lines_as_list('data/day3.txt')
    n = len(input_data[0])
    trees = 0
    horizontal_ptr = 0
    for row in input_data[down::down]:
        horizontal_ptr = (horizontal_ptr + right) % n
        if row[horizontal_ptr] == '#':
            trees += 1
    return trees
Example #7
def part1():
    input_data = read_lines_as_list('data/day8.txt')
    instructions = [Instruction(data) for data in input_data]
    lines_executed = set()
    acc = 0
    ptr = 0
    while ptr not in lines_executed:
        lines_executed.add(ptr)
        acc, ptr = instructions[ptr].execute(acc, ptr)
    return acc
Example #8
def part2():
    input_data = read_lines_as_list('data/day8.txt')
    instructions = [Instruction(data) for data in input_data]
    broken_lines = find_broken_lines(instructions)
    fix_attempts = [
        fix_program_and_get_acc(instructions, bl) for bl in broken_lines
    ]
    answer = [fa for fa in fix_attempts if fa is not None]
    assert len(answer) == 1, "you did this problem wrong dude"
    return answer[0]
Example #9
 def __init__(self):
     self.word_df = load_word_df()   # document frequency
     self.stopwords = read_lines_as_set('dicts/stopwords.txt') # stop words
     self.stopch = read_lines_as_set('dicts/stopch.txt')
     self.surnames = read_lines_as_set('dicts/surnames.txt') # surnames
     self.stop_lexicons = read_lines_as_set('dicts/stop_lexicons.txt', skip=1) # words containing these entries are also treated as stop words
     self.it_lexicons = read_lines_as_list('dicts/it_lexicons.txt', skip=1) # IT domain terms
     self.two_unary = read_lines_as_set('dicts/two_unary.txt', skip=1) 
     self.bigram_lexicons = read_lines_as_set('dicts/bigram_dict.txt', skip=1) 
     self.trigram_lexicons = read_lines_as_set('dicts/trigram_dict.txt', skip=1) 
     self.quatgram_lexicons = read_lines_as_set('dicts/quatgram_dict.txt', skip=1)
Example #10
def part2():
    _, buses = read_lines_as_list('data/day13.txt')
    buses = [(idx, int(b)) for idx, b in enumerate(buses.split(','))
             if b != 'x']
    earliest = find_min_earliest(buses)
    first = buses[0][1]
    while True:
        if any(not fits_condition(earliest, offset, bus_stop)
               for offset, bus_stop in buses[1:]):
            earliest += first
        else:
            return earliest
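fits_condition and find_min_earliest are not shown. The loop's structure suggests fits_condition checks that the bus departs exactly offset minutes after the candidate timestamp; a sketch under that assumption:

def fits_condition(earliest, offset, bus_stop):
    # Hypothetical: the bus with id `bus_stop` must depart `offset`
    # minutes after the candidate timestamp `earliest`.
    return (earliest + offset) % bus_stop == 0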
Example #11
def part1():
    input_data = read_lines_as_list('data/day10.txt')
    nums = sorted([int(n) for n in input_data])
    one_volt_diff = 0
    three_volt_diff = 0
    for prev, next_ in zip(nums[:-1], nums[1:]):
        if next_ - prev == 3:
            three_volt_diff += 1
        elif next_ - prev == 1:
            one_volt_diff += 1
        else:
            raise Exception("this broken")
    return (one_volt_diff + 1) * (three_volt_diff + 1)
Example #12
def do_task_1_nn(task1_ids):
    keywordExtractor = task1_tfidf_keyword_extractor.KeywordExtractor()
    word_df = load_word_df()
    it_lexicons = read_lines_as_list('dicts/it_lexicons.txt', skip=1)  # IT domain terms
    neuralNetworkKeywordClassifier = task1_nn_keyword_classifier.NeuralNetworkKeywordClassifier()
    neuralNetworkKeywordClassifier.load_model('model/task1/0810_02_19_64.54.h5')
    nnFeatureGenerator = task1_nn_keyword_classifier.NeuralNetworkFeatureGenerator(it_lexicons, word_df)
    res = {}
    for doc_id in task1_ids:
        title, content = read_doc_by_id(doc_id)
        title_word, content_word, keywords = keywordExtractor._extract(title, content, alpha=0.22, num1=12, num2=16, delete=False, withscore=False, enrich=False)
        x = nnFeatureGenerator.gen_feature_of_doc_instance(doc_id, keywords, title_word, content_word)
        res[doc_id] = neuralNetworkKeywordClassifier.extractKeywords(x, keywords)
    return res
Example #13
def part2():
    input_data = read_lines_as_list('data/day10.txt')
    nums_set = set([int(n) for n in input_data])
    term = max(nums_set)

    @lru_cache
    def find_combinations(start):
        if start == term:
            return 1
        count = 0
        for i in range(1, 4):
            if start + i not in nums_set:
                continue
            count += find_combinations(start + i)
        return count
    return find_combinations(0)
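A usage note rather than original code: the bare @lru_cache decorator (no parentheses) requires Python 3.8+ and an import from functools; on older interpreters the equivalent spelling is:

from functools import lru_cache

@lru_cache(maxsize=None)  # same effect as the bare @lru_cache on Python 3.8+
def find_combinations(start):
    ...

Memoising only on start is safe here because nums_set and term are fixed closure variables.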
Example #14
 def __init__(self):
     self.word_df = load_word_df()  # document frequency
     self.stopwords = read_lines_as_set('dicts/stopwords.txt')  # stop words
     self.stopch = read_lines_as_set('dicts/stopch.txt')
     self.surnames = read_lines_as_set('dicts/surnames.txt')  # surnames
     # Words containing these entries are also treated as stop words.
     self.stop_lexicons = read_lines_as_set('dicts/stop_lexicons.txt',
                                            skip=1)
     self.it_lexicons = read_lines_as_list('dicts/it_lexicons.txt',
                                           skip=1)  # IT domain terms
     self.two_unary = read_lines_as_set('dicts/two_unary.txt', skip=1)
     self.bigram_lexicons = read_lines_as_set('dicts/bigram_dict.txt',
                                              skip=1)
     self.trigram_lexicons = read_lines_as_set('dicts/trigram_dict.txt',
                                               skip=1)
     self.quatgram_lexicons = read_lines_as_set('dicts/quatgram_dict.txt',
                                                skip=1)
Example #15
def part2():
    input_data = read_lines_as_list('data/day9.txt')
    nums = [int(s) for s in input_data]
    invalid_num = part1()
    s = 0
    left = 0
    right = 0
    while right < len(nums):
        s += nums[right]
        while s > invalid_num:
            s -= nums[left]
            left += 1
        right += 1
        if s == invalid_num:
            return min(nums[left:right]) + max(nums[left:right])
    raise Exception("U did this wrong")
Example #16
def part2():
    input_data = read_lines_as_list('data/day1.txt')
    nums = [int(n) for n in input_data]
    complements = {}
    for i in range(len(nums)):
        for j in range(i + 1, len(nums)):
            comp = 2020 - nums[i] - nums[j]
            if comp >= 0:
                complements[comp] = (i, j)

    for idx, num in enumerate(nums):
        if num not in complements:
            continue
        idx1, idx2 = complements[num]
        if idx != idx1 and idx != idx2:
            return nums[idx1] * nums[idx2] * num
    raise Exception("On the naughty list twice..")
Example #17
def part1():
    input_data = read_lines_as_list('data/day9.txt')
    nums = [int(s) for s in input_data]
    counter = Counter(nums[:25])
    
    def can_find_sum(num):
        for k in counter:
            comp = num - k
            # A pair must use two distinct entries, so pairing a number with
            # itself requires at least two copies inside the 25-wide window.
            if comp == k and counter[comp] >= 2:
                return True
            if comp != k and counter[comp] >= 1:
                return True
        return False

    for idx in range(25, len(nums)):
        num = nums[idx]
        if not can_find_sum(num):
            return num
        counter[num] += 1
        counter[nums[idx - 25]] -= 1
        if counter[nums[idx - 25]] == 0:
            del counter[nums[idx - 25]]
Example #18
def part2():
    input_data = read_lines_as_list('data/day5.txt')
    seat_ids = [get_seat_id(data) for data in input_data]
    return set(range(min(seat_ids), max(seat_ids))) - set(seat_ids)
Example #19
def part1():
    input_data = read_lines_as_list('data/day5.txt')
    return max([get_seat_id(data) for data in input_data])
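get_seat_id is shared by the two seat examples above but not shown. A plausible implementation treats the F/B/L/R boarding-pass string as a 10-bit binary number (an assumption about the helper, consistent with how its result is used):

def get_seat_id(boarding_pass):
    # Hypothetical: B and R select the upper half at each step, i.e. a 1 bit;
    # the resulting binary number is row * 8 + column, the seat id.
    bits = boarding_pass.strip().translate(str.maketrans('FBLR', '0101'))
    return int(bits, 2)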
Example #20
from copy import deepcopy
from util import read_lines_as_list

EMPTY = 'L'
OCCUPIED = '#'
FLOOR = '.'
INPUT_DATA = [list(row) for row in read_lines_as_list('data/day11.txt')]
def in_bounds(x, y):
    return 0 <= x < len(INPUT_DATA) and 0 <= y < len(INPUT_DATA[0])


def simulate_round(seats, has_adjacent_occupied_seat_fn,
                   adjacent_occupied_seats_to_free):
    new_seats = deepcopy(seats)

    def get_symbol(x, y):
        current_state = seats[x][y]
        if current_state == FLOOR:
            return FLOOR

        directions = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1),
                      (1, 0), (1, 1)]
        if current_state == EMPTY:
            for dx, dy in directions:
                if in_bounds(x + dx, y + dy) and has_adjacent_occupied_seat_fn(
                        x, dx, y, dy, seats):
                    return EMPTY
            return OCCUPIED

        else:  # current_state == OCCUPIED
            occupied_seats = 0
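            # The excerpt ends here. A hypothetical continuation of this
            # branch, inferred from the function's parameters rather than
            # taken from the original code: count occupied neighbours and
            # free the seat once the threshold is reached.
            for dx, dy in directions:
                if in_bounds(x + dx, y + dy) and has_adjacent_occupied_seat_fn(
                        x, dx, y, dy, seats):
                    occupied_seats += 1
            if occupied_seats >= adjacent_occupied_seats_to_free:
                return EMPTY
            return OCCUPIED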
Example #21
def part2():
    input_data = read_lines_as_list('data/day2.txt')
    passwords = [Password(s) for s in input_data]
    return len([pwd for pwd in passwords if pwd.is_valid_part2()])
Example #22
def part2():
    input_data = read_lines_as_list('data/day4.txt', split_pattern='\n\n')
    parsed = [string_to_dict(s) for s in input_data]
    return len([p for p in parsed if SchemaPartTwo(p)])
Example #23
def part1():
    input_data = read_lines_as_list('data/day6.txt', split_pattern='\n\n')
    group = [''.join(d.split('\n')) for d in input_data]
    return reduce(lambda acc, curr: acc + len(set(curr)), group, 0)
Example #24
    elif cache_place == "lookup":
        mcdir = WORKDIR + "/mc/"

        cache_prefix = {}
        conff = {}
        for ind in useDataSets:
            dataname = ind2dataName[ind]
            if iprint: print(__file__.split("/")[-1], dataname)
            conff[ind] = confs % dataname
            cache_prefix[ind] = get_conf(conff[ind], "cache_prefix")

        flist = glob.glob(mcdir + "/cache_server*")
        servers = []
        for i in range(len(flist)):
            serverip = read_lines_as_list(flist[i])
            serverip = serverip[0]
            servers.append(serverip)

            tmp = memcache.Client([serverip + ":11211"])
            ret = tmp.set("tmp", 1, time=10)
            val = tmp.get("tmp")
            if not (ret and val):
                print("-- Cache server fail: " + flist[i], serverip)
                # str.lstrip() strips a character set, not a prefix, so slice
                # off the known prefix to recover the server index instead.
                mc_ind = flist[i].split(".")[0][len(mcdir + "/cache_server"):]
                print("-- Run this cmd at login node:")
                print("jbsub -interactive -queue x86_7d -mem 40g sh " +
                      WORKDIR + "/memcached/start.sh " + mc_ind)
                sys.exit(0)

        if iprint: print('servers', servers)
Example #25
def part2():
    input_data = read_lines_as_list('data/day6.txt', split_pattern='\n\n')
    return reduce(lambda acc, curr: acc + group_consensus(curr), input_data, 0)
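group_consensus is not shown. Since each element of input_data is one group's answers separated by newlines and the reduction sums a per-group count, it presumably counts the questions every member of the group answered; a sketch under that assumption:

def group_consensus(group):
    # Hypothetical: number of answer characters common to every person
    # (every non-empty line) in the group.
    answers = [set(person) for person in group.split('\n') if person]
    return len(set.intersection(*answers))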