Example no. 1
def p2(file_name):
    data = read_input(file_name)
    data.sort(key=lambda x: datetime.strptime(x[1:17], DATE_FORMAT))
    current_guard = None
    current_start = None
    current_end = None
    guards = {}
    for note in data:
        if 'begins' in note:
            current_guard = note
            guard_id = note.split('#')[1]
            guard_id = guard_id.split(' ')[0]
            if guard_id not in guards:
                guards[guard_id] = []
        elif 'asleep' in note:
            current_start = int(note[15:17])
        elif 'wakes' in note:
            current_end = int(note[15:17])
            guards[guard_id].extend(list(range(current_start, current_end)))

    most_frequent = None
    for guard_id, times in guards.items():
        minutes = dict.fromkeys(range(60), 0)
        for minute in times:
            minutes[minute] += 1
        most_minute = max(minutes.items(), key=lambda x: x[1])
        if most_frequent is None:
            most_frequent = (guard_id, most_minute[0], most_minute[1])
        else:
            is_bigger = most_minute[1] > most_frequent[2]
            if is_bigger:
                most_frequent = (guard_id, most_minute[0], most_minute[1])
    return int(most_frequent[0]) * most_frequent[1]
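For context, the note[1:17] slice and DATE_FORMAT used above suggest input lines shaped like "[1518-11-01 00:00] Guard #10 begins shift" (a hypothetical sample, not taken from the source), in which case DATE_FORMAT would presumably be:

DATE_FORMAT = '%Y-%m-%d %H:%M'  # matches the 16-character timestamp in note[1:17]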
Example no. 2
def problem1(file):
    lines = util.read_input("input/"+file+".txt")
    terrain, units = parse_input(lines)

    ticks = 0
    for i in range(10000):
        end = False

        player_order = pydash.sort_by(units, POS)
        for p in player_order:
            if p not in units:
                continue

            if len(set(map(lambda u: u[TEAM], units))) < 2:
                end = True
                break

            move(p, terrain, units, 3)

        if end:
            break

        ticks += 1

        # print(ticks)
        # print_state(terrain, units)

    print(ticks)
    print_state(terrain, units)

    health_sum = sum(map(lambda u: u[HEALTH], units))
    print(ticks * health_sum)
Example no. 3
def p1(file_name):
    data = read_input(file_name)
    data.sort(key=lambda x: datetime.strptime(x[1:17], DATE_FORMAT))
    current_guard = None
    current_start = None
    current_end = None
    guards = {}
    for note in data:
        if 'begins' in note:
            current_guard = note
            guard_id = note.split('#')[1]
            guard_id = guard_id.split(' ')[0]
            if guard_id not in guards:
                guards[guard_id] = []
        elif 'asleep' in note:
            current_start = int(note[15:17])
        elif 'wakes' in note:
            current_end = int(note[15:17])
            guards[guard_id].extend(list(range(current_start, current_end)))

    most_sleep = max(guards.items(), key=lambda x: len(x[1]))
    _id = int(most_sleep[0])
    minute = mode(most_sleep[1])

    return _id * minute
Example no. 4
def problem2_interpreted(filename):
    lines = util.read_input(filename)
    regs = [0] * 6
    ip_reg = util.parse_int_line(lines[0])[0]
    ops = []
    args = []
    for l in lines[1:]:
        ops.append(l.split(" ")[0])
        args.append(util.parse_int_line(l))

    found_values = set()

    matches = 0
    while matches < 100:
        ip = regs[ip_reg]
        if ip == 28:
            matches += 1
            if regs[4] not in found_values:
                found_values |= {regs[4]}
                print(datetime.now(), regs[4])
        # make sure it does not terminate
        if ip > 30:
            ip = 5
        if not (ip >= 0 and ip < len(ops)):
            break
        o = ops[ip]
        a = args[ip]
        operations[o](regs, a)
        regs[ip_reg] += 1
Example no. 5
def evaluate(in_file, out_file):
    _, _, B, _, rides, cars = read_input(in_file)
    used_rides = [False] * len(rides)

    score = 0

    def error():
        print("Invalid output file")
        exit(0)

    with open(out_file, 'r') as f:
        for c in cars:
            line = [int(x) for x in f.readline().strip().split()]
            if line[0] != len(line)-1:
                error()
            for r in line[1:]:
                if not used_rides[r]:
                    used_rides[r] = True
                    score += len(rides[r]) if c.servable(rides[r]) else 0
                    score += B if c.bonus(rides[r]) else 0
                    c.serve(rides[r])
                else:
                    error()
    return score
Example no. 6
def problem2():
    lines = util.read_input(filename)
    stop = lines[0]
    # stop = "51589"
    scores = [3, 7]

    target = []
    for s in stop:
        target.append(int(s))
    target.reverse()

    elf1 = 0
    elf2 = 1
    m = len(stop)
    while True:
        s = scores[elf1] + scores[elf2]
        for d in str(s):
            scores.append(int(d))
        elf1 = (elf1 + scores[elf1] + 1) % len(scores)
        elf2 = (elf2 + scores[elf2] + 1) % len(scores)

        if match(scores, len(scores) - 1, target):
            break
        if match(scores, len(scores) - 2, target):
            m += 1
            break

    for s in scores[-10:]:
        print(s, end="")
    print()
    print(len(scores) - m)
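A minimal sketch of the match() helper this example assumes (not part of the original source): target is stored reversed, so it is compared against scores walking backwards from the given end index.

def match(scores, end, target):
    # target is reversed, so target[0] should line up with scores[end]
    if end + 1 < len(target) or end >= len(scores):
        return False
    return all(scores[end - i] == d for i, d in enumerate(target))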
Example no. 7
def part2():
    field_rules, your_ticket, nearby_tickets = parse_input(read_input())

    valid_tickets = list(
        filter(lambda t: ticket_is_valid(t, field_rules), nearby_tickets))

    fields = {
        field_name: Field(field_name)
        for field_name in field_rules.keys()
    }
    positions = [Position(i) for i in range(len(fields))]

    for position in positions:
        possible_fieldnames_str = set.intersection(
            *(find_possible_fields(t[position.index], field_rules)
              for t in valid_tickets))
        position.possible_fields = [
            fields[field_name] for field_name in possible_fieldnames_str
        ]

    for field in fields.values():
        field.possible_positions = list(
            filter(lambda p: field in p.possible_fields, positions))
    location_per_fieldname = find_field_positions(positions,
                                                  list(fields.values()))
    return mult(your_ticket[location_per_fieldname[f]]
                for f in location_per_fieldname.keys()
                if f.startswith('departure'))
Example no. 8
def main():
    true_input = read_input(14)
    ansA = processA(true_input)
    assert ansA == 13727901897109

    ansB = processB(true_input, 36)
    assert ansB < 5605677382384
    assert ansB == 5579916171823
Example no. 9
def main():
    train = 1
    window_sz = 5  # n words to the left, x words to the right
    embeddings_sz = 100
    epochs = 10

    if train:
        word2idx, idx2word, sentences_tokens, corpus = util.read_input(filename, most_common=most_common)
        X, Y = get_features(sentences_tokens, word2idx, window_sz, corpus)
        #neg_X, neg_Y =X, Y
        #shuffle(neg_X)
        #shuffle(neg_Y)
        labels = [1] * len(X)
        #neg_labels =  [0] * len(X)
        #X = X+neg_X
        #Y = Y+neg_Y
        #labels = labels + neg_labels
        X = np.array(X, dtype=float)
        Y = np.array(Y, dtype=float)

        labels = np.array(labels, dtype=float)

        print('X=', X.shape, 'Y=', Y.shape, 'corpus=', len(corpus))
        vocab_size = len(corpus)

        input_target = Input((1,))
        input_context = Input((1,))

        embedding = Embedding(vocab_size, embeddings_sz, input_length=1, name='embedding')
        target = embedding(input_target)
        target = Reshape((embeddings_sz, 1))(target)
        context = embedding(input_context)
        context = Reshape((embeddings_sz, 1))(context)

        # now perform the dot product operation to get a similarity measure
        # (Keras 2 Dot layer; the original merge(mode='dot') call is the Keras 1 API and was removed upstream)
        dot_product = Dot(axes=1)([target, context])
        dot_product = Reshape((1,))(dot_product)
        # add the sigmoid output layer
        output = Dense(1, activation='sigmoid')(dot_product)


        model = Model(inputs=[input_target, input_context], outputs=output)
        model.compile(loss='binary_crossentropy', optimizer='rmsprop')

        model.fit([X, Y], labels, epochs=epochs, batch_size=128)
        # for cnt in range(epochs):
        #     loss = model.train_on_batch([X, Y], labels)
        #     if cnt % 2 == 0:
        #         print("Iteration {}, loss={}".format(cnt, loss))

    embeddings_file = "./output/embeddings_vocab_%s_%s_epochs_%s_skipgram.txt"%(len(corpus), most_common, epochs)

    embeddings = np.transpose(model.get_layer(name='embedding').get_weights()[0])

    util.save_embeddings(embeddings_file, embeddings, idx2word)
Example no. 10
def main():
    parser = argparse.ArgumentParser(description='Solve pipes problem.')
    parser.add_argument('file_ids',
                        metavar='N',
                        type=int,
                        nargs='+',
                        help='file ids, this will be used as input<id>.txt')
    arg = parser.parse_args()
    for file_id in arg.file_ids:
        solve(*read_input("input/input%s.txt" % file_id))
Example no. 11
    def __init__(self, filename_data="data/sample_conversations.json", filename_pkl="autocomplete_state.pkl", load=False):
        if load:
            self.load_from_file(filename_pkl)
            return
        self.tt = Trie()
        data = util.read_input(filename_data)
        for line, count in util.get_customer_service_phrases(data).items():
            for i in range(count):
                self.tt.add(line)
        util.save_object(self.tt, filename_pkl)
Example no. 12
def get_reactions():
    reactions = dict()
    for line in read_input('input14.txt'):
        input_chemicals, output_chemical = line.split('=>')
        reaction = list()
        for chemicals in input_chemicals.split(','):
            quantity, chemical = chemicals.strip().split(' ')
            reaction.append((int(quantity), chemical))
        quantity, chemical = output_chemical.strip().split(' ')
        reactions[(int(quantity), chemical)] = reaction
    return reactions
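As a worked example (hypothetical line, not taken from the real input file):

# "7 A, 1 E => 1 FUEL" parses to the entry
# reactions[(1, 'FUEL')] == [(7, 'A'), (1, 'E')]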
Example no. 13
def part2():
    tile_data = parse_input_as_tiles(read_input())
    match_borders(tile_data)
    img = assemble_img(tile_data)

    def find_roughness(image):
        for _ in range(4):
            roughness, n_sea_monsters = find_sea_monsters_in_img(image)
            if n_sea_monsters > 0:
                return roughness
            image = np.rot90(image)

    return find_roughness(img) or find_roughness(np.flipud(img))
Example no. 14
def problem1():
    sys.setrecursionlimit(15000)

    lines = util.read_input(filename)
    clay, (ymin, ymax) = parse(lines)
    source = (500, ymin)

    water = set()
    drained = set()
    dfs(clay, water, drained, source, ymax)

    util.print_states(clay, "#", water, ".", drained, "_", flip=True)
    print(len(water))
    print(len(water) - len(drained))
Example no. 15
def problem1(filename):
    sys.setrecursionlimit(15000)

    lines = util.read_input(filename)
    l = lines[0][1:-1]
    graph = {}

    global visited_dfs
    visited_dfs = set()
    dfs(graph, l, 0, (0, 0))

    # util.print_states(graph.keys(), '.')

    prob1, prob2 = max_dist(graph)
    print(prob1)
    print(prob2)
Example no. 16
def solve(year: int, day: int) -> None:
    click.echo(f'Year {year}, Day {day}')
    module = import_module(f'{year}.{day:02d}')
    data = read_input(year, day)

    tc1 = read_tc(year, day, 1)
    if tc1:
        test(module.solve_1, tc1)
    part_1_time, part_1_solution = timed(module.solve_1)(data)
    click.echo(f'Solution 1: {part_1_solution}, Took: {part_1_time}ms')

    tc2 = read_tc(year, day, 2)
    if tc2:
        test(module.solve_2, tc2)
    part_2_time, part_2_solution = timed(module.solve_2)(data)
    click.echo(f'Solution 2: {part_2_solution}, Took: {part_2_time}ms')
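A minimal sketch of the timed() wrapper assumed above (hypothetical, not part of the original source): it returns the elapsed milliseconds together with the wrapped function's result.

import time

def timed(fn):
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        result = fn(*args, **kwargs)
        elapsed_ms = round((time.perf_counter() - start) * 1000)
        return elapsed_ms, result
    return wrapper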
Example no. 17
def problem1():
    lines = util.read_input(filename)

    count = 0
    i = 0
    while i < len(lines) and lines[i]:
        before = util.parse_int_line(lines[i])
        op = util.parse_int_line(lines[i + 1])
        after = util.parse_int_line(lines[i + 2])

        m = test_op(before, op, after)
        if len(m) >= 3:
            count += 1

        i += 4

    print(count)
Example no. 18
def p2(file_name):

    count_by_letter = {}
    data = read_input(file_name)

    for a in string.ascii_lowercase:
        heap = []
        for c in data[0]:
            if c.lower() != a:
                heap.append(c)

        new_heap = []
        for c in heap:
            add_to_heap(c, new_heap)
        count_by_letter[a] = len(new_heap)

    return min(count_by_letter.items(), key=lambda x: x[1])
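A minimal sketch of the add_to_heap() helper assumed above (hypothetical): the "heap" is really a stack, and a new unit cancels the unit on top when both are the same letter in opposite case.

def add_to_heap(c, heap):
    # same letter, different case -> the two units react and annihilate
    if heap and heap[-1] != c and heap[-1].lower() == c.lower():
        heap.pop()
    else:
        heap.append(c)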
Example no. 19
def get_fabric_overlap(file_name: str) -> int:
    '''
    Return square pixel area of overlapping fabric
    '''
    data = read_input(file_name)
    fabric = np.zeros((1000, 1000))
    for line in data:
        claim = parse_claim(line)

        top_offset = claim['top_offset']
        left_offset = claim['left_offset']

        for i in range(top_offset, top_offset + claim['height']):
            for j in range(left_offset, left_offset + claim['width']):
                fabric[i][j] += 1.0

    return np.count_nonzero(fabric > 1)
Example no. 20
def solve(input_path):
  # Use an array so we can edit-in-place without making copies of the whole instruction set.
  instructions = list(util.read_input(input_path))
  for pos in range(len(instructions)):
    saved_instruction = instructions[pos]
    cmd = saved_instruction[0]
    val = saved_instruction[1]
    r = None
    if cmd == 'nop':
      instructions[pos] = ('jmp', val)
      r = final_acc(instructions)
    elif cmd == 'jmp':
      instructions[pos] = ('nop', val)
      r = final_acc(instructions)
    instructions[pos] = saved_instruction
    if r is not None:
      return r
  raise AssertionError("Tried everything but the program never terminated")
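A minimal sketch of the final_acc() helper assumed above (hypothetical): run the program, return None if it loops forever, and return the accumulator if execution falls off the end.

def final_acc(instructions):
    executed = [False] * len(instructions)
    pos = 0
    acc = 0
    while pos < len(instructions):
        if executed[pos]:
            return None  # infinite loop detected
        executed[pos] = True
        cmd, val = instructions[pos]
        if cmd == 'acc':
            acc += val
            pos += 1
        elif cmd == 'jmp':
            pos += val
        else:  # 'nop'
            pos += 1
    return acc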
Example no. 21
def problem1():
    lines = util.read_input(filename)
    stop = int(lines[0])
    # stop = 2018
    scores = [3, 7]

    elf1 = 0
    elf2 = 1
    while len(scores) < stop + 10:
        s = scores[elf1] + scores[elf2]
        for d in str(s):
            scores.append(int(d))
        elf1 = (elf1 + scores[elf1] + 1) % len(scores)
        elf2 = (elf2 + scores[elf2] + 1) % len(scores)

    for s in scores[stop:stop + 10]:
        print(s, end="")
    print()
Example no. 22
def problem2():
    lines = util.read_input(filename)

    possible_ops = {}

    i = 0
    while i < len(lines) and lines[i]:
        before = util.parse_int_line(lines[i])
        op = util.parse_int_line(lines[i + 1])
        after = util.parse_int_line(lines[i + 2])
        i += 4

        m = test_op(before, op, after)
        opcode = op[0]
        possible_ops[opcode] = possible_ops.get(opcode, []) + [set(m)]

    op_map = {}
    used_ops = set()
    while len(op_map) < len(possible_ops):
        for k, v in possible_ops.items():
            op_set = None
            for op_results in v:
                if op_set is None:
                    op_set = op_results - used_ops
                else:
                    op_set &= op_results
            if len(op_set) != 1:
                print("failed to find unique op!! " + str(op_set))
            else:
                opcode = op_set.pop()
                op_map[k] = opcode
                used_ops |= {opcode}

    i += 2
    lines = lines[i:]
    ops = list(map(lambda l: util.parse_int_line(l), lines))
    regs = [0, 0, 0, 0]

    for op in ops:
        op_method = OPS[op_map[op[0]]]
        op_method(regs, op)

    print(regs)
Example no. 23
def solve(input_path):
    instructions = list(util.read_input(input_path))
    executed = [False] * len(instructions)
    pos = 0
    acc = 0
    while not executed[pos]:
        executed[pos] = True
        cmd = instructions[pos][0]
        val = instructions[pos][1]
        if cmd == 'acc':
            acc = acc + val
            pos = pos + 1
        elif cmd == 'jmp':
            pos = pos + val
        elif cmd == 'nop':
            pos = pos + 1
        else:
            raise AssertionError('Unrecognized operation ' + cmd)
    return acc
Example no. 24
def problem1():
    pad = 40
    lines = util.read_input(filename)
    initial, trans = parse(lines)
    state = '.' * pad + initial + '.' * pad
    for gen in range(20):
        print(state)
        newState = ".."
        for i in range(2, len(state) - 2):
            newState += trans.get(state[i-2:i+3], '.')
        newState += ".."
        state = newState
    print(state)

    total = 0
    for i in range(len(state)):
        if state[i] == "#":
            total += (i - pad)
    print(total)
Example no. 25
    def run(self):
        # create the conference object
        conference = Conference(
            title=self.global_config.get('conference', 'title'),
            acronym=self.global_config.get('conference', 'acronym'),
            day_count=int(self.global_config.get('conference', 'day_count')),
            start=parse_date(self.global_config.get('conference', 'start')),
            end=parse_date(self.global_config.get('conference', 'end')),
            time_slot_duration=parse_duration(
                self.global_config.get('conference', 'time_slot_duration')))
        slug = StandardSlugGenerator(conference)
        schedule = Schedule(conference=conference)
        rec_license = self.global_config.get('conference', 'license')

        content = read_input(self.config['path'])
        with StringIO(content) as csv_file:
            reader = csv.DictReader(csv_file, delimiter=',')
            for row in reader:
                if row['Room'] == '' and row['ID'] == '' and row['Title'] == '':
                    continue

                schedule.add_room(row['Room'])
                speakers = {}
                for pair in row['Speakers'].split('|'):
                    uid, _, name = pair.partition(":")
                    speakers[int(uid)] = name
                schedule.add_event(
                    int(row['Day']), row['Room'],
                    Event(uid=row['ID'],
                          date=parse_datetime(row['Date'] + 'T' +
                                              row['Start'] + ':00'),
                          start=parse_time(row['Start']),
                          duration=parse_duration(row['Duration']),
                          slug=slug,
                          title=row['Title'],
                          description=row.get('Description', ''),
                          abstract=row.get('Abstract', ''),
                          language=row['Language'],
                          persons=speakers,
                          download_url=row.get('File URL', ''),
                          recording_license=rec_license))

        return schedule
Example no. 26
def problem2(filename):
    for boost in range(10000):
        lines = util.read_input(filename)

        armies = parse(lines)

        for a in armies[0]:
            a.damage += boost

        remaining = fight(armies[0] + armies[1])
        if remaining is None:
            continue

        s = sum(map(lambda a: a.size, remaining))
        team = None
        if len(remaining):
            team = remaining[0].side
        print(boost, s, team)
        if team == 1:
            return
Example no. 27
def test_trie():
    # some basic functions
    tt = autocomplete.Trie()
    tt.add("Hello world")
    assert len(tt) == 1
    assert "Hello world" in tt
    tt.add("Hello World")
    assert len(tt) == 2
    assert "Hello World" in tt
    assert "Hello world" in tt
    assert "hello world - i'm not supposed to be in the trie" not in tt
    assert "Hello World aaaaaaaaaaaaaa" not in tt
    tt.add("Hello World")
    assert len(tt) == 2
    assert "Hello World" in tt
    assert "Hello world" in tt
    assert "hello world" not in tt
    assert "" not in tt
    assert "H" not in tt
    assert "" not in tt
    assert tt.__contains__("Hello", check_end=False)
    tt.clear()
    assert len(tt) == 0
    print("basic tests cleared")

    # set equivalence and memory constraints
    data = util.read_input("data/sample_conversations.json")
    all_convos_set = set()
    for line, count in util.get_customer_service_phrases(data).items():
        for i in range(count):
            # test duplication
            all_convos_set.add(line)
            tt.add(line)

    for line in all_convos_set:
        assert line in tt

    assert len(tt) == len(all_convos_set)
    print("I have %d phrases saved now" % len(tt))
    print("large tests cleared")
    util.save_object(tt, "test_autocomplete_state.pkl")
Example no. 28
def problem1():
    lines = util.read_input(filename)
    regs = [0] * 6
    ip_reg = util.parse_int_line(lines[0])[0]
    ops = []
    args = []
    for l in lines[1:]:
        ops.append(l.split(" ")[0])
        args.append(util.parse_int_line(l))

    while True:
        ip = regs[ip_reg]
        if not (ip >= 0 and ip < len(ops)):
            break
        o = ops[ip]
        a = args[ip]
        operations[o](regs, a)
        regs[ip_reg] += 1
        # print(regs[ip_reg], regs)

    print(regs, regs[0])
Example no. 29
def main():
    train = 1
    window_sz = 5  # n words to the left, x words to the right
    embeddings_sz = 10

    if train:
        word2idx, idx2word, sentences_tokens, corpus = util.read_input(
            './data/test.en')
        X, Y = get_features(sentences_tokens, word2idx, window_sz, corpus)

        X = np.array(X, dtype=float)
        Y = np.array(Y, dtype=float)
        print('X=', X.shape, 'Y=', Y.shape, 'corpus=', len(corpus))
        model = Sequential()
        model.add(Dense(embeddings_sz, activation='linear', input_dim=1))
        #model.add(Flatten())
        model.add(Dense(len(corpus), activation='softmax'))
        model.compile(loss='binary_crossentropy', optimizer='rmsprop')
        model.fit(X, Y, epochs=100, batch_size=128)
    print('shape=', len(model.layers[0].get_weights()), 'weights=',
          model.layers[0].get_weights()[0])
Example no. 30
def problem2():
    pad = 1020
    lines = util.read_input(filename)
    initial, trans = parse(lines)
    state = '.' * pad + initial + '.' * pad
    for gen in range(1000):
        print(state)
        newState = ".."
        for i in range(2, len(state) - 2):
            newState += trans.get(state[i-2:i+3], '.')
        newState += ".."
        state = newState
    print(state)

    total = 0
    count = 0
    for i in range(len(state)):
        if state[i] == "#":
            count += 1
            total += (i - pad)
    print(total, count)
    print((50000000000 - 1000) * count + total)
Example no. 31
        (status,TagType) = MIFAREReader.MFRC522_Request(MIFAREReader.PICC_REQIDL)
        
        # Get the UID of the card
        (status,uid) = MIFAREReader.MFRC522_Anticoll()

        # If we have the UID, continue
        if status == MIFAREReader.MI_OK:
            # Print UID
            uid_str = '.'.join([str(id_byte).zfill(3) for id_byte in uid[:4]])
            print "Card id: " + uid_str

            # If the scanned card ID is in the database, prompt for the password
            if uid_str in ids: 
                write_display(lcd, "Digite a senha: ")
                lcd.setPosition(2,0)
                password = read_input(lcd)
                # If the entered password matches the card, check the user type
                if password == ids[uid_str][0] \
                    or password == (ids[uid_str][0] + SECURITY_CODE):
                    
                    if password == (ids[uid_str][0] + SECURITY_CODE):
                        log.panic(uid_str)
                        if ids[uid_str][1] == TYPE_USER:
                            msg = "{} - Senha de pânico inserida - CPF: {}".format(
                                datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S"), ids[uid_str][2])
                        else:
                            msg = "{} - Senha de pânico inserida - ADMIN".format(
                                datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
                        q.put(msg)

                    print "{} autenticado".format(ids[uid_str][1])