def test_1():
    from util import read_file_to_list
    from util import process
    data = read_file_to_list("testinput.txt")
    terminate, acc = process(data)
    assert terminate is False
    assert acc == 5
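
Every example on this page relies on a project-local util.read_file_to_list helper that is never shown; several projects clearly use richer variants that split the file into sections (rules/rows, fields/tickets, two decks). As a rough, hypothetical sketch of the simplest form, assuming it just returns the stripped lines of a text file:

# Hypothetical sketch of a util.read_file_to_list helper: one list entry per
# line, trailing newlines removed. The real projects here use richer variants.
def read_file_to_list(path):
    with open(path) as f:
        return [line.rstrip("\n") for line in f]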
Example #2
def test_play_recursive():
    from util import play_recursive
    from util import read_file_to_list
    from util import count
    p1, p2 = read_file_to_list("testinput.txt")
    p, _ = play_recursive(p1, p2, 0)
    assert 291 == count(p)
Example #3
def test_play():
    from util import play
    from util import read_file_to_list
    from util import count
    p1, p2 = read_file_to_list("testinput.txt")
    p = play(p1, p2)
    assert 306 == count(p)
Example #4
def test_2():
    from util import read_file_to_list
    from util import countBags

    data = read_file_to_list("testinput2.txt")
    result = countBags(data, "shiny gold") - 1
    assert 126 == result
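
For context, a countBags helper like the one exercised above is usually a short recursion over parsed bag rules. The sketch below is hypothetical: it assumes the rules are already parsed into a dict mapping a colour to (count, colour) pairs, and, like the call above, it counts the outer bag itself, which is why the test subtracts one.

# Hypothetical recursive bag counter; `rules` is assumed to map a colour to a
# list of (count, colour) pairs describing its direct contents.
def count_bags(rules, colour):
    total = 1  # the bag itself
    for count, inner in rules.get(colour, []):
        total += count * count_bags(rules, inner)
    return total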
Example #5
def test_count():
    from util import read_file_to_list
    from util import find_solution
    from util import find_pattern
    from util import concat_Tiles

    tiles = read_file_to_list("testinput.txt")

    # optimization: create each tile's variants once up front
    for tile in tiles:
        tile.createVariants()

    solution = find_solution(tiles, [], set(), 0, 0)

    assert 20899048083289 == (solution[0][0].id * solution[0][-1].id *
                              solution[-1][0].id * solution[-1][-1].id)

    monster = []
    monster.append("                  # ")
    monster.append("#    ##    ##    ###")
    monster.append(" #  #  #  #  #  #   ")

    image = concat_Tiles(solution)
    image.createVariants()

    # The monster pattern above contains 15 '#' cells, hence "len(r) * 15".
    for m in image.getVariants():
        r = find_pattern(m, monster)
        if len(r) == 0:
            continue
        assert 273 == m.count() - len(r) * 15
Example #6
def test_2():
    from util import read_file_to_list
    from util import process
    from util import fix_command
    data = read_file_to_list("testinput.txt")
    data1 = fix_command(data, 7)
    terminate, acc = process(data1)
    assert terminate is True
    assert acc == 8
Example #7
def test_process():
    from util import read_file_to_list
    from util import match
    rules, rows = read_file_to_list("testinput2.txt")

    valid = 0
    for row in rows:
        if match(rules, row):
            valid += 1
    assert valid == len(rows)
Example #8
def test_1():
    from util import read_file_to_list
    from util import hasShinyGold
    result = 0
    data = read_file_to_list("testinput.txt")
    for key in data:
        if key == "shiny gold":
            continue  # skip the shiny gold bag itself
        if hasShinyGold(data, key):
            result = result + 1
    assert 4 == result
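
A hasShinyGold-style check is typically the same recursion turned into a reachability test: follow the contents of a bag and stop as soon as a shiny gold bag can be reached. A hypothetical sketch, again assuming a pre-parsed colour-to-contents mapping:

# Hypothetical reachability check; `rules` maps a colour to the colours it can
# directly contain.
def has_shiny_gold(rules, colour):
    return any(
        inner == "shiny gold" or has_shiny_gold(rules, inner)
        for inner in rules.get(colour, [])
    )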
Example #9
def test_process():
    from util import read_file_to_list
    from util import match
    rules, rows = read_file_to_list("testinput.txt")

    assert 6 == len(rules)
    assert 5 == len(rows)

    assert match(rules, rows[0])
    assert not match(rules, rows[1])
    assert match(rules, rows[2])
    assert not match(rules, rows[3])
    assert not match(rules, rows[4])
Example #10
def get_type1_domains(self):
    type1_domains = list()
    if not self.domains_file:
        self.type1_domains = type1_domains
        self.features['type1_domains_found'] = 0
        return
    type1_domains_from_file = read_file_to_list(self.domains_file)
    type1_domains_from_file = [self.unify_domain(domain) for domain in type1_domains_from_file]
    for domain in self.domains:
        unified_domain = self.unify_domain(domain)
        if unified_domain in type1_domains_from_file and unified_domain not in type1_domains:
            type1_domains.append(unified_domain)
    self.type1_domains = type1_domains
    self.features['type1_domains_found'] = len(type1_domains)
Example #11
def test_read():
    from util import read_file_to_list
    from util import scanning
    fields, tickets = read_file_to_list('testinput.txt')

    assert 3 == len(fields)
    assert 5 == len(tickets)
    assert 3 == len(tickets[0])

    error_rate, positions = scanning(fields, tickets)
    assert 71 == error_rate
    assert positions[0] == {'row'}
    assert positions[1] == {'class'}
    assert positions[2] == {'seat'}
Example #12
def test_count():
    from util import read_file_to_list
    from util import flip_tiles
    from util import daily_flip_tiles
    from util import Tiles
    lines = read_file_to_list("testinput.txt")
    tiles = Tiles()
    for line in lines:
        flip_tiles(tiles, line)
    c = tiles.countBlack()
    assert 10 == c

    daily_flip_tiles(tiles)
    c = tiles.countBlack()
    assert 15 == c
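
The flip_tiles call above has to turn a line such as "esenee" into a walk on a hex grid. One common approach, shown here purely as an assumption rather than this project's implementation, is to step through the string with axial coordinates:

# Hypothetical parser for hex-direction strings (e, se, sw, w, nw, ne) using
# axial coordinates; returns the coordinate of the tile to flip.
HEX_STEPS = {"e": (1, 0), "w": (-1, 0), "ne": (1, -1),
             "nw": (0, -1), "se": (0, 1), "sw": (-1, 1)}

def parse_hex_line(line):
    q = r = 0
    i = 0
    while i < len(line):
        step = line[i:i + 2] if line[i] in "ns" else line[i]
        dq, dr = HEX_STEPS[step]
        q, r = q + dq, r + dr
        i += len(step)
    return q, r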
Example #13
from util import read_file_to_list
from part1 import part_one_solution
from part2 import part_two_solution

tests = [{
    'right': 1,
    'down': 1
}, {
    'right': 3,
    'down': 1
}, {
    'right': 5,
    'down': 1
}, {
    'right': 7,
    'down': 1
}, {
    'right': 1,
    'down': 2
}]

if __name__ == '__main__':
    inputs = read_file_to_list('input.txt')

    part_one_answer = part_one_solution(inputs)
    print(f"Part 1: {part_one_answer}")

    part_two_answer = part_two_solution(inputs, tests=tests)
    print(f'Part 2: {part_two_answer}')
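
The tests list above lists the five slopes used for part two, where the answer is the product of tree counts over those slopes. The part1/part2 modules are not shown; as a hypothetical sketch, assuming each input line is one row of the map with '#' marking trees:

# Hypothetical slope counting behind a part_two_solution-style function; the
# real part1/part2 modules are not shown on this page.
def count_trees(rows, right, down):
    trees, col = 0, 0
    for row in rows[::down]:
        if row[col % len(row)] == "#":
            trees += 1
        col += right
    return trees

def product_of_slopes(rows, slopes):
    result = 1
    for slope in slopes:
        result *= count_trees(rows, slope["right"], slope["down"])
    return result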
Example #14
    def __init__(self, config):
        """Creates a Dataset object.

        Args:
            config: dict, session configuration parameters.
        """

        if not os.path.exists(config['dataset']['path']):
            print('Error: No dataset found at specified path.')
            exit()

        self._dataset_path = config['dataset']['path']
        self._dataset_data_path = os.path.join(self._dataset_path, 'data/')
        self._dataset_metadata_path = os.path.join(self._dataset_path,
                                                   'metadata/')

        # Suffix of files in the dataset featuring uniform sampling
        self._uniform_file_suffix_ = config['dataset']['uniform_file_suffix']

        # The spatial extent of a single sample
        self._training_sample_spatial_size = np.array(
            config['dataset']['training_samples']['spatial_size'])

        # The amount by which to move spatially when extracting samples from an item in the dataset
        self._sample_step_size = np.mean([
            self._training_sample_spatial_size[0],
            self._training_sample_spatial_size[1]
        ]) / 3

        self._voxel_size = config['model']['output_voxel_size']
        self._validity_thresholds = config['dataset']['validity_thresholds']
        self._special_weights = config['dataset']['special_weights']

        # The locations in each dataset item where a valid sample can be extracted from
        self._sample_locations = {}

        self._num_samples = {'train': 0, 'val': 0}
        self._points = {}
        self._classes = {}

        # Load metadata
        self._item_ids = {
            'train':
            util.read_file_to_list(
                os.path.join(self._dataset_metadata_path, 'train_split.txt')),
            'val':
            util.read_file_to_list(
                os.path.join(self._dataset_metadata_path,
                             'validation_split.txt'))
        }
        self._all_classes = util.read_file_to_list(
            os.path.join(self._dataset_metadata_path, 'class_names.txt'))
        self._learnable_classes = list(
            config['dataset']['classes'])  # Make a copy
        self._colors = [[
            int(c) for c in color.split(' ')
        ] for color in util.read_file_to_list(
            os.path.join(self._dataset_metadata_path, 'colors.txt'))]

        # Map dataset classes to learnable classes
        self._all_to_learnable_class_mapping = {}
        for learnable_class_i, learnable_class in enumerate(
                self._learnable_classes):

            if learnable_class not in self._all_classes:
                print("Error: classes list, not all classes present in class_names.txt")
                exit()

            self._all_to_learnable_class_mapping[self._all_classes.index(
                learnable_class)] = learnable_class_i

        self._learnable_classes += ['empty', 'masked']
        self._masked_class_id = len(self._learnable_classes) - 1
        self._empty_class_id = len(self._learnable_classes) - 2
        self._num_dataset_classes = len(self._all_classes)
        self._num_learnable_classes = len(self._learnable_classes)

        # Add these values to config so they are available during evaluation/inference
        config['dataset']['empty_class_id'] = self._empty_class_id
        config['dataset'][
            'num_learnable_classes'] = self._num_learnable_classes

        config_for_hash = config['dataset'].copy()
        config_for_hash.pop('refresh_cache', None)

        self.cache_hash_ = hash("".join(self._learnable_classes) +
                                "".join(self._item_ids['train']) +
                                "".join(self._item_ids['val']) +
                                str(self._sample_step_size) +
                                json.dumps(config_for_hash, sort_keys=True))
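
The constructor reads a number of nested configuration keys. For orientation, a minimal config covering exactly the keys accessed above could look like the following; only the key names come from the code, every value is a placeholder:

# Illustrative config with only the keys the constructor reads; all values are
# placeholders, not taken from the real project.
example_config = {
    'dataset': {
        'path': '/path/to/dataset',
        'uniform_file_suffix': '_uniform',
        'training_samples': {'spatial_size': [64, 64, 16]},
        'validity_thresholds': [0.5, 0.9],
        'special_weights': {},
        'classes': ['wall', 'floor', 'chair'],
        'refresh_cache': False,
    },
    'model': {'output_voxel_size': 0.05},
}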
Example #15
#!/usr/bin/env python

from util import read_file_to_list
from part1 import part_one_solution
from part2 import part_two_solution

if __name__ == '__main__':
    entries = read_file_to_list('input.txt')

    part_one_answer = part_one_solution(entries)
    print(f'Part 1: {part_one_answer}')

    part_two_answer = part_two_solution(entries)
    print(f'Part 2: {part_two_answer}')