def test_case_34():
    with open('training/' + os.listdir('training/')[34]) as f:
        raw_task = json.load(f)
    base_entity_finder = EntityFinder(
        lambda grid: find_components(grid, directions=ALL_DIRECTIONS))
    task = tuplefy_task(raw_task)
    inp = task['train'][0]['input']
    out = task['train'][0]['output']
    entities = base_entity_finder(inp)
    color_8 = Property(lambda x: frozenset({8}), np.log(10) - 1,
                       name=f'color {8}',
                       output_types=frozenset({'color'}),
                       entity_finder=base_entity_finder)
    color_0 = Property(lambda x: frozenset({0}), np.log(10) - 1,
                       name=f'color {0}',
                       output_types=frozenset({'color'}),
                       entity_finder=base_entity_finder)
    take_color = Property(lambda x: x.entity.colors(),
                          name='the colors',
                          output_types=frozenset({'color'}),
                          entity_finder=base_entity_finder,
                          nll=1,
                          requires_entity=True)
    select_8 = Selector.make_property_selector(take_color, color_8, True)
    select_not_8 = Selector.make_property_selector(take_color, color_8, False)
    select_not_0 = Selector.make_property_selector(take_color, color_0, False)
    select_not_0.nll = np.log(2)
    select_not_0_nor_8 = Selector.intersect(select_not_0, select_not_8)
    selected_entities = select_not_0_nor_8.select(entities)

    collision = Relation(
        lambda entity1, entity2: next(
            iter(collision_directions(entity1, entity2, adjustment=1)))
        if len(collision_directions(entity1, entity2)) == 1 else None,
        nll=1 + np.log(2),
        name='the unique collision vector to',
        output_types=frozenset({'vector'}))
    collision_with_8 = Property.from_relation_selector(collision, select_8,
                                                       base_entity_finder)
    move_into_8 = Transformer(
        lambda entities, grid: move(entities,
                                    vector_property=collision_with_8,
                                    copy=True,
                                    extend_grid=False),
        nll=collision_with_8.nll + np.log(2),
        name=f"copy them by ({collision_with_8})")
    new_entities, new_grid = move_into_8.transform(selected_entities, inp)
    assert new_grid == out

    my_entity_finder = base_entity_finder.compose(select_not_0_nor_8)
    my_predictor = Predictor(my_entity_finder, move_into_8)
    for case in task['train'] + task['test']:
        assert my_predictor.predict(case['input']) == case['output']
    my_predictor_2 = Predictor(base_entity_finder, move_into_8)
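
# The test above hinges on Predictor gluing an entity finder to a transformer.
# A minimal sketch of that composition, assuming predict() simply runs the finder
# and then the transformer and keeps the resulting grid (hypothetical helper,
# not the project's Predictor API):
def _sketch_predict(entity_finder, transformer, grid):
    """Illustrative only: find entities in the grid, transform them, return the new grid."""
    entities = entity_finder(grid)
    _, new_grid = transformer.transform(entities, grid)
    return new_grid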
def test_from_relation_selector():
    with open('training/' + os.listdir('training/')[7]) as f:
        raw_case7 = json.load(f)
    case7 = tuplefy_task(raw_case7)
    inp = case7['train'][0]['input']
    base_entity_finder = EntityFinder(find_components)
    entities = base_entity_finder(inp)
    take_color = Property(lambda x: x.entity.colors(),
                          name='the colors',
                          output_types=frozenset({'color'}),
                          entity_finder=base_entity_finder,
                          nll=1)
    color_2 = Property(lambda x, i=2: frozenset({2}), np.log(10) - 2,
                       name=f'color {2}',
                       output_types=frozenset({'color'}),
                       entity_finder=base_entity_finder)
    color_8 = Property(lambda x, i=8: frozenset({8}), np.log(10) - 2,
                       name=f'color {8}',
                       output_types=frozenset({'color'}),
                       entity_finder=base_entity_finder)
    unique = OrdinalProperty(lambda x: pick_the_unique_value(x),
                             nll=np.log(2),
                             name='take the value that is unique',
                             input_types=TYPES)
    max_ord = OrdinalProperty(lambda x: nth_ordered(x, 0, use_max=True),
                              nll=0,
                              name=f'take the {1} largest')

    find_collision_vect_8 = Property.from_relation_selector(
        collision_relation,
        Selector.make_property_selector(take_color, color_8),
        entity_finder=base_entity_finder,
        ordinal_property=unique)
    # print(type(find_collision_vect_8(entities[1], inp)))
    assert find_collision_vect_8(entities[1], inp) == (6, 0)

    find_collision_vect_2 = Property.from_relation_selector(
        collision_relation,
        Selector.make_property_selector(take_color, color_2),
        entity_finder=base_entity_finder,
        ordinal_property=unique)
    assert find_collision_vect_2(entities[2], inp) == (-6, 0)
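
# The OrdinalProperty wrappers above reduce a collection of candidate values to a
# single value.  A rough sketch of what nth_ordered might do, assuming it returns
# the n-th value in sorted order (largest-first when use_max is set); this is an
# illustration under that assumption, not the project's implementation:
def _sketch_nth_ordered(values, n, use_max=False):
    ordered = sorted(values, reverse=use_max)
    return ordered[n] if n < len(ordered) else None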
def test_transformers_predictors():
    with open('training/' + os.listdir('training/')[7]) as f:
        raw_case7 = json.load(f)
    case7 = tuplefy_task(raw_case7)
    inp = case7['train'][0]['input']
    out = case7['train'][0]['output']
    base_entity_finder = EntityFinder(find_components)
    entities = base_entity_finder(inp)
    take_color = Property(lambda x: x.entity.colors(),
                          name='the colors',
                          output_types=frozenset({'color'}),
                          entity_finder=base_entity_finder,
                          nll=1)
    color_2 = Property(lambda x, i=2: frozenset({2}), np.log(10) - 2,
                       name=f'color {2}',
                       output_types=frozenset({'color'}),
                       entity_finder=base_entity_finder)
    color_8 = Property(lambda x, i=8: frozenset({8}), np.log(10) - 2,
                       name=f'color {8}',
                       output_types=frozenset({'color'}),
                       entity_finder=base_entity_finder)
    select_8 = Selector.make_property_selector(take_color, color_8)
    select_2 = Selector.make_property_selector(take_color, color_2)
    max_ord = OrdinalProperty(lambda x: nth_ordered(x, 0, use_max=True),
                              nll=0,
                              name=f'take the {1} largest')
    find_collision_vect_to_8 = Property.from_relation_selector(
        collision_relation,
        select_8,
        entity_finder=base_entity_finder,
        ordinal_property=max_ord)
    my_transformer = Transformer(
        lambda entities, grid: move(entities,
                                    vector_property=find_collision_vect_to_8),
        name=f'move them by ({find_collision_vect_to_8})',
        nll=1 + np.log(2))
    assert my_transformer.transform(select_2.select(entities))[1] == out

    select_2_finder = base_entity_finder.compose(select_2)
    my_predictor = Predictor(select_2_finder, my_transformer)
    assert my_predictor.predict(inp) == out
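
# base_entity_finder.compose(select_2) narrows a finder to the entities a selector
# accepts.  A minimal sketch of that idea, assuming compose simply wraps the original
# finder and filters its output (hypothetical stand-in, not the real EntityFinder method):
def _sketch_compose(entity_finder, selector):
    def composed(grid):
        return selector.select(entity_finder(grid))
    return composed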
def selector_iterator(task: dict, base_entity_finder: EntityFinder, max_nll: float = 20.):
    start_time = time.perf_counter()
    inputs = [case['input'] for case in task['train']]
    inputs.extend([case['input'] for case in task['test']])
    earlier_selectors = []
    rounds = 0

    grid_properties, entity_properties = generate_base_properties(task, base_entity_finder)
    grid_properties, entity_properties = (filter_unlikelies(grid_properties, max_nll),
                                          filter_unlikelies(entity_properties, max_nll))
    entity_properties.sort()
    entity_properties = [entity_property for entity_property in entity_properties
                         if entity_property.validate_and_register(task)]

    # # Make all grid properties that are the same for all training examples less likely
    # for grid_property in grid_properties:
    #     if len({grid_property(None, case['input']) for case in task['train']}) == 1 \
    #             and not grid_property.is_constant:
    #         grid_property.nll += 1

    grid_properties = filter_unlikelies(grid_properties, max_nll)
    grid_properties.sort()
    grid_properties = [grid_property for grid_property in grid_properties
                       if grid_property.validate_and_register(task)]

    entity_properties.sort()
    grid_properties.sort()
    all_properties = entity_properties + grid_properties
    all_properties.sort()

    trivial_selector = Selector(lambda entity, grid: True, name='true', nll=0)
    queue = [trivial_selector]
    for entity_property in entity_properties:
        for target_property in all_properties:
            if (entity_property.count != target_property.count) and \
                    (combine_selector_nll(entity_property, target_property) <= max_nll):
                for the_same in [True, False]:
                    new_selector = Selector.make_property_selector(entity_property,
                                                                   target_property,
                                                                   the_same)
                    if str(entity_property) == "the colors" and \
                            str(target_property) == "color 0" and not the_same:
                        # Specially make selecting all the non-0 color entities more likely
                        new_selector.nll = SELECT_NOT_0_NLL
                    if new_selector.validate_and_register(task, base_entity_finder, max_nll):
                        heapq.heappush(queue, new_selector)

    earlier_selectors = []
    while queue:
        my_selector = heapq.heappop(queue)
        Selector.previous_selectors.add(str(my_selector))
        yield my_selector
        rounds += 1
        # print(my_selector, my_selector.nll)
        if time.perf_counter() - start_time > MAX_SMALL_TIME:
            return

        # Makes properties where the selection produces unique values in all the training cases
        new_grid_properties = []
        common_property_indices = []
        for i, training_case in enumerate(task['train']):
            training_grid = training_case['input']
            training_entities = base_entity_finder(training_grid)
            training_selected_entities = my_selector.select(training_entities)
            if not training_selected_entities:
                common_property_indices = [set()]
                break
            common_properties = {(i, prop(training_selected_entities[0], training_grid))
                                 for i, prop in enumerate(entity_properties)
                                 if prop.count not in my_selector.fixed_properties}
            for entity in training_selected_entities:
                common_properties &= {(i, prop(entity, training_grid))
                                      for i, prop in enumerate(entity_properties)
                                      if prop.count not in my_selector.fixed_properties}
            # Extract which entity properties give the same result for all entities selected
            common_property_indices.append({prop[0] for prop in common_properties})
        valid_common_properties = set.intersection(*common_property_indices)
        common_grid_properties = [entity_properties[index].add_selector(my_selector)
                                  for index in valid_common_properties]
        common_grid_properties.sort()

        for prop in (prop for prop in entity_properties
                     if prop.count not in my_selector.fixed_properties):
            for ordinal_property in ORDINAL_PROPERTIES:
                if combine_property_selector_nll(prop, my_selector, ordinal_property) <= max_nll:
                    grid_property = prop.add_selector(my_selector, ordinal_property)
                    if grid_property in common_grid_properties:
                        # not a problem
                        grid_property.nll -= 1
                    if grid_property.validate_and_register(task):
                        new_grid_properties.append(grid_property)

        # Makes sure properties are potentially valid on training examples
        # all_cases = task['train'] + task['test']
        # for i, case in enumerate(all_cases):
        #     training_grid = case['input']
        #     training_entities = base_entity_finder(training_grid)
        #     valid_indices = []
        #     for j, entity_prop in enumerate(new_entity_properties):
        #         for entity in training_entities:
        #             if entity_prop(entity, training_grid) is None:
        #                 break
        #         else:
        #             valid_indices.append(j)
        #     new_entity_properties = [new_entity_properties[valid_index]
        #                              for valid_index in valid_indices]

        # # Makes relational entity properties from a chosen entity to some selection
        # new_entity_properties = [
        #     for relation, ordinal_property in
        #     itertools.product(Relation.relations, ORDINAL_PROPERTIES)
        #     if combine_relation_selector_nll(relation, my_selector,
        #                                      ordinal_property) <= max_nll
        # ]
        # # Now register the properties
        # for prop in new_entity_properties:
        #     prop.validate_and_register(task)

        new_entity_properties = []
        for relation, ordinal_property in combine_sorted_queues(
                (Relation.relations, ORDINAL_PROPERTIES),
                max_nll - my_selector.nll - PROPERTY_CONSTRUCTION_COST):
            if combine_relation_selector_nll(relation, my_selector, ordinal_property) < max_nll:
                prop = Property.from_relation_selector(relation, my_selector,
                                                       entity_finder=base_entity_finder,
                                                       ordinal_property=ordinal_property,
                                                       register=False)
                if prop.validate_and_register(task):
                    new_entity_properties.append(prop)

        # Make new ordinal grid properties
        for entity_prop, selector, ordinal_prop in combine_sorted_queues(
                (new_entity_properties, earlier_selectors, ORDINAL_PROPERTIES),
                max_nll - PROPERTY_CONSTRUCTION_COST):
            if combine_property_selector_nll(entity_prop, selector, ordinal_prop) <= max_nll:
                grid_property = entity_prop.add_selector(selector, ordinal_prop)
                if grid_property.validate_and_register(task):
                    new_grid_properties.append(grid_property)
        new_grid_properties.sort()

        # Now add in the new selectors to the queue
        for entity_prop, new_prop in combine_sorted_queues(
                (entity_properties, new_grid_properties),
                max_nll - SELECTOR_CONSTRUCTION_COST):
            # Makes a new selector from the base property and the new property
            if combine_selector_nll(entity_prop, new_prop) <= max_nll:
                for the_same in [True, False]:
                    new_selector = Selector.make_property_selector(entity_prop, new_prop,
                                                                   the_same=the_same)
                    if new_selector.validate_and_register(task, base_entity_finder, max_nll):
                        heapq.heappush(queue, new_selector)

        grid_properties.extend(new_grid_properties)
        grid_properties.sort()
        all_properties = grid_properties + entity_properties
        all_properties.sort()

        for new_prop, grid_prop in combine_sorted_queues(
                (new_entity_properties, all_properties),
                max_nll - SELECTOR_CONSTRUCTION_COST):
            # Makes a new selector from the base property and the new property
            if combine_selector_nll(new_prop, grid_prop) <= max_nll:
                for the_same in [True, False]:
                    new_selector = Selector.make_property_selector(new_prop, grid_prop,
                                                                   the_same=the_same)
                    if new_selector.validate_and_register(task, base_entity_finder, max_nll):
                        heapq.heappush(queue, new_selector)

        entity_properties.extend(new_entity_properties)
        entity_properties.sort()
        earlier_selectors.append(my_selector)
        earlier_selectors.sort()
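
# combine_sorted_queues is used above to enumerate tuples drawn from several
# nll-sorted lists while staying under an nll budget.  A rough sketch of the idea,
# assuming every queued item exposes an .nll attribute and that the helper yields
# combinations whose summed nll fits the budget (illustrative only; the real helper
# presumably exploits the sort order to cut the enumeration short):
import itertools


def _sketch_combine_sorted_queues(queues, budget):
    for combo in itertools.product(*queues):
        if sum(item.nll for item in combo) <= budget:
            yield combo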