def test_knowledge_graph_has_correct_neighbors(self):
    question = "when was the attendance greater than 5000?"
    question_tokens = self.tokenizer.tokenize(question)
    test_file = f'{self.FIXTURES_ROOT}/data/wikitables/sample_table.tagged'
    table_question_context = TableQuestionContext.read_from_file(test_file, question_tokens)
    knowledge_graph = table_question_context.get_table_knowledge_graph()
    neighbors = knowledge_graph.neighbors
    # '5000' is a neighbor of the number and date columns. '-1' is in entities because there
    # is a date column, which is its only neighbor.
    assert set(neighbors.keys()) == {'date_column:year', 'number_column:year',
                                     'string_column:year', 'number_column:division',
                                     'string_column:division', 'string_column:league',
                                     'string_column:regular_season',
                                     'number_column:regular_season', 'string_column:playoffs',
                                     'string_column:open_cup', 'number_column:open_cup',
                                     'string_column:avg_attendance',
                                     'number_column:avg_attendance', '5000', '-1'}
    assert set(neighbors['date_column:year']) == {'5000', '-1'}
    assert neighbors['number_column:division'] == ['5000']
    assert neighbors['string_column:league'] == []
    assert neighbors['string_column:regular_season'] == []
    assert neighbors['string_column:playoffs'] == []
    assert neighbors['string_column:open_cup'] == []
    assert neighbors['number_column:avg_attendance'] == ['5000']
    assert set(neighbors['5000']) == {'date_column:year', 'number_column:division',
                                      'number_column:avg_attendance',
                                      'number_column:regular_season', 'number_column:year',
                                      'number_column:open_cup'}
    assert neighbors['-1'] == ['date_column:year']
def test_rank_number_extraction(self):
    question = "what was the first tamil-language film in 1943?"
    question_tokens = self.tokenizer.tokenize(question)
    test_file = f'{self.FIXTURES_ROOT}/data/corenlp_processed_tables/TEST-1.table'
    table_question_context = TableQuestionContext.read_from_file(test_file, question_tokens)
    _, numbers = table_question_context.get_entities_from_question()
    assert numbers == [("1", 3), ("1943", 9)]
def test_entity_extraction_from_question_with_quotes(self):
    question = 'how many times does "friendly" appear in the competition column?'
    question_tokens = self.tokenizer.tokenize(question)
    test_file = f'{self.FIXTURES_ROOT}/data/wikitables/tables/346.tagged'
    table_question_context = TableQuestionContext.read_from_file(test_file, question_tokens)
    entities, _ = table_question_context.get_entities_from_question()
    assert entities == [('string:friendly', ['string_column:competition'])]
def test_date_column_type_extraction_1(self):
    question = "how many were elected?"
    question_tokens = self.tokenizer.tokenize(question)
    test_file = f'{self.FIXTURES_ROOT}/data/corenlp_processed_tables/TEST-5.table'
    table_question_context = TableQuestionContext.read_from_file(test_file, question_tokens)
    data = table_question_context.table_data[0]
    assert "date_column:first_elected" in data
def test_multiword_entity_extraction(self):
    question = ("was the positioning better the year of the france venue "
                "or the year of the south korea venue?")
    question_tokens = self.tokenizer.tokenize(question)
    test_file = f'{self.FIXTURES_ROOT}/data/corenlp_processed_tables/TEST-3.table'
    table_question_context = TableQuestionContext.read_from_file(test_file, question_tokens)
    entities, _ = table_question_context.get_entities_from_question()
    assert entities == [("string:france", ["string_column:venue"]),
                        ("string:south_korea", ["string_column:venue"])]
def search(tables_directory: str,
           data: JsonDict,
           output_path: str,
           max_path_length: int,
           max_num_logical_forms: int,
           use_agenda: bool,
           output_separate_files: bool,
           conservative_agenda: bool) -> None:
    print(f"Starting search with {len(data)} instances", file=sys.stderr)
    executor_logger = logging.getLogger(
            'weak_supervision.semparse.executors.wikitables_variable_free_executor')
    executor_logger.setLevel(logging.ERROR)
    tokenizer = WordTokenizer()
    if output_separate_files and not os.path.exists(output_path):
        os.makedirs(output_path)
    if not output_separate_files:
        output_file_pointer = open(output_path, "w")
    for instance_data in data:
        utterance = instance_data["question"]
        question_id = instance_data["id"]
        if utterance.startswith('"') and utterance.endswith('"'):
            utterance = utterance[1:-1]
        # For example: csv/200-csv/47.csv -> tagged/200-tagged/47.tagged
        table_file = instance_data["table_filename"].replace("csv", "tagged")
        target_list = instance_data["target_values"]
        tokenized_question = tokenizer.tokenize(utterance)
        table_file = f"{tables_directory}/{table_file}"
        context = TableQuestionContext.read_from_file(table_file, tokenized_question)
        world = WikiTablesVariableFreeWorld(context)
        walker = ActionSpaceWalker(world, max_path_length=max_path_length)
        correct_logical_forms = []
        if use_agenda:
            agenda = world.get_agenda(conservative=conservative_agenda)
            allow_partial_match = not conservative_agenda
            all_logical_forms = walker.get_logical_forms_with_agenda(
                    agenda=agenda,
                    max_num_logical_forms=10000,
                    allow_partial_match=allow_partial_match)
        else:
            all_logical_forms = walker.get_all_logical_forms(max_num_logical_forms=10000)
        for logical_form in all_logical_forms:
            if world.evaluate_logical_form(logical_form, target_list):
                correct_logical_forms.append(logical_form)
        if output_separate_files and correct_logical_forms:
            with gzip.open(f"{output_path}/{question_id}.gz", "wt") as output_file_pointer:
                for logical_form in correct_logical_forms:
                    print(logical_form, file=output_file_pointer)
        elif not output_separate_files:
            print(f"{question_id} {utterance}", file=output_file_pointer)
            if use_agenda:
                print(f"Agenda: {agenda}", file=output_file_pointer)
            if not correct_logical_forms:
                print("NO LOGICAL FORMS FOUND!", file=output_file_pointer)
            for logical_form in correct_logical_forms[:max_num_logical_forms]:
                print(logical_form, file=output_file_pointer)
            print(file=output_file_pointer)
    if not output_separate_files:
        output_file_pointer.close()
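# A minimal driver sketch for `search` (hypothetical, not the original CLI): it assumes the
# input file is a JSON list of instance dicts with the "question", "id", "table_filename",
# and "target_values" keys that `search` reads above. The flag names and defaults here are
# made up for illustration.
if __name__ == "__main__":
    import argparse
    import json

    parser = argparse.ArgumentParser(description="Search for logical forms matching targets.")
    parser.add_argument("tables_directory", type=str, help="Root of the tagged tables")
    parser.add_argument("data_file", type=str, help="JSON list of instances")
    parser.add_argument("output_path", type=str,
                        help="Output file, or directory with --output-separate-files")
    parser.add_argument("--max-path-length", type=int, default=10)
    parser.add_argument("--max-num-logical-forms", type=int, default=100)
    parser.add_argument("--use-agenda", action="store_true")
    parser.add_argument("--output-separate-files", action="store_true")
    parser.add_argument("--conservative-agenda", action="store_true")
    args = parser.parse_args()
    with open(args.data_file) as data_file:
        input_data = json.load(data_file)
    search(args.tables_directory, input_data, args.output_path, args.max_path_length,
           args.max_num_logical_forms, args.use_agenda, args.output_separate_files,
           args.conservative_agenda)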
def setUp(self):
    super().setUp()
    question_tokens = [Token(x) for x in ['what', 'was', 'the', 'last', 'year', '2013', '?']]
    self.table_file = self.FIXTURES_ROOT / 'data' / 'wikitables' / 'sample_table.tagged'
    self.table_context = TableQuestionContext.read_from_file(self.table_file, question_tokens)
    self.world_with_2013 = WikiTablesVariableFreeWorld(self.table_context)
    usl_league_tokens = [Token(x) for x in ['what', 'was', 'the', 'last', 'year', 'with',
                                            'usl', 'a', 'league', '?']]
    self.world_with_usl_a_league = self._get_world_with_question_tokens(usl_league_tokens)
def test_number_comparison_works(self):
    # TableQuestionContext normalizes all strings according to some rules. We want to ensure
    # that the original numerical values of number cells are being correctly processed here.
    tokens = WordTokenizer().tokenize("when was the attendance the highest?")
    tagged_file = self.FIXTURES_ROOT / "data" / "corenlp_processed_tables" / "TEST-2.table"
    context = TableQuestionContext.read_from_file(tagged_file, tokens)
    executor = WikiTablesVariableFreeExecutor(context.table_data)
    result = executor.execute(
            "(select_date (argmax all_rows number_column:attendance) date_column:date)")
    assert result == Date(-1, 11, 10)
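# Illustrative only: a self-contained sketch of the "-1 means unspecified" convention that
# the `Date(-1, 11, 10)` assertion above relies on. The real `Date` class lives in the
# executor module; `SketchDate` is a made-up stand-in that just demonstrates the idea.
from dataclasses import dataclass

@dataclass(frozen=True)
class SketchDate:
    year: int   # -1 when the cell did not specify a year
    month: int  # -1 when the cell did not specify a month
    day: int    # -1 when the cell did not specify a day

    def __str__(self) -> str:
        # Only render the fields that were actually given.
        parts = []
        if self.year != -1:
            parts.append(str(self.year))
        if self.month != -1:
            parts.append(f"{self.month:02d}")
        if self.day != -1:
            parts.append(f"{self.day:02d}")
        return "-".join(parts)

# A cell like "November 10" (with no year) would become SketchDate(-1, 11, 10).
assert str(SketchDate(-1, 11, 10)) == "11-10"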
def test_date_extraction(self):
    question = "how many laps did matt kenset complete on february 26, 2006."
    question_tokens = self.tokenizer.tokenize(question)
    test_file = f'{self.FIXTURES_ROOT}/data/corenlp_processed_tables/TEST-8.table'
    table_question_context = TableQuestionContext.read_from_file(test_file, question_tokens)
    _, number_entities = table_question_context.get_entities_from_question()
    assert number_entities == [("2", 8), ("26", 9), ("2006", 11)]
def test_date_extraction_2(self):
    question = ("how many different players scored for the san jose earthquakes "
                "during their 1979 home opener against the timbers?")
    question_tokens = self.tokenizer.tokenize(question)
    test_file = f'{self.FIXTURES_ROOT}/data/corenlp_processed_tables/TEST-6.table'
    table_question_context = TableQuestionContext.read_from_file(test_file, question_tokens)
    _, number_entities = table_question_context.get_entities_from_question()
    assert number_entities == [("1979", 12)]
def test_number_extraction(self):
    question = ("how many players on the 191617 illinois fighting illini men's "
                "basketball team had more than 100 points scored?")
    question_tokens = self.tokenizer.tokenize(question)
    test_file = f'{self.FIXTURES_ROOT}/data/corenlp_processed_tables/TEST-7.table'
    table_question_context = TableQuestionContext.read_from_file(test_file, question_tokens)
    _, number_entities = table_question_context.get_entities_from_question()
    assert number_entities == [("191617", 5), ("100", 16)]
def test_null_extraction(self):
    question = "on what date did the eagles score the least points?"
    question_tokens = self.tokenizer.tokenize(question)
    test_file = f'{self.FIXTURES_ROOT}/data/corenlp_processed_tables/TEST-2.table'
    table_question_context = TableQuestionContext.read_from_file(test_file, question_tokens)
    entities, numbers = table_question_context.get_entities_from_question()
    # "Eagles" does not appear in the table.
    assert entities == []
    assert numbers == []
def test_string_column_types_extraction(self):
    question = "how many were elected?"
    question_tokens = self.tokenizer.tokenize(question)
    test_file = f'{self.FIXTURES_ROOT}/data/corenlp_processed_tables/TEST-10.table'
    table_question_context = TableQuestionContext.read_from_file(test_file, question_tokens)
    data = table_question_context.table_data[0]
    assert "string_column:birthplace" in data
    assert "string_column:advocate" in data
    assert "string_column:notability" in data
    assert "string_column:name" in data
def test_number_and_entity_extraction(self):
    question = "other than m1 how many notations have 1 in them?"
    question_tokens = self.tokenizer.tokenize(question)
    test_file = f"{self.FIXTURES_ROOT}/data/corenlp_processed_tables/TEST-11.table"
    table_question_context = TableQuestionContext.read_from_file(test_file, question_tokens)
    string_entities, number_entities = table_question_context.get_entities_from_question()
    assert string_entities == [("string:m1", ["string_column:notation"]),
                               ("string:1", ["string_column:position"])]
    assert number_entities == [("1", 2), ("1", 7)]
def test_numerical_column_type_extraction(self):
    question = ("how many players on the 191617 illinois fighting illini men's "
                "basketball team had more than 100 points scored?")
    question_tokens = self.tokenizer.tokenize(question)
    test_file = f'{self.FIXTURES_ROOT}/data/corenlp_processed_tables/TEST-7.table'
    table_question_context = TableQuestionContext.read_from_file(test_file, question_tokens)
    data = table_question_context.table_data[0]
    assert "number_column:games_played" in data
    assert "number_column:field_goals" in data
    assert "number_column:free_throws" in data
    assert "number_column:points" in data
def test_get_knowledge_graph(self):
    question = "other than m1 how many notations have 1 in them?"
    question_tokens = self.tokenizer.tokenize(question)
    test_file = f"{self.FIXTURES_ROOT}/data/corenlp_processed_tables/TEST-11.table"
    table_question_context = TableQuestionContext.read_from_file(test_file, question_tokens)
    knowledge_graph = table_question_context.get_table_knowledge_graph()
    entities = knowledge_graph.entities
    # '-1' is not in entities because there are no date columns in the table.
    assert sorted(entities) == ['1', 'number_column:notation', 'number_column:position',
                                'string:1', 'string:m1', 'string_column:mnemonic',
                                'string_column:notation', 'string_column:position',
                                'string_column:short_name', 'string_column:swara']
    neighbors = knowledge_graph.neighbors
    # Each number extracted from the question will have all number and date columns as
    # neighbors. Each string entity extracted from the question will only have the
    # corresponding column as the neighbor.
    assert set(neighbors['1']) == {'number_column:notation', 'number_column:position'}
    assert neighbors['string_column:mnemonic'] == []
    assert neighbors['string_column:short_name'] == []
    assert neighbors['string_column:swara'] == []
    assert neighbors['number_column:position'] == ['1']
    assert neighbors['number_column:notation'] == ['1']
    assert neighbors['string_column:position'] == ['string:1']
    assert neighbors['string:1'] == ['string_column:position']
    assert neighbors['string:m1'] == ['string_column:notation']
    assert neighbors['string_column:notation'] == ['string:m1']
    entity_text = knowledge_graph.entity_text
    assert entity_text == {'1': '1',
                           'string:m1': 'm1',
                           'string:1': '1',
                           'number_column:notation': 'notation',
                           'string_column:notation': 'notation',
                           'string_column:mnemonic': 'mnemonic',
                           'string_column:short_name': 'short name',
                           'string_column:swara': 'swara',
                           'string_column:position': 'position',
                           'number_column:position': 'position'}
def evaluate_logical_form(self, logical_form: str, target_list: List[str]) -> bool:
    """
    Takes a logical form and the list of target values (as strings from the original lisp
    string), and returns True iff the logical form executes to the target list.
    """
    normalized_target_list = [TableQuestionContext.normalize_string(value)
                              for value in target_list]
    target_value_list = evaluator.to_value_list(normalized_target_list)
    try:
        denotation = self.execute(logical_form)
    except ExecutionError:
        logger.warning(f'Failed to execute: {logical_form}')
        return False
    if isinstance(denotation, list):
        denotation_list = [str(denotation_item) for denotation_item in denotation]
    else:
        if isinstance(denotation, Date):
            # Date denotations need special handling: compare against the targets converted
            # to ``Date`` objects, not the normalized strings computed above.
            target_list = [str(self._make_date(target)) for target in target_list]
            target_value_list = evaluator.to_value_list(target_list)
        denotation_list = [str(denotation)]
    denotation_value_list = evaluator.to_value_list(denotation_list)
    return evaluator.check_denotation(target_value_list, denotation_value_list)
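# Usage sketch (the logical form and target strings below are made up for illustration;
# the construction of `context` mirrors the tests in this repository):
#
#     context = TableQuestionContext.read_from_file(table_file, question_tokens)
#     executor = WikiTablesVariableFreeExecutor(context.table_data)
#     is_correct = executor.evaluate_logical_form(
#             "(select_string all_rows string_column:playoffs)",
#             ["Quarterfinals"])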
def text_to_instance(self,  # type: ignore
                     question: str,
                     table_lines: List[List[str]],
                     target_values: List[str],
                     offline_search_output: List[str] = None) -> Instance:
    """
    Reads text inputs and makes an instance. The WikitableQuestions dataset provides tables
    as TSV files pre-tagged using CoreNLP, which we use for training.

    Parameters
    ----------
    question : ``str``
        Input question.
    table_lines : ``List[List[str]]``
        The table content, preprocessed by CoreNLP. See
        ``TableQuestionContext.read_from_lines`` for the expected format.
    target_values : ``List[str]``
        Target values the correct logical forms should evaluate to.
    offline_search_output : ``List[str]``, optional
        List of logical forms, produced by offline search. Not required during test.
    """
    # pylint: disable=arguments-differ
    tokenized_question = self._tokenizer.tokenize(question.lower())
    question_field = TextField(tokenized_question, self._question_token_indexers)
    # TODO(pradeep): We'll need a better way to input CoreNLP processed lines.
    table_context = TableQuestionContext.read_from_lines(table_lines, tokenized_question)
    target_values_field = MetadataField(target_values)
    world = WikiTablesVariableFreeWorld(table_context)
    world_field = MetadataField(world)
    # Note: We're not passing any feature extractors when instantiating the field below.
    # This will make it use all the available extractors.
    table_field = KnowledgeGraphField(table_context.get_table_knowledge_graph(),
                                      tokenized_question,
                                      self._table_token_indexers,
                                      tokenizer=self._tokenizer,
                                      include_in_vocab=self._use_table_for_vocab,
                                      max_table_tokens=self._max_table_tokens)
    production_rule_fields: List[Field] = []
    for production_rule in world.all_possible_actions():
        _, rule_right_side = production_rule.split(' -> ')
        is_global_rule = not world.is_instance_specific_entity(rule_right_side)
        field = ProductionRuleField(production_rule, is_global_rule=is_global_rule)
        production_rule_fields.append(field)
    action_field = ListField(production_rule_fields)
    fields = {'question': question_field,
              'table': table_field,
              'world': world_field,
              'actions': action_field,
              'target_values': target_values_field}
    # We'll make each target action sequence a List[IndexField], where the index is into
    # the action list we made above. We need to ignore the type here because mypy doesn't
    # like `action.rule` - it's hard to tell mypy that the ListField is made up of
    # ProductionRuleFields.
    action_map = {action.rule: i  # type: ignore
                  for i, action in enumerate(action_field.field_list)}
    if offline_search_output:
        action_sequence_fields: List[Field] = []
        for logical_form in offline_search_output:
            try:
                expression = world.parse_logical_form(logical_form)
            except ParsingError as error:
                logger.debug(f'Parsing error: {error.message}, skipping logical form')
                logger.debug(f'Question was: {question}')
                logger.debug(f'Logical form was: {logical_form}')
                logger.debug(f'Table info was: {table_lines}')
                continue
            except:
                logger.error(logical_form)
                raise
            action_sequence = world.get_action_sequence(expression)
            try:
                index_fields: List[Field] = []
                for production_rule in action_sequence:
                    index_fields.append(IndexField(action_map[production_rule], action_field))
                action_sequence_fields.append(ListField(index_fields))
            except KeyError as error:
                logger.debug(f'Missing production rule: {error.args}, skipping logical form')
                logger.debug(f'Question was: {question}')
                logger.debug(f'Table info was: {table_lines}')
                logger.debug(f'Logical form was: {logical_form}')
                continue
            if len(action_sequence_fields) >= self._max_offline_logical_forms:
                break
        if not action_sequence_fields:
            # This is not great, but we're only doing it when we're passed logical form
            # supervision, so we're expecting labeled logical forms, but we can't actually
            # produce the logical forms. We should skip this instance. Note that this affects
            # _dev_ and _test_ instances, too, so your metrics could be over-estimates on the
            # full test data.
            return None
        fields['target_action_sequences'] = ListField(action_sequence_fields)
    if self._output_agendas:
        agenda_index_fields: List[Field] = []
        for agenda_string in world.get_agenda(conservative=True):
            agenda_index_fields.append(IndexField(action_map[agenda_string], action_field))
        if not agenda_index_fields:
            agenda_index_fields = [IndexField(-1, action_field)]
        fields['agenda'] = ListField(agenda_index_fields)
    return Instance(fields)
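# Usage sketch (hypothetical: the reader's name and constructor arguments are assumptions,
# and `table_lines` is shown as raw TSV rows split on tabs, matching the .tagged format
# these tests use):
#
#     reader = WikiTablesVariableFreeDatasetReader(...)
#     with open(tagged_table_file) as table_file:
#         table_lines = [line.split('\t') for line in table_file]
#     instance = reader.text_to_instance(
#             question="what was the attendance when usl a league played?",
#             table_lines=table_lines,
#             target_values=["7169"],
#             offline_search_output=["(select_number all_rows number_column:avg_attendance)"])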
def __init__(self, table_context: TableQuestionContext) -> None:
    super().__init__(constant_type_prefixes={"string": types.STRING_TYPE,
                                             "num": types.NUMBER_TYPE},
                     global_type_signatures=types.COMMON_TYPE_SIGNATURE,
                     global_name_mapping=types.COMMON_NAME_MAPPING)
    self.table_context = table_context
    # We add name mappings and signatures corresponding to specific column types to the
    # local name mapping based on the table content here.
    column_types = table_context.column_types
    self._table_has_string_columns = False
    self._table_has_date_columns = False
    self._table_has_number_columns = False
    if "string" in column_types:
        for name, translated_name in types.STRING_COLUMN_NAME_MAPPING.items():
            signature = types.STRING_COLUMN_TYPE_SIGNATURE[translated_name]
            self._add_name_mapping(name, translated_name, signature)
        self._table_has_string_columns = True
    if "date" in column_types:
        for name, translated_name in types.DATE_COLUMN_NAME_MAPPING.items():
            signature = types.DATE_COLUMN_TYPE_SIGNATURE[translated_name]
            self._add_name_mapping(name, translated_name, signature)
        # Adding -1 to the mapping because we need it for dates where not all three fields
        # are specified. We do this only when the table has a date column, because the
        # knowledge graph is constructed such that -1 is an entity with date columns as its
        # neighbors only if date columns exist in the table.
        self._map_name("num:-1", keep_mapping=True)
        self._table_has_date_columns = True
    if "number" in column_types or "num2" in column_types:
        for name, translated_name in types.NUMBER_COLUMN_NAME_MAPPING.items():
            signature = types.NUMBER_COLUMN_TYPE_SIGNATURE[translated_name]
            self._add_name_mapping(name, translated_name, signature)
        self._table_has_number_columns = True
    if "date" in column_types or "number" in column_types or "num2" in column_types:
        for name, translated_name in types.COMPARABLE_COLUMN_NAME_MAPPING.items():
            signature = types.COMPARABLE_COLUMN_TYPE_SIGNATURE[translated_name]
            self._add_name_mapping(name, translated_name, signature)
    self.table_graph = table_context.get_table_knowledge_graph()
    self._executor = WikiTablesVariableFreeExecutor(self.table_context.table_data)
    # TODO (pradeep): Use a NameMapper for mapping entity names too.
    # For every new column name seen, we update this counter to map it to a new NLTK name.
    self._column_counter = 0
    # Add the entities and numbers seen in the question to the mapping.
    question_entities, question_numbers = table_context.get_entities_from_question()
    self._question_entities = [entity for entity, _ in question_entities]
    self._question_numbers = [number for number, _ in question_numbers]
    for entity in self._question_entities:
        # These entities all have the prefix "string:".
        self._map_name(entity, keep_mapping=True)
    for number_in_question in self._question_numbers:
        self._map_name(f"num:{number_in_question}", keep_mapping=True)
    # Keeps track of column name productions so that we can add them to the agenda.
    self._column_productions_for_agenda: Dict[str, str] = {}
    # Add the column names to the local name mapping.
    for column_name in table_context.table_data[0].keys():
        self._map_name(column_name, keep_mapping=True)
    self.terminal_productions: Dict[str, str] = {}
    name_mapping = list(self.global_name_mapping.items()) + list(self.local_name_mapping.items())
    signatures = self.global_type_signatures.copy()
    signatures.update(self.local_type_signatures)
    for predicate, mapped_name in name_mapping:
        if mapped_name in signatures:
            signature = signatures[mapped_name]
            self.terminal_productions[predicate] = f"{signature} -> {predicate}"
    # We don't need to recompute this ever; let's just compute it once and cache it.
    self._valid_actions: Dict[str, List[str]] = None
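# Shape sketch: after construction, `terminal_productions` maps each terminal predicate to
# a production string of the form "{type signature} -> {predicate}". For example (the
# signature shown is a placeholder, not a real NLTK type):
#
#     self.terminal_productions["string_column:league"] == "<SIG> -> string_column:league"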
def _get_world_with_question_tokens(self, tokens: List[Token]) -> WikiTablesVariableFreeWorld:
    table_context = TableQuestionContext.read_from_file(self.table_file, tokens)
    world = WikiTablesVariableFreeWorld(table_context)
    return world
def test_table_data(self):
    question = "what was the attendance when usl a league played?"
    question_tokens = self.tokenizer.tokenize(question)
    test_file = f'{self.FIXTURES_ROOT}/data/wikitables/sample_table.tagged'
    table_question_context = TableQuestionContext.read_from_file(test_file, question_tokens)
    assert table_question_context.table_data == [
            {'date_column:year': Date(2001, -1, -1),
             'string_column:year': '2001',
             'number_column:year': 2001.0,
             'number_column:division': 2.0,
             'string_column:division': '2',
             'string_column:league': 'usl_a_league',
             'string_column:regular_season': '4th_western',
             'number_column:regular_season': 4.0,
             'string_column:playoffs': 'quarterfinals',
             'string_column:open_cup': 'did_not_qualify',
             'number_column:open_cup': None,
             'string_column:avg_attendance': '7_169',
             'number_column:avg_attendance': 7169.0},
            {'date_column:year': Date(2005, -1, -1),
             'string_column:year': '2005',
             'number_column:year': 2005.0,
             'number_column:division': 2.0,
             'string_column:division': '2',
             'string_column:league': 'usl_first_division',
             'string_column:regular_season': '5th',
             'number_column:regular_season': 5.0,
             'string_column:playoffs': 'quarterfinals',
             'string_column:open_cup': '4th_round',
             'number_column:open_cup': 4.0,
             'string_column:avg_attendance': '6_028',
             'number_column:avg_attendance': 6028.0}]
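# A runnable sketch (an approximation, not the actual implementation) of the cell-string
# normalization visible in the expected values above. The real rules live in
# TableQuestionContext.normalize_string; their effect on these cells is roughly "lowercase
# and collapse runs of non-word characters to underscores".
import re

def sketch_normalize(text: str) -> str:
    return re.sub(r"[^\w]+", "_", text.lower()).strip("_")

assert sketch_normalize("USL A-League") == "usl_a_league"
assert sketch_normalize("7,169") == "7_169"
assert sketch_normalize("Did not qualify") == "did_not_qualify"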