def combine_assertions(input_filename, output_file):
    """
    Take in a tab-separated, sorted "CSV" file, indicated by `input_filename`,
    whose lines should be grouped together into assertions. Output a msgpack
    stream of assertions to `output_file`.

    The input file should be made from multiple sources of assertions by
    concatenating and sorting them.

    The combined assertions will all have the dataset of the first edge that
    produces them, and the license of the strongest license being combined.

    This process requires its input to be a sorted CSV so that all edges for
    the same assertion will appear consecutively.
    """

    def group_func(line):
        "Group lines by their URI (their first column)."
        return line.split('\t', 1)[0]

    out = MsgpackStreamWriter(output_file)
    out_bad = MsgpackStreamWriter(output_file + '.reject')

    with open(input_filename, encoding='utf-8') as stream:
        for key, line_group in itertools.groupby(stream, group_func):
            assertion = make_assertion(line_group)
            if assertion is None:
                continue
            if assertion['weight'] > 0:
                destination = out
            else:
                destination = out_bad
            destination.write(assertion)

    out.close()
    out_bad.close()
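
# The grouping step above relies on itertools.groupby, which only merges
# *consecutive* lines that share the same key; that is why the input has to
# be pre-sorted. A minimal, standalone sketch of the same pattern on made-up
# tab-separated lines (independent of ConceptNet's helpers):
import itertools

lines = [
    "/a/[x]\tdataset1\t0.5",
    "/a/[x]\tdataset2\t1.0",
    "/a/[y]\tdataset1\t2.0",
]

def group_by_uri(line):
    "Group lines by their URI (their first column)."
    return line.split('\t', 1)[0]

for uri, group in itertools.groupby(lines, group_by_uri):
    # Each group holds all consecutive lines that share the same URI.
    print(uri, len(list(group)))
# /a/[x] 2
# /a/[y] 1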
def subwords_to_edges(language, input, output):
    """
    Morfessor hypothesizes ways to break words into sub-word chunks. Produce
    edges from these sub-words that can be used in retrofitting.
    """
    writer = MsgpackStreamWriter(output)
    for line in input:
        line = line.rstrip()
        if not line or line.startswith('#'):
            continue

        # Remove the unnecessary count ("1 ") from the start of each line
        line = line.split(' ', 1)[1]
        chunks = line.split(' + ')

        # Strip a possible trailing underscore, which would particularly show
        # up in the way we segment ATOMIC_SPACE_LANGUAGES (Vietnamese)
        full_text = ''.join(chunks).strip('_')
        end = join_uri('c', language, full_text)
        for chunk in chunks:
            if chunk != '_':
                start = join_uri('x', language, chunk.strip('_'))
                edge = make_edge(
                    '/r/SubwordOf',
                    start,
                    end,
                    dataset='/d/morphology',
                    license=Licenses.cc_attribution,
                    sources=MORPH_SOURCES,
                    weight=0.01,
                )
                writer.write(edge)
    writer.close()
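
# The loop above assumes Morfessor's segmentation output format: a count, a
# space, and sub-word chunks joined by ' + '. A small sketch of that parsing
# on a made-up line (the exact input format is an assumption here):
line = "1 believ + able_"

line = line.split(' ', 1)[1]    # drop the count -> "believ + able_"
chunks = line.split(' + ')      # ['believ', 'able_']

# Joining the chunks and stripping the trailing underscore recovers the word;
# each chunk then becomes the start of a /r/SubwordOf edge.
full_text = ''.join(chunks).strip('_')                              # 'believable'
subwords = [chunk.strip('_') for chunk in chunks if chunk != '_']   # ['believ', 'able']
print(full_text, subwords)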
def run_opencyc(input_file, output_file):
    """
    Read an .nq file containing OpenCyc data, outputting a file of ConceptNet
    edges and a file of mappings between the Semantic Web and ConceptNet.
    """
    out = MsgpackStreamWriter(output_file)

    labels = {}
    unlabels = defaultdict(set)
    seen_external_urls = set()

    # Read through the file once, finding all the "preferred labels". We will
    # use these as the surface texts for the nodes.
    for subj, pred, obj, _graph in parse_nquads(open(input_file, encoding='utf-8')):
        if pred['url'] == RDF_LABEL:
            labels[subj['url']] = obj['text']
            unlabels[obj['text']].add(subj['url'])

    # Read through the file again and extract ConceptNet edges.
    for subj, pred, obj, _graph in parse_nquads(open(input_file, encoding='utf-8')):
        rel_name = resource_name(pred['url'])
        web_subj = subj.get('url')
        web_obj = obj.get('url')
        if rel_name == 'subClassOf' and web_obj is not None and web_subj in labels and web_obj in labels:
            subj_label = labels[web_subj]
            obj_label = labels[web_obj]
            if '_' in subj_label or '_' in obj_label:
                continue
            if subj_label.startswith('xsd:') or obj_label.startswith('xsd:'):
                continue
            subj_words = set(simple_tokenize(subj_label))
            obj_words = set(simple_tokenize(obj_label))
            if (subj_words & BLACKLIST_WORDS) or (obj_words & BLACKLIST_WORDS):
                continue
            if len(subj_words) > 4 or len(obj_words) > 4:
                continue

            subj_uri = cyc_to_conceptnet_uri(labels, unlabels, web_subj)
            obj_uri = cyc_to_conceptnet_uri(labels, unlabels, web_obj)
            out.write(opencyc_edge('/r/IsA', subj_uri, obj_uri, subj_label, obj_label))
            if (subj_uri, web_subj) not in seen_external_urls:
                out.write(external_url_edge(subj_uri, web_subj))
                seen_external_urls.add((subj_uri, web_subj))
            if (obj_uri, web_obj) not in seen_external_urls:
                out.write(external_url_edge(obj_uri, web_obj))
                seen_external_urls.add((obj_uri, web_obj))
        elif rel_name == 'sameAs' and web_subj in labels and web_obj.startswith('http://umbel.org/'):
            subj_label = labels[web_subj]
            subj_uri = standardized_concept_uri('en', subj_label)
            if (subj_uri, web_obj) not in seen_external_urls:
                out.write(external_url_edge(subj_uri, web_obj))
                seen_external_urls.add((subj_uri, web_obj))

    out.close()
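
# The subClassOf branch above keeps only pairs of short, ordinary labels.
# The same filter, pulled out as a standalone predicate for clarity; the
# tokenizer and word set below are simplified stand-ins, not the real
# simple_tokenize and BLACKLIST_WORDS from the ConceptNet codebase.
DEMO_BLACKLIST_WORDS = {'rdf', 'xsd'}

def demo_tokenize(text):
    return text.lower().split()

def acceptable_label(label):
    if '_' in label or label.startswith('xsd:'):
        return False
    words = set(demo_tokenize(label))
    if words & DEMO_BLACKLIST_WORDS:
        return False
    return len(words) <= 4

print(acceptable_label('domestic cat'))     # True
print(acceptable_label('xsd:decimal'))      # False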
def json_to_msgpack(input_filename, output_filename):
    """
    Convert a JSON stream (with one object per line) to a msgpack stream.
    """
    out_stream = MsgpackStreamWriter(output_filename)
    for obj in read_json_stream(input_filename):
        out_stream.write(obj)
    out_stream.close()
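
# A short usage sketch: the input is expected to be a JSON stream with one
# object per line (JSON Lines). The file names and sample objects below are
# hypothetical.
import json

def demo_json_to_msgpack():
    with open('edges.jsons', 'w', encoding='utf-8') as f:
        for obj in [{'rel': '/r/IsA', 'weight': 1.0},
                    {'rel': '/r/PartOf', 'weight': 0.5}]:
            f.write(json.dumps(obj) + '\n')

    # Convert the JSON-lines file to a msgpack stream.
    json_to_msgpack('edges.jsons', 'edges.msgpack')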
def combine_assertions(input_filename, core_filename, output_filename):
    """
    Take in a tab-separated, sorted "CSV" file, indicated by `input_filename`,
    whose lines should be grouped together into assertions. Output a msgpack
    stream of assertions to the file indicated by `output_filename`.

    The input file should be made from multiple sources of assertions by
    concatenating and sorting them.

    The combined assertions will all have the dataset of the first edge that
    produces them, and the license of the strongest license being combined.

    This process requires its input to be a sorted CSV so that all edges for
    the same assertion will appear consecutively.
    """

    def group_func(line):
        "Group lines by their URI (their first column)."
        return line.split('\t', 1)[0]

    out = MsgpackStreamWriter(output_filename)
    out_bad = MsgpackStreamWriter(output_filename + '.reject')

    core_prefixes = set()
    for line in open(core_filename, encoding='utf-8'):
        core_prefixes.add(uri_prefix(line.strip(), 3))

    # Scan through the assertions twice to add derived words to the blocklist
    blocklist = Blocklist.load(get_support_data_filename(BLOCK_FILENAME))
    for _ in range(2):
        with open(input_filename, encoding='utf-8') as stream:
            for line in stream:
                tmp_assertion = _make_assertion([line.strip()])
                if tmp_assertion is None:
                    continue
                blocklist.propagate_blocks(tmp_assertion)

    with open(input_filename, encoding='utf-8') as stream:
        for key, line_group in itertools.groupby(stream, group_func):
            assertion = _make_assertion(line_group)
            destination = out
            if assertion is None:
                continue
            if assertion['weight'] <= 0:
                destination = out_bad
            if blocklist.is_blocked(assertion):
                destination = out_bad
            if assertion['rel'] == 'ExternalURL':
                # Discard ExternalURL edges for things that aren't otherwise
                # in ConceptNet
                prefix = uri_prefix(assertion['start'], 3)
                if prefix not in core_prefixes:
                    destination = out_bad
            destination.write(assertion)

    out.close()
    out_bad.close()
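
# The ExternalURL filtering above assumes uri_prefix(uri, 3) truncates a
# ConceptNet URI to its first three components, so sense-specific suffixes
# are ignored when checking membership in the core. A stand-in that mimics
# that assumed behavior, for illustration only:
def demo_uri_prefix(uri, max_pieces=3):
    pieces = uri.strip('/').split('/')
    return '/' + '/'.join(pieces[:max_pieces])

core_prefixes = {demo_uri_prefix('/c/en/example/n/wn/communication', 3)}
print(demo_uri_prefix('/c/en/example', 3) in core_prefixes)         # True
print(demo_uri_prefix('/c/en/unrelated_term', 3) in core_prefixes)  # False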
def test_msgpack_to_json():
    with TemporaryDirectory(prefix='conceptnet-test') as tmpdir:
        json_path = os.path.join(tmpdir, 'test.jsons')
        msgpack_path = os.path.join(tmpdir, 'test.msgpack')

        # Write the test data as a msgpack stream, convert it to JSON lines,
        # and check that reading the JSON back gives the same objects.
        writer = MsgpackStreamWriter(msgpack_path)
        for item in DATA:
            writer.write(item)
        writer.close()

        msgpack_to_json(msgpack_path, json_path)
        reader = read_json_stream(json_path)
        for known, read in zip_longest(DATA, reader):
            eq_(known, read)
def handle_file(input_file, output_file):
    tree = ET.parse(input_file)
    out = MsgpackStreamWriter(output_file)
    root = tree.getroot()
    lang = root[0][1].attrib['type']  # language is at position [1] within the child node [0]

    if len(root) >= 2:
        for annotation in root[1]:
            for word in strip_words(annotation.text):
                start = standardized_concept_uri('mul', annotation.attrib['cp'])
                end = standardized_concept_uri(lang, word)
                edge = make_edge(REL, start, end, DATASET, LICENSE, SOURCE)
                out.write(edge)
    else:
        print("No emoji data in {!r}".format(input_file))

    out.close()
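
# handle_file indexes directly into the XML tree, which assumes a structure
# like a Unicode CLDR annotations file: the language code is an attribute of
# root[0][1], and root[1] holds the <annotation> elements. A sketch of that
# assumed shape (the sample document below is made up):
import xml.etree.ElementTree as ET

demo_doc = """<ldml>
  <identity>
    <version number="unknown"/>
    <language type="en"/>
  </identity>
  <annotations>
    <annotation cp="🐱">cat | kitten</annotation>
  </annotations>
</ldml>"""

demo_root = ET.fromstring(demo_doc)
print(demo_root[0][1].attrib['type'])                   # en
for annotation in demo_root[1]:
    print(annotation.attrib['cp'], annotation.text)     # 🐱 cat | kitten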
def read_wiktionary(input_file, db_file, output_file):
    """
    Convert a stream of parsed Wiktionary data into ConceptNet edges.

    A `db_file` containing all known words in all languages must have already
    been prepared from the same data.
    """
    db = sqlite3.connect(db_file)
    out = MsgpackStreamWriter(output_file)
    for heading, items in segmented_stream(input_file):
        language = heading['language']
        title = heading['title']
        dataset = '/d/wiktionary/{}'.format(language)
        url_title = heading['title'].replace(' ', '_')
        web_url = 'http://{}.wiktionary.org/wiki/{}'.format(language, url_title)
        web_source = '/s/resource/wiktionary/{}'.format(language)

        source = {'contributor': web_source, 'process': PARSER_RULE}

        # Scan through the 'from' items, such as the start nodes of
        # translations, looking for distinct etymologies. If we get more than
        # one etymology for a language, we need to distinguish them as
        # different senses in that language.
        all_etyms = {
            (item['from']['language'], etym_label(language, item['from']))
            for item in items
            if 'language' in item['from']
            and item['from']['text'] == title
            and etym_label(language, item['from']) is not None
        }
        word_languages = {wlang for (wlang, _) in all_etyms}
        for wlang in sorted(word_languages):
            if valid_language(wlang):
                cpage = standardized_concept_uri(wlang, title)
                ld_edge = make_edge(
                    '/r/ExternalURL',
                    cpage,
                    web_url,
                    dataset=dataset,
                    weight=0.25,
                    sources=[source],
                    license=Licenses.cc_sharealike,
                )
                out.write(ld_edge)
        etym_to_translation_sense = {}
        language_etym_counts = Counter(lang for (lang, etym) in all_etyms)
        polysemous_languages = {
            lang for lang in language_etym_counts if language_etym_counts[lang] > 1
        }

        for item in items:
            tfrom = item['from']
            tto = item['to']
            assumed_languages = [language]
            lang1 = tfrom.get('language')
            lang2 = tto.get('language')
            if lang1 and (lang1 not in assumed_languages) and valid_language(lang1):
                assumed_languages.append(lang1)
            if lang2 and (lang2 not in assumed_languages) and valid_language(lang2):
                assumed_languages.append(lang2)

            cfrom = transform_term(
                language,
                tfrom,
                assumed_languages,
                db,
                use_etyms=(lang1 in polysemous_languages),
            )
            cpage = cfrom
            cto = transform_term(
                language,
                tto,
                assumed_languages,
                db,
                use_etyms=(lang2 in polysemous_languages),
            )

            if cfrom is None or cto is None:
                continue
            if uri_prefix(cfrom, 3) == uri_prefix(cto, 3):
                continue

            rel, switch = transform_relation(item['rel'])
            if rel is None:
                continue
            if switch:
                cfrom, cto = cto, cfrom

            # When translations are separated by sense, use only the first
            # sense we see for each etymology. That will have the most
            # representative translations.
            if item['rel'] == 'translation':
                etym_key = (tfrom['language'], etym_label(language, tfrom))
                sense = tfrom.get('sense', '')
                if etym_key in etym_to_translation_sense:
                    if etym_to_translation_sense[etym_key] != sense:
                        continue
                else:
                    etym_to_translation_sense[etym_key] = sense

            weight = 1.
            if rel == '/r/EtymologicallyRelatedTo':
                weight = 0.25
            edge = make_edge(
                rel,
                cfrom,
                cto,
                dataset=dataset,
                weight=weight,
                sources=[source],
                surfaceStart=tfrom['text'],
                surfaceEnd=tto['text'],
                license=Licenses.cc_sharealike,
            )
            out.write(edge)

    out.close()
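
# The etymology bookkeeping above reduces to counting distinct etymologies
# per language and treating any language with more than one as polysemous.
# The same logic in isolation, on made-up (language, etymology label) pairs:
from collections import Counter

demo_etyms = {('en', 'etym1'), ('en', 'etym2'), ('fr', 'etym1')}

demo_counts = Counter(lang for (lang, _etym) in demo_etyms)
demo_polysemous = {lang for lang in demo_counts if demo_counts[lang] > 1}
print(demo_polysemous)      # {'en'}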
def run_wordnet(input_file, output_file):
    out = MsgpackStreamWriter(output_file)

    synset_senses = defaultdict(list)
    sense_synsets = {}
    synset_labels = defaultdict(list)
    synset_canonical_labels = {}
    synset_categories = {}
    synset_domains = {}
    synset_glosses = {}
    synset_disambig = {}
    synset_uris = {}

    # First pass: find data about synsets
    quads = parse_nquads(open(input_file, encoding='utf-8'))
    for subj_dict, rel_dict, obj_dict, _graph in quads:
        if 'url' not in subj_dict or 'url' not in rel_dict:
            continue
        subj = subj_dict['url']
        rel = rel_dict['url']
        obj = obj_dict.get('url')
        objtext = obj_dict.get('text')

        relname = resource_name(rel)
        if relname == 'label':
            if obj_dict['lang'] == 'en':
                synset_labels[subj].append(objtext)
        elif relname == 'sameAs':
            if obj.startswith(WN20_URL):
                # If we have a link to RDF WordNet 2.0, the URL (URI? IRI?)
                # will contain a standardized label for this concept, which
                # we should use when we want to use this synset as the name of
                # a disambiguation category. RDF WordNet 3.1 assigns synsets
                # a number of labels in no particular order, making it hard to
                # determine from 3.1 alone what to name a category.
                objname = resource_name(obj)
                parts = objname.split('-')[1:-2]

                # Handle missing apostrophes
                label = '-'.join(parts).replace('_s_', "'s_").replace('_s-', "'s_").replace("s__", "s'_").replace("s_-", "s'-").replace('_', ' ')
                synset_canonical_labels[subj] = label
        elif relname == 'domain_category':
            synset_categories[subj] = obj
        elif relname == 'lexical_domain':
            target = resource_name(obj)
            if '.' in target:
                domain = target.split('.')[1]
                synset_domains[subj] = domain
        elif relname == 'gloss':
            synset_glosses[subj] = objtext
        elif relname == 'reference':
            lemma = resource_name(subj)
            synset = obj
            synset_senses[synset].append(lemma)
            sense_synsets[lemma] = synset

    used_labels = set(synset_canonical_labels.values())
    for synset, values in synset_labels.items():
        values.sort(key=lambda label: (label in used_labels,) + label_sort_key(label))
        if (
            synset not in synset_canonical_labels
            or synset_canonical_labels[synset][0].isupper()
            and synset_domains.get(synset) == 'person'
        ):
            label = values[0]
            synset_canonical_labels[synset] = label
            used_labels.add(label)

    for synset, labels in synset_labels.items():
        if synset in synset_categories:
            category_name = synset_canonical_labels[synset_categories[synset]]
        else:
            category_name = synset_domains.get(synset, None)
        synset_no_fragment = synset.split('#')[0]
        pos = synset_no_fragment[-1].lower()
        assert pos in 'nvarsp', synset
        if pos == 's':
            pos = 'a'
        elif pos == 'p':
            pos = '-'
        if category_name in ('pert', 'all', 'tops'):
            category_name = None
        synset_disambig[synset] = (pos, category_name)
        canon = synset_canonical_labels[synset]
        canon_uri = standardized_concept_uri('en', canon, pos, 'wn', category_name)
        synset_uris[synset] = canon_uri

        for label in labels:
            if label != canon:
                other_uri = standardized_concept_uri('en', label, pos, 'wn', category_name)
                rel_uri = '/r/Synonym'
                surface = '[[{0}]] is a synonym of [[{1}]]'.format(label, canon)
                edge = make_edge(
                    rel_uri,
                    other_uri,
                    canon_uri,
                    dataset=DATASET,
                    surfaceText=surface,
                    license=Licenses.cc_attribution,
                    sources=[SOURCE],
                    weight=2.0,
                )
                out.write(edge)

    quads = parse_nquads(open(input_file, encoding='utf-8'))
    for subj_dict, rel_dict, obj_dict, _graph in quads:
        if 'url' not in subj_dict or 'url' not in rel_dict:
            continue
        subj = subj_dict['url']
        rel = rel_dict['url']
        obj = obj_dict.get('url')
        relname = resource_name(rel)

        if relname in REL_MAPPING:
            rel, frame = REL_MAPPING[relname]
            reversed_frame = False
            if rel.startswith('~'):
                rel = rel[1:]
                reversed_frame = True
            rel_uri = '/r/' + rel
            if obj is not None:
                obj_uri = synset_uris.get(obj)
                if obj not in synset_canonical_labels:
                    continue
                obj_label = synset_canonical_labels[obj]
            else:
                text = obj_dict['text']
                # Some WordNets use strings with "!" in them to indicate
                # out-of-band information, such as a missing translation
                if (not text) or '!' in text:
                    continue
                lang = obj_dict['lang']
                pos, sense = synset_disambig.get(subj, (None, None))
                obj_uri = standardized_concept_uri(lang, text, pos, 'wn', sense)
                obj_label = text

            if subj not in synset_uris or subj not in synset_canonical_labels:
                continue
            subj_uri = synset_uris[subj]
            subj_label = synset_canonical_labels[subj]
            license = Licenses.cc_attribution
            langcode = subj_uri.split('/')[2]
            if langcode in SHAREALIKE_LANGUAGES:
                license = Licenses.cc_sharealike
            if reversed_frame:
                subj_uri, obj_uri = obj_uri, subj_uri
                subj_label, obj_label = obj_label, subj_label
            surface = frame.format('[[%s]]' % subj_label, '[[%s]]' % obj_label)

            edge = make_edge(
                rel_uri,
                subj_uri,
                obj_uri,
                dataset=DATASET,
                surfaceText=surface,
                license=license,
                sources=[SOURCE],
                weight=2.0,
            )
            out.write(edge)

    for wn_url in sorted(synset_uris):
        cn_uri = synset_uris[wn_url]
        edge = make_edge(
            '/r/ExternalURL',
            cn_uri,
            wn_url,
            dataset=DATASET,
            license=Licenses.cc_sharealike,
            sources=[SOURCE],
            weight=1.0,
        )
        out.write(edge)

    out.close()
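
# The sameAs branch above rebuilds a readable label from an RDF WordNet 2.0
# resource name. A standalone sketch of that string surgery, assuming names
# of the form 'synset-<label>-<pos>-<number>' (both the naming scheme and the
# sample value are assumptions here):
demo_objname = 'synset-mother_s_day-noun-1'

demo_parts = demo_objname.split('-')[1:-2]      # ['mother_s_day']
demo_label = (
    '-'.join(demo_parts)
    .replace('_s_', "'s_")
    .replace('_s-', "'s_")
    .replace("s__", "s'_")
    .replace("s_-", "s'-")
    .replace('_', ' ')
)
print(demo_label)       # mother's day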
def process_dbpedia(input_dir, output_file, concept_file):
    """
    Read through multiple DBPedia files and output filtered assertions to
    `output_file`.
    """
    ok_concepts = read_concept_file(concept_file)

    input_path = pathlib.Path(input_dir)
    interlang_path = input_path / 'interlanguage_links_en.tql.bz2'
    mapped_urls = interlanguage_mapping(interlang_path, ok_concepts)

    out = MsgpackStreamWriter(output_file)

    types_path = input_path / 'instance_types_en.tql.bz2'
    quads = parse_nquads(bz2.open(str(types_path), 'rt'))
    for subj, pred, obj, _graph in quads:
        subj_url = subj['url']
        if (
            'Category:' in subj_url or 'File:' in subj_url or
            'List_of' in subj_url or '__' in subj_url or 'Template:' in subj_url
        ):
            continue
        if subj_url in mapped_urls:
            subj_concept = translate_dbpedia_url(subj_url)
            obj_type = un_camel_case(resource_name(obj['url']))
            if obj_type not in TYPE_BLACKLIST:
                obj_concept = standardized_concept_uri('en', obj_type, 'n')
                if obj_concept not in CONCEPT_BLACKLIST:
                    edge = make_edge(
                        '/r/IsA', subj_concept, obj_concept,
                        dataset='/d/dbpedia/en',
                        license=Licenses.cc_sharealike,
                        sources=[{'contributor': '/s/resource/dbpedia/2015/en'}],
                        weight=0.5,
                        surfaceStart=url_to_label(subj['url']),
                        surfaceEnd=url_to_label(obj['url'])
                    )
                    out.write(edge)
                for other_url in mapped_urls[subj_url]:
                    if other_url.startswith('http://wikidata.dbpedia.org/'):
                        urledge = make_edge(
                            '/r/ExternalURL', subj_concept, other_url,
                            dataset='/d/dbpedia/en',
                            license=Licenses.cc_sharealike,
                            sources=[{'contributor': '/s/resource/dbpedia/2015/en'}],
                            weight=1.0
                        )
                        out.write(urledge)
                    else:
                        other_concept = translate_dbpedia_url(other_url)
                        if other_concept:
                            urledge = make_edge(
                                '/r/ExternalURL', other_concept, other_url,
                                dataset='/d/dbpedia/en',
                                license=Licenses.cc_sharealike,
                                sources=[{'contributor': '/s/resource/dbpedia/2015/en'}],
                                weight=1.0
                            )
                            out.write(urledge)
                            edge = make_edge(
                                '/r/Synonym', other_concept, subj_concept,
                                dataset='/d/dbpedia/en',
                                license=Licenses.cc_sharealike,
                                sources=[{'contributor': '/s/resource/dbpedia/2015/en'}],
                                weight=0.5,
                                surfaceStart=url_to_label(other_url),
                                surfaceEnd=url_to_label(subj_url)
                            )
                            out.write(edge)

    relations_path = input_path / 'mappingbased_objects_en.tql.bz2'
    quads = parse_nquads(bz2.open(str(relations_path), 'rt'))
    for subj, pred, obj, _graph in quads:
        subj_concept = translate_dbpedia_url(subj['url'])
        obj_concept = translate_dbpedia_url(obj['url'])
        rel_name = resource_name(pred['url'])
        if (
            subj_concept and obj_concept and
            subj['url'] in mapped_urls and obj['url'] in mapped_urls
        ):
            if rel_name in RELATIONS:
                rel = RELATIONS[rel_name]
                edge = make_edge(
                    rel, subj_concept, obj_concept,
                    dataset='/d/dbpedia/en',
                    license=Licenses.cc_sharealike,
                    sources=[{'contributor': '/s/resource/dbpedia/2015/en'}],
                    weight=0.5,
                    surfaceStart=url_to_label(subj['url']),
                    surfaceEnd=url_to_label(obj['url'])
                )
                out.write(edge)

    out.close()
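
# The first loop above skips DBPedia subjects that aren't ordinary content
# pages. The same test as a standalone predicate (the example URLs are
# illustrative):
def is_content_page(subj_url):
    return not (
        'Category:' in subj_url
        or 'File:' in subj_url
        or 'List_of' in subj_url
        or '__' in subj_url
        or 'Template:' in subj_url
    )

print(is_content_page('http://dbpedia.org/resource/Cat'))               # True
print(is_content_page('http://dbpedia.org/resource/Category:Felines'))  # False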
def run_wordnet(input_file, output_file):
    out = MsgpackStreamWriter(output_file)

    synset_senses = defaultdict(list)
    sense_synsets = {}
    synset_labels = defaultdict(list)
    synset_canonical_labels = {}
    synset_categories = {}
    synset_domains = {}
    synset_glosses = {}
    synset_disambig = {}
    synset_uris = {}

    # First pass: find data about synsets
    quads = parse_nquads(open(input_file, encoding="utf-8"))
    for subj_dict, rel_dict, obj_dict, _graph in quads:
        if "url" not in subj_dict or "url" not in rel_dict:
            continue
        subj = subj_dict["url"]
        rel = rel_dict["url"]
        obj = obj_dict.get("url")
        objtext = obj_dict.get("text")

        relname = resource_name(rel)
        if relname == "label":
            if obj_dict["lang"] == "en":
                synset_labels[subj].append(objtext)
        elif relname == "sameAs":
            if obj.startswith(WN20_URL):
                # If we have a link to RDF WordNet 2.0, the URL (URI? IRI?)
                # will contain a standardized label for this concept, which
                # we should use when we want to use this synset as the name of
                # a disambiguation category. RDF WordNet 3.1 assigns synsets
                # a number of labels in no particular order, making it hard to
                # determine from 3.1 alone what to name a category.
                objname = resource_name(obj)
                parts = objname.split("-")[1:-2]

                # Handle missing apostrophes
                label = (
                    "-".join(parts)
                    .replace("_s_", "'s_")
                    .replace("_s-", "'s_")
                    .replace("s__", "s'_")
                    .replace("s_-", "s'-")
                    .replace("_", " ")
                )
                synset_canonical_labels[subj] = label
        elif relname == "domain_category":
            synset_categories[subj] = obj
        elif relname == "lexical_domain":
            target = resource_name(obj)
            if "." in target:
                domain = target.split(".")[1]
                synset_domains[subj] = domain
        elif relname == "gloss":
            synset_glosses[subj] = objtext
        elif relname == "reference":
            lemma = resource_name(subj)
            synset = obj
            synset_senses[synset].append(lemma)
            sense_synsets[lemma] = synset

    used_labels = set(synset_canonical_labels.values())
    for synset, values in synset_labels.items():
        values.sort(key=lambda label: (label in used_labels,) + label_sort_key(label))
        if (
            synset not in synset_canonical_labels
            or synset_canonical_labels[synset][0].isupper()
            and synset_domains.get(synset) == "person"
        ):
            label = values[0]
            synset_canonical_labels[synset] = label
            used_labels.add(label)

    for synset, labels in synset_labels.items():
        if synset in synset_categories:
            category_name = synset_canonical_labels[synset_categories[synset]]
        else:
            category_name = synset_domains.get(synset, None)
        synset_no_fragment = synset.split("#")[0]
        pos = synset_no_fragment[-1].lower()
        assert pos in "nvarsp", synset
        if pos == "s":
            pos = "a"
        elif pos == "p":
            pos = "-"
        if category_name in ("pert", "all", "tops"):
            category_name = None
        synset_disambig[synset] = (pos, category_name)
        canon = synset_canonical_labels[synset]
        canon_uri = standardized_concept_uri("en", canon, pos, "wn", category_name)
        synset_uris[synset] = canon_uri

        for label in labels:
            if label != canon:
                other_uri = standardized_concept_uri(
                    "en", label, pos, "wn", category_name
                )
                rel_uri = "/r/Synonym"
                surface = "[[{0}]] is a synonym of [[{1}]]".format(label, canon)
                edge = make_edge(
                    rel_uri,
                    other_uri,
                    canon_uri,
                    dataset=DATASET,
                    surfaceText=surface,
                    license=Licenses.cc_attribution,
                    sources=[SOURCE],
                    weight=2.0,
                )
                out.write(edge)

    quads = parse_nquads(open(input_file, encoding="utf-8"))
    for subj_dict, rel_dict, obj_dict, _graph in quads:
        if "url" not in subj_dict or "url" not in rel_dict:
            continue
        subj = subj_dict["url"]
        rel = rel_dict["url"]
        obj = obj_dict.get("url")
        relname = resource_name(rel)

        if relname in REL_MAPPING:
            pos, sense = synset_disambig.get(subj, (None, None))
            if relname == "hypernym" and pos == "v":
                relname = "hypernym-v"
            rel, frame = REL_MAPPING[relname]
            reversed_frame = False
            if rel.startswith("~"):
                rel = rel[1:]
                reversed_frame = True
            rel_uri = "/r/" + rel
            if obj is not None:
                obj_uri = synset_uris.get(obj)
                if obj not in synset_canonical_labels:
                    continue
                obj_label = synset_canonical_labels[obj]
            else:
                text = obj_dict["text"]
                # Some WordNets use strings with "!" in them to indicate
                # out-of-band information, such as a missing translation
                if (not text) or "!" in text:
                    continue
                lang = obj_dict["lang"]
                obj_uri = standardized_concept_uri(lang, text, pos, "wn", sense)
                obj_label = text

            if subj not in synset_uris or subj not in synset_canonical_labels:
                continue
            subj_uri = synset_uris[subj]
            subj_label = synset_canonical_labels[subj]
            license = Licenses.cc_attribution
            langcode = subj_uri.split("/")[2]
            if langcode in SHAREALIKE_LANGUAGES:
                license = Licenses.cc_sharealike
            if reversed_frame:
                subj_uri, obj_uri = obj_uri, subj_uri
                subj_label, obj_label = obj_label, subj_label
            surface = frame.format("[[%s]]" % subj_label, "[[%s]]" % obj_label)

            edge = make_edge(
                rel_uri,
                subj_uri,
                obj_uri,
                dataset=DATASET,
                surfaceText=surface,
                license=license,
                sources=[SOURCE],
                weight=2.0,
            )
            out.write(edge)

    for wn_url in sorted(synset_uris):
        cn_uri = synset_uris[wn_url]
        edge = make_edge(
            "/r/ExternalURL",
            cn_uri,
            wn_url,
            dataset=DATASET,
            license=Licenses.cc_sharealike,
            sources=[SOURCE],
            weight=1.0,
        )
        out.write(edge)

    out.close()
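
# REL_MAPPING entries whose relation starts with "~" are read in the reverse
# direction: the subject and object (and their labels) are swapped before the
# edge is written. A toy illustration of that convention; the mapping entries
# below are made up, not the real REL_MAPPING table:
DEMO_REL_MAPPING = {
    "hypernym": ("IsA", "{0} is a type of {1}"),
    "part_meronym": ("~PartOf", "{0} is a part of {1}"),
}

def demo_map_relation(relname, subj, obj):
    rel, frame = DEMO_REL_MAPPING[relname]
    if rel.startswith("~"):
        rel = rel[1:]
        subj, obj = obj, subj
    return "/r/" + rel, frame.format(subj, obj)

print(demo_map_relation("hypernym", "cat", "animal"))
# ('/r/IsA', 'cat is a type of animal')
print(demo_map_relation("part_meronym", "car", "wheel"))
# ('/r/PartOf', 'wheel is a part of car')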