"http://www.w3.org/2004/02/skos/core#note": "P81", "http://www.w3.org/2004/02/skos/core#related": "P76", "http://www.w3.org/2004/02/skos/core#closeMatch": "P77", "http://www.w3.org/2004/02/skos/core#exactMatch": "P78", "http://www.w3.org/2004/02/skos/core#relatedMatch": "P79", "http://www.w3.org/2004/02/skos/core#definition": "P80" } with open('D:/LexBib/terms/SKOS4lwb.json', encoding="utf-8") as f: data = json.load(f)['results']['bindings'] count = 1 for row in data: print('\nTriple [' + str(count) + '], ' + str(len(data) - count) + ' triples left.') lwbs = lwb.getqid("Q7", row['s']['value']) if row['p']['value'] in propmap: if row['o']['type'] == "literal": statement = lwb.updateclaim(lwbs, propmap[row['p']['value']], row['o']['value'].rstrip(), "string") else: lwbo = lwb.getqid("Q7", row['o']['value'].rstrip()) statement = lwb.updateclaim(lwbs, propmap[row['p']['value']], lwbo, "item") elif row['p']['value'] == "http://www.w3.org/2004/02/skos/core#prefLabel": lwb.setlabel(lwbs, row['o']['xml:lang'], row['o']['value'].rstrip()) elif row['p']['value'] == "http://www.w3.org/2004/02/skos/core#altLabel": lwb.setlabel(lwbs, row['o']['xml:lang'], row['o']['value'].rstrip(),
"""Write Wikidata QID (P4), language code (P32) and labels for LexBib
language items (class Q8).

Reads a language dictionary keyed by language code and a newline-separated
list of codes to process; for each code, resolves/creates the Q8 item via
lwb and writes its claims and all labels found in the dictionary.
"""
import json

import lwb

with open('D:/LexBib/terms/langdict.json', encoding="utf-8") as f:
    langdict = json.load(f)
with open('D:/LexBib/languages/publangs.txt', encoding="utf-8") as f:
    publangs = f.read().split('\n')

count = 0
for lang in publangs:
    count += 1
    if not lang.strip():
        # FIX: splitting on '\n' yields a trailing empty string (and possibly
        # blank lines); looking those up in langdict raised KeyError.
        continue
    print('\nLine [' + str(count) + '] of ' + str(len(publangs)) + ': ' + lang)
    qid = lwb.getqid(["Q8"], lang)  # class Language
    statement = lwb.updateclaim(qid, "P4", langdict[lang]['wdqid'], "url")
    statement = lwb.updateclaim(qid, "P32", lang, "url")
    for label in langdict[lang]['labels']:
        statement = lwb.setlabel(qid, label, langdict[lang]['labels'][label])
    print('OK. ' + str(len(publangs) - count) + ' languages left.')
print('\nFinished.\n')
"word sense": "Q22504", "word-form": "Q22505", "word-formation": "Q22506", "wordform": "Q22507", "wordnet": "Q22508" } with open(config.datafolder + "/terms/SkE terms for SKOS vocab - batch 1.csv", 'r', encoding="utf-8") as csvfile: csvdict = csv.DictReader(csvfile) for item in csvdict: time.sleep(1) print(str(item)) if item['SKOS Concept URI'] != "": lwbqid = lwb.getqid("Q7", item['SKOS Concept URI']) else: lwbqid = map[item['Keyword4newScheme']] schemeStatement = lwb.updateclaim(lwbqid, "P74", "Q22279", "item") # skos:inScheme SkE #1 scoreStatement = lwb.updateclaim(lwbqid, "P82", item['SkE score'], "string") lwb.setqualifier(lwbqid, "P82", scoreStatement, "P83", "Q22279", "item") lwb.setqualifier(lwbqid, "P82", scoreStatement, "P88", item['Keyword4newScheme'], "string")
"""Re-key babelterms.py output from subject URI to LexBib QID.

Loads the babeltranslations.json produced by babelterms.py, resolves each
subject to its (already known) Q7 concept item, strips the entity-URI
prefix to get the bare QID, and dumps the re-keyed mapping to a new file.
"""
import json

import lwb

ENTITY_PREFIX = "http://data.lexbib.org/entity/"

# get output generated with babelterms.py
with open('D:/LexBib/terms/babeltranslations.json', encoding="utf-8") as infile:
    translations = json.load(infile)

rekeyed = {}
for subject, payload in translations.items():
    # onlyknown=True: look up existing items only, never create new ones
    entity_uri = lwb.getqid("Q7", subject, onlyknown=True)
    rekeyed[entity_uri.replace(ENTITY_PREFIX, "")] = payload

with open('D:/LexBib/terms/babeltranslations_lwbqid.json', "w", encoding="utf-8") as outfile:
    json.dump(rekeyed, outfile, indent=2)
# NOTE(review): fragment, collapsed onto one line and truncated at BOTH ends —
# `item` is bound by an enclosing loop outside this view and the ':container '
# branch is cut off mid-logic. What is visible: iteration over a zotero item's
# properties; ':event ' tags are resolved to Q6 event items (external http URIs
# used as-is, bare names prefixed with the lexbib events namespace) and queued
# as P36 item statements in propvals; ':container ' tags with isbn:/oclc:
# prefixes are normalized (dashes stripped, isbn: rewritten to a worldcat URI).
# Left byte-identical; do not reformat without the surrounding loop.
# iterate through zotero properties creatorvals = [] propvals = [] for zp in item: # zp: zotero property val = item[zp] # val: value of zotero property # lexbib zotero tags can contain statements (shortcode for property, and value). # If item as value, and that item does not exist, it is created. if zp == "tags": for tag in val: if tag["tag"].startswith(':event '): event = tag["tag"].replace(":event ", "") if event.startswith( 'http'): # event uri is from outside lexbib qid = lwb.getqid("Q6", event) else: # convert to event uri in lexbib events namespace qid = lwb.getqid( "Q6", 'http://lexbib.org/events#' + event) propvals.append({ "property": "P36", "datatype": "item", "value": qid }) if tag["tag"].startswith(':container '): container = tag["tag"].replace(":container ", "") if container.startswith( 'isbn:') or container.startswith('oclc:'): container = container.replace("-", "") container = container.replace( "isbn:", "http://worldcat.org/isbn/")
# NOTE(review): fragment, collapsed onto one line and truncated at BOTH ends —
# `index`, `rep`, `totalrows` and `data` come from outside this view and the
# creator-qualifier scan is cut off mid-print. What is visible: the main
# bibimport.py processing loop with a retry guard (aborts after rep > 4 failed
# attempts on one item); per item it resolves the Q3 BibItem, writes its class
# (P5), then scans creatorvals for P39/P42 creator triples and their P33
# (list position) qualifiers. Left byte-identical; the missing tail of the
# try-block prevents safe reformatting.
while index < totalrows: if index >= 0: #start item in infile if rep > 4: # break 'while' loop after 5 failed attempts to process item print( '\nbibimport.py has entered in an endless loop... abort.') break else: print('\n' + str(index) + ' items processed. ' + str(totalrows - index) + ' list items left.\n') #time.sleep(1) rep += 1 try: item = data[index] qid = lwb.getqid( "Q3", item['lexbibUri']) # Q3: LexBib BibItem class classStatement = lwb.updateclaim(qid, "P5", item['lexbibClass'], "item") for triple in item['creatorvals']: #check if creator with that position is already there as item (not literal) skip = False if triple['property'] == "P39": itemprop = "P12" elif triple['property'] == "P42": itemprop = "P13" for Qualifier in triple['Qualifiers']: if Qualifier['property'] == "P33": listpos = Qualifier['value'] print( 'Found ' + triple['property'] +
# NOTE(review): fragment, collapsed onto one line and truncated at BOTH ends —
# the try whose except is visible here opens before this view (a Wikidata ISSN
# lookup populating datalist), and the trailing request-retry loop is cut off
# inside its except clause. What is visible: on lookup success, the journal's
# wdqid and label are taken from the SPARQL result (labels that are just a
# bare Q-id are blanked); on failure the ISSN is appended to an orphaned list
# and skipped. A Q20 serial item is then created/resolved (wdqid doubles as
# the LexBib URI) with P3/P4 (URIs), P20 (ISSN) and an English label, and a
# SPARQL query fetches Q3 bibitems sharing that ISSN so P46 "contained in
# serial" can be added further down. Left byte-identical.
print('\nGot ISSN ' + issn + ' data from Wikidata.') wdqid = datalist[0]['journal']['value'] label = datalist[0]['journalLabel']['value'] success = 1 regexp = re.compile(r'Q\d+') if regexp.search(label): label = "" except Exception as ex: print("ISSN " + issn + " not found on wikidata, skipping, will add to orphaned list.") orphaned += issn + '\tnot found on wikidata.\n' continue # create lwb serial for this orphaned issn lwbqid = lwb.getqid("Q20", wdqid) # for serials, wdqid is also lexbib uri statement = lwb.updateclaim(lwbqid, "P3", wdqid, "url") statement = lwb.updateclaim(lwbqid, "P20", issn, "string") statement = lwb.updateclaim(lwbqid, "P4", wdqid, "url") statement = lwb.setlabel(lwbqid, "en", label) # add P46 "contained in serial" to bibitems with that issn # get bibitems url = "https://data.lexbib.org/query/sparql?format=json&query=PREFIX%20lwb%3A%20%3Chttp%3A%2F%2Fdata.lexbib.org%2Fentity%2F%3E%0APREFIX%20ldp%3A%20%3Chttp%3A%2F%2Fdata.lexbib.org%2Fprop%2Fdirect%2F%3E%0A%0Aselect%20%3FbibItem%20%3Fissn%20%3Fjournal%20where%0A%7B%20%3FbibItem%20ldp%3AP5%20lwb%3AQ3%20.%0A%20%20%3FbibItem%20ldp%3AP20%20%3Fissn%20.%0A%20%3Fjournal%20ldp%3AP5%20lwb%3AQ20%20.%0A%20%3Fjournal%20ldp%3AP20%20%3Fissn%20.%0A%20FILTER%20%28%3Fissn%20%3D%20%22" + issn + "%22%29%7D" done = False while (not done): try: r2 = requests.get(url) bindings2 = r2.json()['results']['bindings'] except Exception as ex:
"""Import BabelNet ID (P86) statements for LexBib terms from a CSV sheet.

The CSV (part of a Google spreadsheet used for manual BabelID annotation)
has columns 'term', 'bnid' and 'status'. For each not-yet-processed term
with a non-empty status:
  - a bnid starting with "bn:" is written as a P86 string claim;
  - an empty bnid with status "0" is written as an explicit P86 novalue;
both qualified with the status (P87) and referenced with the term URI (P3).
"""
import csv  # FIX: csv.DictReader was used without importing csv

import lwb

# get csv (part of google spreadsheet used for manual BabelID annotation)
# FIX: open with explicit utf-8 encoding, consistent with the sibling scripts.
with open('D:/LexBib/terms/term_bnid_status_labels.csv', encoding="utf-8") as csvfile:
    termdict = csv.DictReader(csvfile)
    termlist = list(termdict)
print(str(termlist))
totalrows = len(termlist)

count = 1
processed = []  # term URIs already handled, to skip duplicate CSV rows
for row in termlist:
    print('\nNow processing term ' + str(count) + ' of ' + str(totalrows) + ': ' + row["term"])
    lwbqid = lwb.getqid("Q7", row['term'])
    if row['term'] not in processed and row["status"] != "":
        if row['bnid'].startswith("bn:"):
            statement = lwb.updateclaim(lwbqid, "P86", row['bnid'], "string")
            qualifier = lwb.setqualifier(lwbqid, "P86", statement, "P87", row['status'], "string")
            reference = lwb.setref(statement, "P3", row['term'], "url")
        elif row['bnid'] == "" and row['status'] == "0":
            # no BabelNet ID exists for this term: record an explicit novalue
            statement = lwb.updateclaim(lwbqid, "P86", "novalue", "novalue")
            qualifier = lwb.setqualifier(lwbqid, "P86", statement, "P87", "0", "string")
            reference = lwb.setref(statement, "P3", row['term'], "url")
        processed.append(row['term'])
    count += 1
"""Set English label and Wikidata URI (P4) for LexBib country items.

Reads a Wikidata SPARQL query result (JSON list with 'country' URI and
'countryLabel' fields) and writes one Q8654 item per country via lwb.
"""
import json

import lwb

with open('D:/LexBib/countries/query.json', encoding="utf-8") as f:
    # FIX: renamed from `dict`, which shadowed the builtin type.
    countries = json.load(f)

count = 0
for item in countries:
    count += 1
    lwbqid = lwb.getqid("Q8654", item['country'])
    lwb.setlabel(lwbqid, "en", item['countryLabel'])
    lwb.stringclaim(lwbqid, "P4", item['country'])
    print('OK. ' + str(len(countries) - count) + ' countries left.')
"""Re-write SKOS definitions (P80) for concept items from a fix-up CSV.

Each CSV row carries a 'subject' (SKOS concept URI) and a 'def' text;
the definition is written as a string claim referenced with the URI (P3).
"""
import json
import lwb
import csv

# Mapping of SKOS predicates to LexBib properties, kept for reference;
# only skos:definition (P80) is applied by this script.
propmap = {
    # "http://www.w3.org/2004/02/skos/core#broader": "P72",
    # "http://www.w3.org/2004/02/skos/core#inScheme": "P74",
    # "http://www.w3.org/2004/02/skos/core#narrower": "P73",
    # "http://www.w3.org/2004/02/skos/core#topConceptOf": "P75",
    # "http://www.w3.org/2004/02/skos/core#note": "P81",
    # "http://www.w3.org/2004/02/skos/core#related": "P76",
    # "http://www.w3.org/2004/02/skos/core#closeMatch": "P77",
    # "http://www.w3.org/2004/02/skos/core#exactMatch": "P78",
    # "http://www.w3.org/2004/02/skos/core#relatedMatch": "P79",
    "http://www.w3.org/2004/02/skos/core#definition": "P80",
}

with open('D:/LexBib/terms/SKOS_defs_fix.csv', encoding="utf-8") as f:
    reader = csv.DictReader(f)
    for count, row in enumerate(reader, start=1):
        subject = row['subject']
        print('\nDef [' + str(count) + ']: ' + subject)
        concept = lwb.getqid("Q7", subject)
        claim = lwb.updateclaim(concept, "P80", row['def'], "string")
        lwb.setref(claim, "P3", subject, "url")
        # lwb.setlabel(lwbs, row['o']['xml:lang'], row['o']['value'].rstrip(), type="alias")