class om():
    """Thin wrapper around a Brain-loaded ontology for object-property lookups.

    Loads the ontology at construction time and exposes helpers for listing
    object properties and rolling them into a primary data model dict.
    """

    def __init__(self, uri_or_fp):
        # uri_or_fp: ontology URI or file path, passed straight to Brain.learn.
        self.b = Brain()
        self.b.learn(uri_or_fp)
        self.o = self.b.getOntology()
        # uses .getEntity(<string> shortForm), .getShortForm(OWLEntity entity)
        self.bsfp = self.b.getBidiShortFormProvider()

    def get_OP_list(self):
        """Return short-form IDs of all object properties in the ontology signature."""
        out = []
        for r in self.o.getObjectPropertiesInSignature():
            out.append(self.bsfp.getShortForm(r))
        return out

    def roll_pdm(self, id_name):
        """Roll object properties into a dict keyed by their 'shorthand' annotation.

        id_name -- dict mapping entity short-form IDs to names, used to label
        local domains/ranges.
        Returns {shorthand: {'label', 'usage', 'defn', 'local_domain',
        'local_range'}}; the optional keys are present only when the
        corresponding annotations exist on the relation.
        """
        # TODO - get subsets + regular domain and range
        out = {}
        relations = self.get_OP_list()
        for r in relations:
            # Assumes everything in file has a shorthand!
            # Better to key primary model on shortFormID and then re-key after reading.
            shorthand = self.b.getAnnotation(r, "shorthand")
            out[shorthand] = {}
            out[shorthand]['label'] = self.b.getLabel(r)
            # BUGFIX: reset these per relation.  They were previously
            # initialised once before the loop, so when an annotation lookup
            # failed for one relation, the previous relation's domains/ranges
            # leaked into its entry.
            local_domains = []
            local_ranges = []
            try:
                out[shorthand]['usage'] = self.b.getAnnotation(r, "usage")
                out[shorthand]['defn'] = self.b.getAnnotation(r, "IAO_0000115")
                local_domains = self.b.getAnnotation(r, "local_domain").split(" ")
                local_ranges = self.b.getAnnotation(r, "local_range").split(" ")
            except Exception:
                # Best effort: these annotations are optional on some relations.
                pass
            if local_domains:
                ldd = {}
                for ld in local_domains:
                    if ld in id_name:
                        ldd[ld] = id_name[ld]
                    else:
                        ldd[ld] = ''
                out[shorthand]['local_domain'] = str(ldd)
            if local_ranges:
                lrd = {}
                for lr in local_ranges:
                    if lr in id_name:
                        lrd[lr] = id_name[lr]
                    else:
                        lrd[lr] = ''
                out[shorthand]['local_range'] = str(lrd)
        return out
class om():
    """Thin wrapper around a Brain-loaded ontology for object-property lookups.

    Loads the ontology at construction time and exposes helpers for listing
    object properties and rolling them into a primary data model dict.
    """

    def __init__(self, uri_or_fp):
        # uri_or_fp: ontology URI or file path, passed straight to Brain.learn.
        self.b = Brain()
        self.b.learn(uri_or_fp)
        self.o = self.b.getOntology()
        # uses .getEntity(<string> shortForm), .getShortForm(OWLEntity entity)
        self.bsfp = self.b.getBidiShortFormProvider()

    def get_OP_list(self):
        """Return short-form IDs of all object properties in the ontology signature."""
        out = []
        for r in self.o.getObjectPropertiesInSignature():
            out.append(self.bsfp.getShortForm(r))
        return out

    def roll_pdm(self, id_name):
        """Roll object properties into a dict keyed by their 'shorthand' annotation.

        id_name -- dict mapping entity short-form IDs to names, used to label
        local domains/ranges.
        Returns {shorthand: {'label', 'usage', 'defn', 'local_domain',
        'local_range'}}; the optional keys are present only when the
        corresponding annotations exist on the relation.
        """
        # TODO - get subsets + regular domain and range
        out = {}
        relations = self.get_OP_list()
        for r in relations:
            # Assumes everything in file has a shorthand!
            # Better to key primary model on shortFormID and then re-key after reading.
            shorthand = self.b.getAnnotation(r, "shorthand")
            out[shorthand] = {}
            out[shorthand]['label'] = self.b.getLabel(r)
            # BUGFIX: reset these per relation.  They were previously
            # initialised once before the loop, so when an annotation lookup
            # failed for one relation, the previous relation's domains/ranges
            # leaked into its entry.
            local_domains = []
            local_ranges = []
            try:
                out[shorthand]['usage'] = self.b.getAnnotation(r, "usage")
                out[shorthand]['defn'] = self.b.getAnnotation(r, "IAO_0000115")
                local_domains = self.b.getAnnotation(r, "local_domain").split(" ")
                local_ranges = self.b.getAnnotation(r, "local_range").split(" ")
            except Exception:
                # Best effort: these annotations are optional on some relations.
                pass
            if local_domains:
                ldd = {}
                for ld in local_domains:
                    if ld in id_name:
                        ldd[ld] = id_name[ld]
                    else:
                        ldd[ld] = ''
                out[shorthand]['local_domain'] = str(ldd)
            if local_ranges:
                lrd = {}
                for lr in local_ranges:
                    if lr in id_name:
                        lrd[lr] = id_name[lr]
                    else:
                        lrd[lr] = ''
                out[shorthand]['local_range'] = str(lrd)
        return out
for term in tc: oboid = term["extId"][0] known_term_list.append(oboid) if oboid in oboid_domId: if "domainId" in term["domainData"]: term["domainData"]["domainId"] = oboid_domId[oboid] # Terns that didn't formerly have domainId will not, at the point else: term["domainData"] = {"domainColour": [0,128,128,127],"domainId": oboid_domId[oboid],"domainSelected": "false"} # Setting default colour - will need to fix. else: if "domainId" in term["domainData"]: term["domainData"]["domainId"] = '' # Remove old domain_id, if present. owl_id = re.sub(':','_',oboid) name = ''; if fbbt.knowsClass(owl_id): name = fbbt.getLabel(owl_id) print "Ignoring %s %s as , according to the mapping file, it is not a painted domain." % (oboid, name) new_tc.append(term) # Adding records for any new terms for oboid, domid in oboid_domId.items(): if oboid not in known_term_list: new_tc.append({"domainData":{"domainColour": [0,128,128,127],"domainId": domid,"domainSelected": "false"}, "extId": [oboid],"nodeState": {"open": "false", "selected": "false"} }) # Anything generated here will need a new nodeId. update_names(new_tc, fbbt) fbbt.sleep() write_json(new_tc, "../json/treeContent_JFRC_BN_final.jso")
#!/usr/bin/env jython
import json
from uk.ac.ebi.brain.core import Brain
import sys

"""Takes a list of ontology URIs as args, writes a JSON lookup of ID:name."""

out = {}
for path in sys.argv[1:]:
    entities = []
    o = Brain()
    o.learn(path)
    # Gather every class and every individual under Thing.
    entities.extend(list(o.getSubClasses('Thing', 0)))
    entities.extend(list(o.getInstances('Thing', 0)))
    for e in entities:
        # Need check for if label exists. Should be able to do that by iterating
        # over all annotations on class to check. Will slow things down a lot...
        out[e] = o.getLabel(e)
    o.sleep()

# BUGFIX: the output file was never closed, so the final write could be left
# unflushed on interpreter exit.  try/finally guarantees close (works on all
# Jython 2.x versions without a __future__ import).
OUT = open('id_name.json', 'w')
try:
    OUT.write(json.dumps(out))
finally:
    OUT.close()
b.learn(v[sys.argv[1]])

# Get all classes
sc = b.getSubClasses('Thing', 0)  # set constraints

# Add nodes: one MERGE to create the node, one MATCH/SET to decorate it.
statements = []
for c in sc:
    label = ''
    try:
        label = b.getLabel(c)
        label = re.sub("'", "\\'", label)  # escape single quotes for Cypher
    except Exception:
        # Best effort: not every class has a label.
        pass
    is_obsolete = False
    try:
        if b.getAnnotation(c, 'deprecated') == 'true':
            is_obsolete = True
    except Exception:
        # Best effort: 'deprecated' annotation may be absent.
        pass
    # BUGFIX: the node map literals were unterminated -- '{ short_form : "%s")'
    # never closed the brace, which is a Cypher syntax error.  Close the map
    # with ' }' before the closing paren.
    # NOTE(review): %r renders Python True/False; Cypher parses booleans
    # case-insensitively, so this is accepted -- confirm server version.
    statements.append('MERGE (c:Class { short_form : "%s" })' % c)
    statements.append('MATCH (c:Class { short_form : "%s" }) SET c.label = "%s" SET c.is_obsolete = %r'
                      % (c, label, is_obsolete))
nc.commit_list(statements)
b.sleep()
from uk.ac.ebi.brain.core import Brain
import json

all_ont = Brain()
# BUGFIX: these lines previously *assigned* a string to the ``learn``
# attribute (``all_ont.learn = ("")``), clobbering the method instead of
# calling it.  The ontology URIs are still blank -- TODO fill them in.
all_ont.learn("")  # fbbi
all_ont.learn("")  # fbbt
all_ont.learn("")  # fbdv

# Build an ID -> label lookup for every class under Thing.
classlist = all_ont.getSubClasses("Thing", 0)
id_name = {}
for c in classlist:
    id_name[c] = all_ont.getLabel(c)

# BUGFIX: json.dump writes to a file object and returns None, so
# ``lookup.write(json.dump(id_name))`` raised a TypeError (missing fp arg) and
# would have written nothing; json.dumps returns the string we want.  Also
# close the file so the write is flushed.
lookup = open("lookup", "w")
try:
    lookup.write(json.dumps(id_name))
finally:
    lookup.close()
#!/usr/bin/env jython
from uk.ac.ebi.brain.core import Brain
import json

# Load the GO relations (gorel) extension ontology.
gorel = Brain()
gorel.learn("http://purl.obolibrary.org/obo/go/extensions/gorel.owl")

# Annotation-extension relations, declared as an explicit list for now.
# Would be better to pull the list of AE rels automatically from the file,
# but with the current structure that would require pulling subproperties,
# which Brain can't do.
relations = [
    "GOREL_0001006"
]

# For each relation, dump its "usage" annotation to a markdown file named
# after the relation's label.
for rel in relations:
    rel_label = gorel.getLabel(rel)
    rel_usage = gorel.getAnnotation(rel, "usage")
    out_md = open("../.gitdown/" + rel_label + "_usage.md", "w")
    out_md.write(rel_usage)
    out_md.close()

gorel.sleep()
inds = vfb.getInstances("Thing", 0) # Could grab from neo4J avoiding Brain #statements = ["MATCH (i:Individual) RETURN i"] #r = nc.commit_list(statements) #inds = {some proc step here} # Iterate over individuals, looking up types and adding them statements = [] for i in inds: types = vom.typeAxioms2pdm(sfid = i) for t in types: if t['isAnonymous']: rel = re.sub(' ', '_', vfb.getLabel(t['relId'])) # Using related link. Can denormalise with generic edge naming script. s = "MATCH (I:Individual), (C:Class) WHERE I.short_form = '%s'" \ "AND C.short_form = '%s' MERGE (I)-[r:Related {label: '%s', short_form: '%s' }]->(C)" \ % (i, t['objectId'], rel, t['relId']) # statements.append(s) facts = vom.get_triples(sfid = i) for f in facts: rel = re.sub(' ', '_', vfb.getLabel(f[1])) s = "MATCH (I1:Individual), (I2:Individual) " \ "WHERE I1.short_form = '%s' and I2.short_form = '%s' " \ "MERGE (I1)-[r:Related { label: '%s', short_form: '%s' }]-(I2)" \ % (f[0], f[2], rel, f[1]) statements.append(s) nc.commit_list_in_chunks(statements, verbose = True, chunk_length = 1000)