def loadOntology(self, reasoner=Reasoner.NONE, memory_java="10240"):
    # self.world = World()

    # Method from owlready
    self.onto = get_ontology(self.urionto).load()
    # self.onto = self.world.get_ontology(self.urionto).load()
    # self.onto.load()

    # self.classifiedOnto = get_ontology(self.urionto + '_classified')

    owlready2.reasoning.JAVA_MEMORY = memory_java
    owlready2.set_log_level(9)

    if reasoner == Reasoner.PELLET:
        try:
            with self.onto:  # it does add inferences to ontology
                # Is this wrt data assertions? Check if necessary:
                # infer_property_values=True, infer_data_property_values=True
                logging.info("Classifying ontology with Pellet...")
                sync_reasoner_pellet()  # it does add inferences to ontology

                unsat = len(list(self.onto.inconsistent_classes()))
                logging.info("Ontology successfully classified.")
                if unsat > 0:
                    logging.warning(
                        f"There are {unsat} unsatisfiable classes: "
                        f"{list(self.onto.inconsistent_classes())}")
        except Exception:
            logging.info("Classifying with Pellet failed.")
    elif reasoner == Reasoner.HERMIT:
        try:
            with self.onto:  # it does add inferences to ontology
                logging.info("Classifying ontology with HermiT...")
                sync_reasoner()  # HermiT does not work very well...

                unsat = len(list(self.onto.inconsistent_classes()))
                logging.info("Ontology successfully classified.")
                if unsat > 0:
                    logging.warning(
                        f"There are {unsat} unsatisfiable classes: "
                        f"{list(self.onto.inconsistent_classes())}")
                    return (f"{unsat} unsatisfiable classes: "
                            f"{list(self.onto.inconsistent_classes())}")
        except Exception:
            logging.info("Classifying with HermiT failed.")
    # End Classification

    # report problem with unsat (Nothing not declared....)
    # print(list(self.onto.inconsistent_classes()))

    self.graph = default_world.as_rdflib_graph()
    logging.info("There are {} triples in the ontology".format(len(self.graph)))
def sparql_query(self, query: str, with_prefixes: bool = True,
                 sync_reasoner: bool = False) -> Union[list, bool]:
    """Queries the ontology in SPARQL.

    Args:
        query: A SPARQL query.
        with_prefixes: Whether the prefixes will be prepended to the query.
        sync_reasoner: Whether to run the Pellet reasoner before querying.

    Returns:
        A list of result rows, or a bool for ASK queries.
    """
    if sync_reasoner:
        sync_reasoner_pellet()
    if with_prefixes:
        # q = self.rdflib_graph.query(query, initNs=self.iris)
        m = re.match(r"PREFIX (.+): <(.+)>", query)
        if m is not None:
            raise AssertionError("Prefixes are already included.")
        query = build_prefixes(self.iris) + query
    q = self.rdflib_graph.query(query)
    if q.type == 'ASK':
        return q.askAnswer
    return list(q)
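# A minimal, self-contained sketch (not from the original code) of the same
# ASK-vs-SELECT handling, querying an owlready2 world through rdflib; the IRI
# "http://example.org/demo.owl" and the Person class are hypothetical.
from owlready2 import Thing, default_world, get_ontology

demo = get_ontology("http://example.org/demo.owl")
with demo:
    class Person(Thing):
        pass

demo.Person("alice")
graph = default_world.as_rdflib_graph()

# ASK results carry a boolean in .askAnswer ...
ask = graph.query("ASK { ?s a <http://example.org/demo.owl#Person> }")
print(ask.askAnswer)  # True

# ... while SELECT results are iterable rows, as in sparql_query above.
rows = graph.query("SELECT ?s WHERE { ?s a <http://example.org/demo.owl#Person> }")
print(list(rows))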
def inserter(file, onto, save_path, reasoner=False):
    with open(file, 'rb') as f:
        triples = pickle.load(f)
    for t in triples:
        subject = t['subject']['value'].split('/')[-1]
        predicate = t['predicate']['value'].split('/')[-1]
        obj = convertisor(t['object'])
        if obj is not None:
            s = onto.BikeStation(subject)
            # Map each predicate name to the corresponding property list.
            map_list = {
                'Name': s.Name,
                'Lat': s.Lat,
                'Long': s.Long,
                'Lastupdate': s.Lastupdate,
                'AvailableBikes': s.AvailableBikes,
                'AvailableBikeStands': s.AvailableBikeStands
            }
            map_list[predicate].append(obj)
    pre_reasoner(onto)
    if reasoner:
        with onto:
            sync_reasoner_pellet()
    onto.save(save_path)
def pellet_reason(self):
    """Load the ontology from disk, reason over it, and return it in owlready2 format."""
    self.world = World()
    onto = self.world.get_ontology(join(self.path, self.owl_file + ".owl")).load()
    sync_reasoner_pellet(self.world)
    return onto
def run(*args):
    if reasoner == 'Pellet':
        owlready2.sync_reasoner_pellet(*args)
    elif reasoner == 'HermiT':
        owlready2.sync_reasoner(*args)
    else:
        raise ValueError('unknown reasoner %r. Supported reasoners '
                         'are "Pellet" and "HermiT".' % reasoner)
def ExecuteReasoner(self):
    world = World()
    try:
        world.get_ontology("file://" + self.input).load()
        sync_reasoner_pellet(world, infer_property_values=True)
        world.save(file=self.output)
        return True
    except Exception:
        return False
def perform_reasoning(self):
    return_value = False
    with self.lock:
        with self.onto:
            try:
                sync_reasoner_pellet(infer_property_values=True,
                                     infer_data_property_values=True)
                return_value = True
            except Exception as err:
                # Save the ontology state for inspection before re-raising.
                self.onto.save(file="error.owl", format="rdfxml")
                raise err
    return return_value
def perform_reasoning(self):
    return_value = False
    with self.ontology_lock:
        with self.onto:
            try:
                sync_reasoner_pellet(infer_property_values=True,
                                     infer_data_property_values=True)
                return_value = True
            except Exception as err:
                logging.exception("{0}".format(err))
                return False
                # raise err
    return return_value
def __init__(self, path, min_size_of_concept=1, max_concept_size_ratio=1.0,
             use_pellet=False):
    self.path = path
    self.onto = World().get_ontology(self.path).load(reload=True)
    if use_pellet:
        with self.onto:
            sync_reasoner_pellet(x=self.onto.world,
                                 infer_property_values=True,
                                 infer_data_property_values=True)
    self.name = self.onto.name
    self.concepts = dict()
    self.thing = None
    self.nothing = None
    # Next time, think about including this into Concepts.
    self.top_down_concept_hierarchy = defaultdict(set)
    self.top_down_direct_concept_hierarchy = defaultdict(set)
    self.down_top_concept_hierarchy = defaultdict(set)
    self.down_top_direct_concept_hierarchy = defaultdict(set)
    self.concepts_to_leafs = defaultdict(set)
    self.property_hierarchy = None
    self.parse()

    self.min_size_of_concept = min_size_of_concept
    self.max_size_of_concept = len(self.thing.instances) * max_concept_size_ratio

    self.role_log = dict()
    self.role_log_cardinality = dict()
    self.__concept_generator = ConceptGenerator(
        self,
        concepts=self.concepts,
        T=self.thing,
        Bottom=self.nothing,
        onto=self.onto,
        min_size_of_concept=self.min_size_of_concept,
        max_size_of_concept=self.max_size_of_concept)
def __iter__(self) -> Iterator[Optional[Tuple[Ontology, Scene]]]:
    for index, scene in enumerate(self.variation_dimensions):
        self.iterations += 1
        if scene is not None:
            world = World(backend='sqlite', filename=':memory:',
                          dbname=f"scene_db_{index:04}")
            with world.get_ontology(self.base_iri) as onto:
                self.domain_factory(onto)
                self.instantiate_scene(scene, onto)
                try:
                    sync_reasoner_pellet(x=world,
                                         infer_data_property_values=True,
                                         infer_property_values=True,
                                         debug=self.debug)
                except Exception as e:
                    # Dump the failing ontology for inspection before re-raising.
                    onto.save("error.rdf.xml")
                    raise e
            yield onto, scene
        else:
            yield None
        if self.max_tries is not None and self.iterations >= self.max_tries:
            break
def reasoner(data):
    # print(data.shape)
    # print("Inside OntoParser-Reasoner")

    # Create a new world to isolate the reasoning results.
    # ontos = {n: World().get_ontology(onto_dir_path).load()
    #          for n in range(data.shape[0])}
    new_world = World()

    # Load the ontology.
    onto = new_world.get_ontology(onto_dir_path).load()

    # Create the Lectura individuals that will be used by the rules.
    onto.Variable_Dil1_Entrada.tieneValorPropuesto = float(data[0])
    onto.Lectura_AGV_Entrada.tieneValorCensado = float(data[1])
    onto.Lectura_DQO_Entrada.tieneValorCensado = float(data[2])
    onto.Lectura_Biomasa_Salida.tieneValorCensado = float(data[3])
    onto.Lectura_DQO_Salida.tieneValorCensado = float(data[4])
    onto.Lectura_AGV_Salida.tieneValorCensado = float(data[5])
    onto.Variable_Dil2_Entrada.tieneValorPropuesto = float(data[7])
    onto.Lectura_Ace_Salida.tieneValorCensado = float(data[9])
    onto.Lectura_xa_Salida.tieneValorCensado = float(data[10])
    onto.Lectura_xm_Salida.tieneValorCensado = float(data[11])
    onto.Lectura_xh_Salida.tieneValorCensado = float(data[12])
    onto.Lectura_mox_Salida.tieneValorCensado = float(data[13])
    onto.Lectura_imec_Salida.tieneValorCensado = float(data[14])
    onto.Lectura_QH2_Salida.tieneValorCensado = float(data[15])

    # Apply the rules using the Pellet reasoner.
    sync_reasoner_pellet(onto, infer_data_property_values=True,
                         infer_property_values=True, debug=0)

    # Get the new state inferred for each process.
    infered_states = get_infered_states(onto)
    return json.dumps(infered_states), onto
draft2.published = [volume]
paper1.submittedTo = [draft1]
paper2.submittedTo = [draft2]
chair.chairOf = [workshop]
editor.editorOf = [journal]

# When the reasoner is activated,
# if you set the draft to not accepted (i.e. draft1.accepted = False),
# then the draft cannot be published to an edition!
#
# Restrictions are also checked; for example, paper2.submittedTo = [draft1] will fail.

# owl.sync_reasoner_hermit()
owl.sync_reasoner_pellet([onto], infer_property_values=True,
                         infer_data_property_values=True, debug=show_debug)

print('Inconsistent classes:', list(owl.default_world.inconsistent_classes()))

# The reasoner inferred that the Draft was an AcceptedDraft.
print(draft1.__class__)

# For more complex queries: use SPARQL with RDFlib
print('Subclasses of :Person = ', onto.search(subclass_of=onto.Person))

file = "out/reasoner.rdf"
onto.save(file=file, format="rdfxml")
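# A minimal, self-contained sketch (not the original ontology) of the
# reclassification shown above: Pellet moves an individual into a defined
# class once its property values satisfy the class definition. The IRI and
# names are hypothetical, and Pellet requires Java to be installed.
from owlready2 import FunctionalProperty, Thing, get_ontology, sync_reasoner_pellet

drafts = get_ontology("http://example.org/drafts.owl")
with drafts:
    class Draft(Thing):
        pass

    class accepted(Draft >> bool, FunctionalProperty):
        pass

    class AcceptedDraft(Draft):
        equivalent_to = [Draft & accepted.value(True)]

d = drafts.Draft("draft1")
d.accepted = True
sync_reasoner_pellet([drafts], infer_property_values=True,
                     infer_data_property_values=True)
print(d.__class__)  # expected: drafts.AcceptedDraft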
def build_table(self, **kwargs):
    """
    Builds (or loads) the projection table with the given parameters.

    :key check_consistency: shall a consistency check be run before building
        the projection table? (default is False)
    :key mode: specifies how the projection table must be built:
        - 'disjunction': the projection value is computed by looking up the
          disjunction pairs (faster, but prone to errors);
        - 'from_file': the projection table is loaded from the path provided
          in the 'table_path' key;
        - 'reasoner': the table is built using the results obtained from the
          reasoner (slowest, but correct).
    :key table_path: if mode = 'from_file', specifies the file path from
        which the table must be loaded.
    :return:
    """
    # build the projection table
    _table = pd.DataFrame(data=_PT_UNCERTAIN,
                          index=self.individuals.keys(),
                          columns=self.classes.keys())

    # run the reasoner
    check_consistency = kwargs.get('check_consistency') or False
    if check_consistency:
        with self.onto:
            # runs a consistency check
            or2.sync_reasoner_pellet()

    projection_mode = kwargs.get('mode') or 'reasoner'
    if projection_mode == 'disjunction':
        # builds the disjunction lookup dict
        disjunctions = {c: set() for c in self.classes.keys()}
        for pair in self.onto.disjoint_classes():
            first, second = pair.entities
            disjunctions[str(first)].add(second)
            disjunctions[str(second)].add(first)

        # faster method, but less accurate
        for c_name, c in self.classes.items():
            true_set = set(c.instances())
            false_set = set()
            for d in disjunctions[c_name]:
                false_set = false_set.union(d.instances())
            for t in true_set:
                _table.at[str(t), c_name] = _PT_TRUE
            for f in false_set:
                _table.at[str(f), c_name] = _PT_FALSE
    elif projection_mode == 'from_file':
        table_path = kwargs.get('table_path')
        if table_path:
            _table = pd.read_csv(table_path, delimiter=";", index_col=0)
        else:
            raise ValueError("mode 'from_file' requires a 'table_path' key.")
    # Build using the reasoner
    elif projection_mode == 'reasoner':
        print("WARNING: This method introduces a lot of noise in the ontology, "
              "use it only to build a projection table!")
        # list that holds all the references to the NOT_ classes
        classes_with_complements = []
        with self.onto:
            for c_name, c_item in self.classes.items():
                neg_class = types.new_class('NOT_' + c_item.name, (or2.Thing,))
                neg_class.equivalent_to = [or2.Not(c_item)]
                classes_with_complements.append((c_name, c_item, neg_class))

        # run the reasoner
        with self.onto:
            or2.sync_reasoner_pellet()

        # set each cell to the respective value
        for c_name, c, not_c in classes_with_complements:
            true_set = set(c.instances())
            false_set = set(not_c.instances())
            for t in true_set:
                _table.at[str(t), c_name] = _PT_TRUE
            for f in false_set:
                _table.at[str(f), c_name] = _PT_FALSE
    else:
        raise ValueError("Unrecognized mode '%s'." % projection_mode)

    # assign the newly-built table to the inner attribute
    self.projection_table = _table
def sync(tmp_file, java_path):
    """Run reasoning on the ontology."""
    owlready2.JAVA_EXE = java_path
    with onto:
        sync_reasoner_pellet(debug=0, infer_property_values=False)
    onto.save(tmp_file)
# inverse_property = has_type

# class meter(emmo.measurement_unit):
#     label = ['m']


class VAR(emmo.physical_quantity):
    is_a = [has_unit.exactly(1, meter), has_type.exactly(3, real)]


class is_function_of(VAR >> VAR):
    pass


a = VAR("a")
b = VAR("b")
c = VAR("c")
a.is_function_of = [onto.b, onto.c]

onto.sync_attributes()
sync_reasoner_pellet([onto])

owlfile = "play.owl"
onto.save_me(owlfile)

print(a)
print(a.is_function_of)
print("end")
def sync_reasoner(self, **kwargs):
    # Thin wrapper: run Pellet over this instance's isolated world.
    sync_reasoner_pellet(x=self.world, **kwargs)
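# A minimal, self-contained sketch of the x=world pattern wrapped above:
# running Pellet over an isolated World instead of the global default_world.
# The IRI is hypothetical, and Pellet requires Java to be installed.
from owlready2 import Thing, World, sync_reasoner_pellet

world = World()
onto = world.get_ontology("http://example.org/demo.owl")
with onto:
    class A(Thing):
        pass

sync_reasoner_pellet(x=world, infer_property_values=True,
                     infer_data_property_values=True, debug=0)
print(list(world.inconsistent_classes()))  # [] when consistent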
def checkConfigViability(configId, region_geojson, start_date, end_date,
                         rulesdir=RULES_DIR, ontdir=ONTOLOGY_DIR):
    print("Get model IO details...", end='\r')
    config = getModelConfigurationDetails(configId)

    # FIXME: Could also return configs without inputs?
    if config.has_input is None:
        return None

    # FIXME: Change to getModelRules from model catalog (or config should already have it)
    # rules = getModelRulesFromFile(configId, rulesdir=rulesdir)
    rules = getModelRulesFromConfig(config)

    # FIXME: For now, only proceed if there are rules for this model
    if len(rules) == 0:
        return None

    print("\n{}\n{}".format(config.label[0], "=" * len(config.label[0])))

    if ontdir not in onto_path:
        onto_path.append(ontdir)

    relevant_input_variables = {}
    onto = get_ontology(EXECUTION_ONTOLOGY_URL).load()
    with onto:
        for r in rules:
            rule = Imp()
            rule.set_as_rule(r)
            ruleinputs = parseModelRule(rule)
            for rinput in ruleinputs:
                ivar = rinput["input"]
                if ivar not in relevant_input_variables:
                    relevant_input_variables[ivar] = []
                if rinput["variable"] not in relevant_input_variables[ivar]:
                    relevant_input_variables[ivar].append(rinput["variable"])

    print("{}".format(''.join([' '] * 100)), end='\r')  # Clear line

    input_djmvs = {}
    for input in config.has_input:
        input_label = input.label[0]
        # If this input is used in the rule
        if input_label in relevant_input_variables:
            # Get the variables to derive for this input
            derived_variables = relevant_input_variables[input_label]
            # Fetch dataset information for this input from the data catalog
            if input.has_presentation is not None:
                print("\nInput: {}".format(input_label))
                # Get variables for this input
                variables = []
                for pres in input.has_presentation:
                    if pres.has_standard_variable is not None:
                        variables.append(pres.has_standard_variable[0].label[0])
                # print("\tVariables: {}".format(str(variables)))
                print(f"- Searching datasets containing variables '{variables}' "
                      f"for this region and time period...", end='\r')
                datasets = getMatchingDatasets(variables, region_geojson,
                                               start_date, end_date)
                print("{}".format(''.join([' '] * 100)), end='\r')  # Clear line

                djmvs = []
                if len(datasets) == 0:
                    print("\r- No datasets found in data catalog matching input "
                          "variables {} for this region and time period."
                          .format(variables))
                else:
                    # Get datasets that match the input type as well
                    matches = datasets  # matchTypedDatasets(datasets, input.type)
                    if len(matches) == 0:
                        print("\r- No datasets found in data catalog for matching type")
                    for ds in matches:
                        meta = ds["dataset_metadata"]
                        print("\r- Dataset: {} ( Fetching files... )".format(
                            ds["dataset_name"]), end='\r')
                        resources = getMatchingDatasetResources(
                            ds["dataset_id"], region_geojson, start_date, end_date)
                        print("\r- Dataset: {} ( {} files... )".format(
                            ds["dataset_name"], len(resources)))
                        resource_urls = list(
                            map(lambda res: res["resource_data_url"], resources))
                        if len(resources) == 0:
                            print("- No files found")
                            continue
                        print("\t- Deriving {} values for dataset...".format(
                            str(derived_variables)), end='\r')
                        derived_variable_values = {}
                        for derived_variable in derived_variables:
                            if derived_variable not in derived_variable_values:
                                values = getDerivedVariableValues(
                                    config, meta["datatype"], resource_urls,
                                    derived_variable, region_geojson,
                                    start_date, end_date)
                                derived_variable_values.update(values)
                        for dv, dvv in derived_variable_values.items():
                            print("\t- {} = {}{}".format(dv, dvv,
                                                         ''.join([' '] * 50)))
                        djmvs.append({
                            "dataset": ds,
                            "derived_variables": derived_variable_values
                        })
                input_djmvs[input_label] = djmvs

    # Create cross-product combinations across all input djmvs
    keys = list(input_djmvs.keys())
    values = list(input_djmvs.values())
    products = list(itertools.product(*values))
    input_djmv_combos = []
    for prod in products:
        combo = {}
        for i in range(0, len(keys)):
            combo[keys[i]] = prod[i]
        input_djmv_combos.append(combo)

    if len(input_djmv_combos) > 0:
        print("\nConstraint Reasoning Over Model:")
        return_values = []
        # For each combination, create an ontology and run the rules
        # to check whether the combination is valid
        count = 1
        for input_djmv_combo in input_djmv_combos:
            print("\n------ Data combination {} -------".format(count))
            count += 1
            for input_label, djmv in input_djmv_combo.items():
                print("- {} : {}".format(input_label,
                                         djmv["dataset"]["dataset_name"]))
            return_djmv_combo = []
            onto = get_ontology(EXECUTION_ONTOLOGY_URL).load()
            with onto:
                exobj = onto.ModelExecution()
                exobj.hasModelInput = []
                for r in rules:
                    rule = Imp()
                    rule.set_as_rule(r)
                for input_label, djmv in input_djmv_combo.items():
                    return_djmv = copy.deepcopy(djmv)
                    inobj = onto.ModelInput()
                    inobj.hasLabel = input_label
                    inobj.hasDataBinding = []
                    exobj.hasModelInput.append(inobj)

                    dsobj = onto.DataBinding()
                    inobj.hasDataBinding.append(dsobj)
                    dsobj.hasVariable = []

                    return_derived_variables = []
                    for dv, dvv in djmv["derived_variables"].items():
                        dvarobj = onto.Variable()
                        dvarobj.hasLabel = dv
                        dvarobj.hasValue = dvv
                        dsobj.hasVariable.append(dvarobj)
                        return_derived_variables.append({
                            "variable_id": dv,
                            "value": dvv
                        })
                    return_djmv["dataset"]["derived_variables"] = \
                        return_derived_variables
                    return_djmv_combo.append({
                        "input_id": input_label,
                        "dataset": return_djmv["dataset"]
                    })

                sync_reasoner_pellet(infer_property_values=True,
                                     infer_data_property_values=True, debug=0)

                valid = None
                recommended = None
                for exv in exobj.isValid:
                    if exv and valid is None:
                        valid = True
                    if not exv:
                        valid = False
                for exr in exobj.isRecommended:
                    if exr and recommended is None:
                        recommended = True
                    if not exr:
                        recommended = False

                print("")
                if recommended is True:
                    print("\u2713 RECOMMENDED")
                elif recommended is False:
                    print("\u2717 NOT RECOMMENDED")
                if valid is True:
                    print("\u2713 VALID")
                elif valid is False:
                    print("\u2717 INVALID")
                for reason in exobj.hasValidityReason:
                    print("\t \u2713 {}".format(reason))
                for reason in exobj.hasInvalidityReason:
                    print("\t \u2717 {}".format(reason))
                for reason in exobj.hasRecommendationReason:
                    print("\t \u2713 {}".format(reason))
                for reason in exobj.hasNonRecommendationReason:
                    print("\t \u2717 {}".format(reason))

                return_values.append({
                    "inputs": return_djmv_combo,
                    "validity": {
                        "valid": valid,
                        "validity_reasons": exobj.hasValidityReason,
                        "invalidity_reasons": exobj.hasInvalidityReason,
                        "recommended": recommended,
                        "recommendation_reasons": exobj.hasRecommendationReason,
                        "non_recommendation_reasons": exobj.hasNonRecommendationReason
                    }
                })
            onto.destroy()
        return return_values
# insert developers
dados = pd.read_csv(
    '/home/tales/.config/spyder-py3/ExFindO/input/author-pull-nodejs.csv',
    sep=";", header=0)
array = dados.values
size = len(array)

for k in range(size):
    pull = onto.PullRequest("PullRequest" + str(int(array[k][0])))
    dev = onto.Developer(str(array[k][1]))
    dev.hasPullRequest.append(pull)

sync_reasoner_pellet(infer_property_values=True,
                     infer_data_property_values=True)

# get all PullRequests
PR = onto.search(type=onto.PullRequest)
N = onto.search(type=onto.NULL)

# for each PullRequest
for i in PR:
    top = i.hasTopic
    stop = i.hasSpecificTopic
    mod = i.hasModule
    di = i.hasDirectory
    ap = i.hasPA
    cont = 1
    # If empty, fall back to NULL so the loop below is not blocked.
    if not top:
        top = N