Example no. 1
import os

from owlready2 import onto_path, get_ontology
from pyspark.sql import DataFrame, SparkSession


def load_ontology_modules():
    global model_asset, model_fmea_iso, model_functional_system, model_iso
    onto_path.append("./owl-files")
    model_asset = get_ontology("asset.owl").load()
    model_fmea_iso = get_ontology("fmea-iso.owl").load()
    model_functional_system = get_ontology(
        "functional-system-ontology.owl").load()
    model_iso = get_ontology("LIS-14.owl").load()
    return model_asset, model_fmea_iso, model_functional_system, model_iso
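
# Minimal usage sketch for load_ontology_modules (assumes the ./owl-files
# directory and the four .owl files referenced above exist):
#
#     asset, fmea_iso, functional_system, iso = load_ontology_modules()
#     print(list(asset.classes())[:5])
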
def extract_ontology_terms(spark_session: SparkSession,
                           ontologies_path: str) -> DataFrame:
    """

    :param spark_session:
    :param ontologies_path:
    :return:
    """
    directory = os.fsencode(ontologies_path)
    ontology_terms = []
    if ontologies_path not in onto_path:
        onto_path.append(ontologies_path)
    for file in os.listdir(directory):
        filename = os.fsdecode(file)
        if filename.endswith(".owl"):
            ontology = get_ontology(filename).load()
            ontology_terms.extend(parse_ontology(ontology))
    ontology_terms_df = spark_session.createDataFrame(
        convert_to_row(term) for term in ontology_terms)
    return ontology_terms_df
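

# The parse_ontology() and convert_to_row() helpers used above are not part of
# this excerpt. A minimal, hypothetical sketch of what they might look like
# (names and fields are assumptions, not the original implementation):
def parse_ontology(ontology):
    """Yield (iri, label) pairs for every class in the loaded ontology."""
    for cls in ontology.classes():
        label = cls.label[0] if cls.label else cls.name
        yield (cls.iri, label)


def convert_to_row(term):
    """Turn an (iri, label) pair into a pyspark Row for createDataFrame()."""
    from pyspark.sql import Row
    return Row(iri=term[0], label=term[1])
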
def load_ontology():
    print('Ontology loading...')
    onto_path.append('ontology/')
    onto_path.append('ontology/judo-master')
    onto_path.append('ontology/lkif-core-master/')
    onto = get_ontology(
        'http://www.semanticweb.org/marko/ontologies/2020/3/lcr_onto.owl'
    ).load()

    return onto
    #                if ancestor in parent.ancestors())
    #
    # def closest_common_ancestors(self, cls1, cls2):
    #     """Returns a list  with closest_common_ancestor to cls1 and cls2"""
    #     distances = {}
    #     for ancestor in self.common_ancestors(cls1, cls2):
    #         distances[ancestor] = self.number_of_generations(cls1, ancestor) + self.number_of_generations(cls2,
    #         ancestor)
    #     return [ancestor for ancestor, distance in distances.items() if distance == min(distances.values())]
    def save_me(self, owl_file):
        if os.path.exists(owl_file):
            os.remove(owl_file)
        self.save(owl_file)


onto_path.append("./emmo/rdfxml")
emmo = get_ontology(name="emmo")
emmo.load()
# sync_reasoner_pellet([emmo])

onto = get_ontology("play.owl", )
onto.imported_ontologies.append(emmo)

onto.base_iri = 'http://www.emmc.info/emmc-csa/demo#'
# onto.base_iri = emmo.base_iri

print("classes : ", list(emmo.classes()))
print("individuals :", list(emmo.individuals()))
print("object_properties :", list(emmo.object_properties()))
print("properties :", emmo.search(iri="physical_quantity"))
print("base iri   :", emmo.base_iri)
Example no. 5
def checkConfigViability(configId,
                         region_geojson,
                         start_date,
                         end_date,
                         rulesdir=RULES_DIR,
                         ontdir=ONTOLOGY_DIR):
    print("Get model IO details...", end='\r')
    config = getModelConfigurationDetails(configId)

    # FIXME: Could also return configs without inputs ?
    if config.has_input is None:
        return None

    # FIXME: Change to getModelRules from model catalog (or config should already have it)
    # rules = getModelRulesFromFile(configId, rulesdir=rulesdir)
    rules = getModelRulesFromConfig(config)

    # FIXME: For now only proceeding if there are rules for this model
    if len(rules) == 0:
        return None

    print("\n{}\n{}".format(config.label[0], "=" * len(config.label[0])))

    if ontdir not in onto_path:
        onto_path.append(ontdir)

    relevant_input_variables = {}
    onto = get_ontology(EXECUTION_ONTOLOGY_URL).load()
    with onto:
        for r in rules:
            rule = Imp()
            rule.set_as_rule(r)
            ruleinputs = parseModelRule(rule)
            for rinput in ruleinputs:
                ivar = rinput["input"]
                if ivar not in relevant_input_variables:
                    relevant_input_variables[ivar] = []
                if rinput["variable"] not in relevant_input_variables[ivar]:
                    relevant_input_variables[ivar].append(rinput["variable"])

    print("{}".format(''.join([' '] * 100)), end='\r')  # Clear line

    input_djmvs = {}

    for input in config.has_input:
        input_label = input.label[0]

        # If this input is used in the rule
        if input_label in relevant_input_variables:
            # Get the variable to derive for this input
            derived_variables = relevant_input_variables[input_label]

            # Fetch dataset information for this input from the data catalog
            if input.has_presentation is not None:
                print("\nInput: {}".format(input_label))
                # Get Variables for this input
                variables = []
                for pres in input.has_presentation:
                    if pres.has_standard_variable is not None:
                        variables.append(
                            pres.has_standard_variable[0].label[0])
                #print("\tVariables: {}".format(str(variables)))

                print(
                    f"- Searching datasets containing variables '{variables}' for this region and time period...",
                    end='\r')
                datasets = getMatchingDatasets(variables, region_geojson,
                                               start_date, end_date)
                print("{}".format(''.join([' '] * 100)),
                      end='\r')  # Clear line

                djmvs = []
                if len(datasets) == 0:
                    print(
                        "\r- No datasets found in data catalog matching input variables {} for this region and time period."
                        .format(variables))
                else:
                    # Get datasets that match the input type as well
                    matches = datasets  #matchTypedDatasets(datasets, input.type)
                    if len(matches) == 0:
                        print(
                            "\r- No datasets found in data catalog for matching type"
                        )

                    for ds in matches:
                        meta = ds["dataset_metadata"]
                        print("\r- Dataset: {} ( Fetching files... )".format(
                            ds["dataset_name"]),
                              end='\r')

                        resources = getMatchingDatasetResources(
                            ds["dataset_id"], region_geojson, start_date,
                            end_date)

                        print("\r- Dataset: {} ( {} files... )       ".format(
                            ds["dataset_name"], len(resources)))

                        resource_urls = list(
                            map(lambda res: res["resource_data_url"],
                                resources))

                        if len(resources) == 0:
                            print("- No files found")
                            continue

                        print("\t- Deriving {} values for dataset...".format(
                            str(derived_variables)),
                              end='\r')
                        derived_variable_values = {}
                        for derived_variable in derived_variables:
                            if derived_variable not in derived_variable_values:
                                values = getDerivedVariableValues(
                                    config, meta["datatype"], resource_urls,
                                    derived_variable, region_geojson,
                                    start_date, end_date)
                                derived_variable_values.update(values)

                        for dv, dvv in derived_variable_values.items():
                            print("\t- {} = {}{}".format(
                                dv, dvv, ''.join([' '] * 50)))

                        djmvs.append({
                            "dataset":
                            ds,
                            "derived_variables":
                            derived_variable_values
                        })

                input_djmvs[input_label] = djmvs

    # Create Cross product combinations across all input djmvs
    keys = list(input_djmvs.keys())
    values = list(input_djmvs.values())
    products = list(itertools.product(*values))
    input_djmv_combos = []
    for prod in products:
        combo = {}
        for i in range(0, len(keys)):
            combo[keys[i]] = prod[i]
        input_djmv_combos.append(combo)

    if len(input_djmv_combos) > 0:
        print("\nConstraint Reasoning Over MOdel:")

    return_values = []

    # For each combination, create an onto, and run the rules
    # Check if the combination is valid
    count = 1
    for input_djmv_combo in input_djmv_combos:
        print("\n------ Data combination {} -------".format(count))
        count += 1
        for input_label, djmv in input_djmv_combo.items():
            print("- {} : {}".format(input_label,
                                     djmv["dataset"]["dataset_name"]))

        return_djmv_combo = []
        onto = get_ontology(EXECUTION_ONTOLOGY_URL).load()
        with onto:
            exobj = onto.ModelExecution()
            exobj.hasModelInput = []
            for r in rules:
                rule = Imp()
                rule.set_as_rule(r)
            for input_label, djmv in input_djmv_combo.items():
                return_djmv = copy.deepcopy(djmv)

                inobj = onto.ModelInput()
                inobj.hasLabel = input_label
                inobj.hasDataBinding = []
                exobj.hasModelInput.append(inobj)

                dsobj = onto.DataBinding()
                inobj.hasDataBinding.append(dsobj)
                dsobj.hasVariable = []

                return_derived_variables = []
                for dv, dvv in djmv["derived_variables"].items():
                    dvarobj = onto.Variable()
                    dvarobj.hasLabel = dv
                    dvarobj.hasValue = dvv
                    dsobj.hasVariable.append(dvarobj)
                    return_derived_variables.append({
                        "variable_id": dv,
                        "value": dvv
                    })
                return_djmv["dataset"][
                    "derived_variables"] = return_derived_variables
                return_djmv_combo.append({
                    "input_id": input_label,
                    "dataset": return_djmv["dataset"]
                })

            sync_reasoner_pellet(infer_property_values=True,
                                 infer_data_property_values=True,
                                 debug=0)

            valid = None
            recommended = None
            for exv in exobj.isValid:
                if exv and valid is None:
                    valid = True
                if not (exv):
                    valid = False
            for exr in exobj.isRecommended:
                if exr and recommended is None:
                    recommended = True
                if not (exr):
                    recommended = False

            print("")
            if recommended == True:
                print("\u2713 RECOMMENDED")
            elif recommended == False:
                print("\u2717 NOT RECOMMENDED")
            if valid == True:
                print("\u2713 VALID")
            elif valid == False:
                print("\u2717 INVALID")

            for reason in exobj.hasValidityReason:
                print("\t \u2713 {}".format(reason))
            for reason in exobj.hasInvalidityReason:
                print("\t \u2717 {}".format(reason))
            for reason in exobj.hasRecommendationReason:
                print("\t \u2713 {}".format(reason))
            for reason in exobj.hasNonRecommendationReason:
                print("\t \u2717 {}".format(reason))

            return_values.append({
                "inputs": return_djmv_combo,
                "validity": {
                    "valid": valid,
                    "validity_reasons": exobj.hasValidityReason,
                    "invalidity_reasons": exobj.hasInvalidityReason,
                    "recommended": recommended,
                    "recommendation_reasons": exobj.hasRecommendationReason,
                    "non_recommendation_reasons":
                    exobj.hasNonRecommendationReason
                }
            })
            onto.destroy()

    return return_values
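

# Minimal usage sketch for checkConfigViability. The configuration id, region
# polygon, and date strings below are placeholders (assumptions); the exact
# GeoJSON and date formats expected depend on the data-catalog helpers that
# are not shown in this excerpt.
if __name__ == "__main__":
    example_region = {
        "type": "Polygon",
        "coordinates": [[[-118.5, 34.0], [-118.0, 34.0],
                         [-118.0, 34.5], [-118.5, 34.5], [-118.5, 34.0]]],
    }
    results = checkConfigViability("<model-configuration-id>", example_region,
                                   "2017-01-01", "2017-12-31")
    for combo in (results or []):
        print(combo["validity"]["valid"], combo["validity"]["recommended"])
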
Example no. 6
import copy
import itertools
from modelcatalog import api_client, configuration
from modelcatalog.api.model_configuration_api import ModelConfigurationApi
from modelcatalog.api.model_configuration_setup_api import ModelConfigurationSetupApi
from modelcatalog.exceptions import ApiException
from cromo.catalogs.data_catalog import getMatchingDatasetResources, getMatchingDatasets
from cromo.constants import EXECUTION_ONTOLOGY_URL, MODEL_CATALOG_URL, DEFAULT_USERNAME, ONTOLOGY_DIR, RULES_DIR, getLocalName
from owlready2 import onto_path, get_ontology, Imp, sync_reasoner_pellet

from cromo.metadata.sensors import SENSORS

MC_API_CLIENT = api_client.ApiClient(
    configuration.Configuration(host=MODEL_CATALOG_URL))

onto_path.append(ONTOLOGY_DIR)


# Get all model configurations
def getAllModelConfigurations():
    try:
        # List all Model entities
        api_instance = ModelConfigurationApi(api_client=MC_API_CLIENT)
        configs = api_instance.modelconfigurations_get(
            username=DEFAULT_USERNAME)
        return configs
    except ApiException as e:
        print(
            "Exception when calling ModelConfigurationApi->modelconfigurations_get: %s\n"
            % e)
    return []
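

# Minimal usage sketch for the helper above; reading config.label mirrors how
# labels are accessed in the other examples in this listing.
if __name__ == "__main__":
    for cfg in getAllModelConfigurations():
        if cfg.label:
            print(cfg.label[0])
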
Example no. 7
    #     if descendant.name == ancestor.name:
    #         return n
    #     return min(self._number_of_generations(parent, ancestor, n + 1)
    #                for parent in descendant.get_parents()
    #                if ancestor in parent.ancestors())
    #
    # def closest_common_ancestors(self, cls1, cls2):
    #     """Returns a list  with closest_common_ancestor to cls1 and cls2"""
    #     distances = {}
    #     for ancestor in self.common_ancestors(cls1, cls2):
    #         distances[ancestor] = self.number_of_generations(cls1, ancestor) + self.number_of_generations(cls2, ancestor)
    #     return [ancestor for ancestor, distance in distances.items() if distance == min(distances.values())]



onto_path.append(("../emmo/rdfxml"))
emmo = get_ontology("emmo-properties.owl") #emmo-all-inferred.owl")
namespace = emmo.get_namespace(emmo.base_iri)
print("emmon", emmo)
emmo.load()


onto = get_ontology("play.owl", )
onto.imported_ontologies.append(emmo)

onto.base_iri = emmo.base_iri

print("base iri : ", default_world.ontologies)
print("classes : ", list(emmo.classes()))
print("individuals :", list(emmo.individuals()))
print("object_properties :", list(emmo.object_properties()))
Example no. 8
from django.template import loader, Context
from base64 import b64encode
import ssl
import requests
import psycopg2
import json
import nltk
from nltk.corpus import stopwords
from functools import reduce
from openpyxl import load_workbook
import logging
import os
import time
from owlready2 import get_ontology, onto_path

onto_path.append(os.path.dirname(__file__))
onto = get_ontology("CSO.owl")

userAndPass = b64encode(
    b"GpxNVedkBslJE6CTga0f56iRG4vzzmYU24gzH0g5:FGMx5x8Vjr7LyBokikzIT9t4uFSSa30HMhMGcEHZBy38FV2snjwew0l9o3ctugs1KRcIvBQyZDidYKuMKrWUGHCA0qRNYMvFg859QhpatbpBPZW3QNAeJzpHBAYNkBoy"
).decode("ascii")
headers = {'Authorization': 'Basic %s' % userAndPass}
nltk.download(info_or_id='stopwords', download_dir=os.path.dirname(__file__))

PLURALSIGHT_EXTRACTOR_VERSION = 1
UDEMY_EXTRACTOR_VERSION = 1
LYNDA_EXTRACTOR_VERSION = 1
COURSERA_EXTRACTOR_VERSION = 1
OFFLINE_EXTRACTOR_VERSION = 1
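# Note: get_ontology("CSO.owl") above only creates an ontology handle; the
# .owl file is actually read when onto.load() is called, which the rest of
# this module (not shown in this excerpt) presumably does before querying it:
#
#     onto.load()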

Example no. 9
from owlready2 import onto_path, get_ontology, default_world, destroy_entity
# BASE_DIR and MEDIA_URL are assumed to come from the project's Django settings


class Ontology():
    dir_path = str(BASE_DIR) + "/" + str(MEDIA_URL)
    file_name = ""
    path = ""
    onto_path.append(dir_path)
    base_iri = ""
    ontology = ""
    prefix = ""

    def __init__(self, file_name, prefix="myOnt"):
        self.file_name = file_name
        self.path = self.dir_path + self.file_name
        self.prefix = prefix
        self.load()

    def load(self):
        ontology = get_ontology(self.path)
        ontology.load()
        self.base_iri = ontology.base_iri
        self.ontology = ontology

    def query(self, query="", show_print=False):
        self.load()
        set_import = """
        PREFIX rdf:  <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
        PREFIX owl:  <http://www.w3.org/2002/07/owl#>
        PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
        PREFIX xsd:  <http://www.w3.org/2001/XMLSchema#>
        PREFIX %s:   <%s> \n
        """ % (self.prefix, self.base_iri)
        graph = default_world.as_rdflib_graph()
        query_SPARQL = set_import + query
        if show_print:
            print(query_SPARQL)
        rows = list(graph.query(query_SPARQL))
        result = []
        for row in rows:
            replaced_row = []
            for term in row:
                replaced_row.append(str(term).replace(self.base_iri, ''))
            result.append(replaced_row)
        return result

    def get_instances_of(self, by_class):
        query = """
        SELECT  ?instances
        WHERE {
                ?instances rdf:type owl:NamedIndividual .
                ?instances rdf:type %s:%s
            }
        """ % (self.prefix, by_class)
        result = self.query(query)
        instances = []
        for instance in result:
            instances += instance
        return instances

    def get_sub_classes_of(self, by_class):
        query = """
        SELECT ?instances
        WHERE {
                ?instances rdfs:subClassOf* %s:%s
            }
        """ % (self.prefix, by_class)
        result = self.query(query)
        instances = []
        for instance in result:
            instances += instance
        return instances

    def get_instance(self, by_name):
        query_properties = """
        SELECT  ?properties ?values
        WHERE {
            %s:%s rdf:type ?all_types .
            ?all_types owl:onProperty ?properties .
            ?all_types owl:someValuesFrom ?values
        }
        """ % (self.prefix, by_name)
        result_properties = self.query(query_properties)
        properties = []
        for row in result_properties:
            properties.append(row[0] + " " + row[1])

        query_class = """
        SELECT  ?classes
        WHERE {
            %s:%s rdf:type ?classes .
            ?classes rdf:type owl:Class
        }
        """ % (self.prefix, by_name)
        result_class = self.query(query_class)
        classes = []
        for row in result_class:
            classes += row

        try:
            query_class = """
            SELECT DISTINCT ?games
            WHERE {
                %s:%s rdf:type ?all_types .
                ?all_types owl:someValuesFrom ?habilidades .

                ?habilidades rdfs:subClassOf myOnt:Habilidade .
                ?habilidades rdfs:subClassOf ?Habtypes .
                ?Habtypes owl:someValuesFrom ?sujeitos .

                ?games rdf:type myOnt:Jogo .
                ?games rdf:type owl:NamedIndividual .
                ?games rdf:type ?gamesTypes .
                ?gamesTypes owl:someValuesFrom ?sujeitos .
            }
            """ % (self.prefix, by_name)
            result_games = self.query(query_class)
            games = []
            for row in result_games:
                games += row
        except BaseException:
            games = {}

        return {'classes': classes, 'properties': properties, 'games': games}

    def add_instance(self, payload):
        class_name = payload['class_name']
        instance_name = payload['instance_name']
        property_name = payload['property_name']
        property_values = payload['property_values']

        onto_file = self.ontology
        onto_class = self.ontology[class_name]
        onto_property = self.ontology[property_name]

        try:
            new_instance = onto_class(instance_name, namespace=onto_file)
            destroy_entity(new_instance)
            new_instance = onto_class(instance_name, namespace=onto_file)
            for value in property_values:
                onto_value = self.ontology[value]
                new_instance.is_a.append(onto_property.some(onto_value))
            onto_file.save(self.path)
            self.load()
        except BaseException:
            pass

    def delete_instance(self, class_name, instance_name):
        onto_file = self.ontology
        onto_class = self.ontology[class_name]

        try:
            new_instance = onto_class(instance_name, namespace=onto_file)
            destroy_entity(new_instance)
            onto_file.save(self.path)
            self.load()
        except BaseException:
            pass
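

# Minimal usage sketch for the Ontology wrapper above. The .owl file name is a
# placeholder (an assumption); "Jogo" and "Habilidade" are class names taken
# from the SPARQL queries in get_instance().
if __name__ == "__main__":
    ont = Ontology("my_ontology.owl", prefix="myOnt")
    print(ont.get_sub_classes_of("Habilidade"))
    print(ont.get_instances_of("Jogo"))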