def __init__(self, peptidoform_string=None, verbose=None):

    self.peptidoform_string = None
    self.peptide_sequence = None
    self.residue_modifications = {}
    self.terminal_modifications = {}
    self.unlocalized_mass_modifications = {}
    self.neutral_mass = None

    self.response = Response()
    self.is_valid = False

    # Load the controlled vocabularies once, on first use
    if len(ontologies) == 0:
        ontologies['UNIMOD'] = Ontology(
            filename='/net/dblocal/wwwspecial/proteomecentral/extern/CVs/unimod.obo')
        ontologies['PSI-MOD'] = Ontology(
            filename='/net/dblocal/wwwspecial/proteomecentral/extern/CVs/PSI-MOD.obo')

    if peptidoform_string:
        self.parse(peptidoform_string, verbose=verbose)
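A hedged usage sketch for the constructor above. The enclosing class name is not shown in this excerpt, so "Peptidoform" and the ProForma-style input string below are assumptions for illustration only:

# Hypothetical usage; the class name and the input string are assumptions.
p = Peptidoform('PEPT[Phospho]IDEK')
if p.is_valid:
    print(p.peptide_sequence, p.neutral_mass)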
Example #2
def main():
    if sys.argv[-1] == '--collect':
        url_pois = 'https://pt.foursquare.com/explore?mode=url&ne=-29.358988%2C-50.837817&q=Sele%C3%A7%C3%B5es%20principais&sw=-29.41889%2C-50.887942'
        url_city = 'http://www.dataviva.info/pt/location/5rs020102'

        e = Extraction(url_pois, url_city)
        e.poi_data_extraction()
        e.city_data_extraction()

    # Generate a profiling report of the raw dataset
    file = 'foursquare_data.csv'
    df = pd.read_csv(file, parse_dates=True, encoding='UTF-8')
    profile = pandas_profiling.ProfileReport(df)
    profile.to_file(outputfile='dataset_report.html')

    P = Process(file)
    df = P.method()

    df_report = pd.read_csv('preprocessed.csv',
                            parse_dates=True,
                            encoding='UTF-8')
    profile = pandas_profiling.ProfileReport(df_report)
    profile.to_file(outputfile='preprocessed_dataset_report.html')

    R = Recommendation(df)
    R.pattern_recommendation()
    R.new_recommendation()
    R.compare()
    # R.test_rec()

    ont = Ontology()
    ont.write_owl()
Example #3
    def decode_resource_path(self, path):
        result = None
        if path:
            decoded = Ontology(self.env, 'ns.medium.resource.url.decode')
            decoded['directory'], decoded['file name'] = os.path.split(path)
            if 'file name' in decoded and 'directory' in decoded:

                # Normalize the directory
                # This will replace path fragments with canonical values
                decoded['directory'] = self.normalize(decoded['directory'])

                # Check if the directory resides in a volume
                for volume in self.volume.element.values():
                    if os.path.commonprefix((volume.node['real'], decoded['directory'])) == volume.node['real']:
                        decoded['volume'] = volume.key

                # If a UMID was encoded in the name, infer the home id and media kind
                # This will also trigger rule.medium.resource.filename.parse
                if 'umid' in decoded:
                    umid = Umid.decode(decoded['umid'])
                    if umid:
                        decoded['media kind'] = umid.media_kind
                        decoded['home id'] = umid.home_id

                # Make the elements of the decoded ontology kernel elements of the result
                result = decoded.project('ns.medium.resource.location')
                for k, v in decoded.iteritems():
                    result[k] = v

                # Set the host and domain
                result['host'] = self.host
                result['domain'] = self.domain
        return result
Example #4
def parse(oboFile, typedefs):
    """
    Parses an OBO (ontology file) creating a corresponding Ontology
    object composed of Term objects that relate to the terms found in the
    ontology file.
    """
    ontology = Ontology()

    ## Use groupby to group the lines of our file into chunks of text, some 
    ## stanzas, some typedefs, and other metadata to be processed 
    with open(oboFile) as f:
        for (key, group) in groupby(f, is_data):
            if key:
                header = group.next().rstrip('\n')

                if header.find('[Typedef]') != -1:
                    dataDict = get_data_as_dict(group)
                    ontology.add_typedef(dataDict['name'])
                elif header.find('[Term]') != -1:
                    dataDict = get_data_as_dict(group, typedefs)
                    ontology.add_term(build_term(dataDict))
                else:                    
                    # We are dealing with ontology metadata that should be 
                    # captured in our ontology object.
                    ontology.metadata.append(header)
                    ontology.metadata.extend([x.strip() for x in group])

    return ontology               
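The chunking above relies on helper functions not shown in this excerpt. A minimal sketch of the is_data predicate, assuming it only needs to distinguish content lines from the blank lines separating stanzas, so that groupby yields each stanza as one contiguous group:

# Assumed helper: truthy for content lines, falsy for blank separator lines.
def is_data(line):
    return line.strip() != ''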
Example #5
def po_example():
    ontology = Ontology(filename='plant-ontology.obo', verbose=1)
    ontology.show()
    print("============================")
    name = 'xyl'
    result_list = ontology.fuzzy_search(search_string=name)
    for item in result_list:
        print(item)
Example #6
    def test_TasksReader(self):
        '''test Tasks.TaskReader()
        '''
        reload(Ontology.FlatOntologyManager)  # since this has a singleton class - may be called by other nosetests earlier
        Ontology.init_global_ontology()
        task_reader = Tasks.TaskReader(taskfile=self.taskfile)
        task_reader.get_task_by_id(task_id=10001)
        task_reader.get_random_task()
Example #7
    def set(self):
        genealogy = Ontology(self.env, 'ns.service.genealogy', self.document['head']['genealogy'])
        genealogy.merge_all(self.ontology['genealogy'])
        self.document['head']['genealogy'] = genealogy.node

        # persist the document
        self.env.resolver.save(self.document)

        # refetch the document
        self.document = self.env.resolver.resolve(self.uri, self.ontology['query'])
Example #8
    def test_TasksCreator(self):
        '''test Tasks.TaskCreator()
        '''

        reload(Ontology.FlatOntologyManager)  # since this has a singleton class - may be called by other nosetests earlier
        Ontology.init_global_ontology()
        task_generator = Tasks.TaskCreator()
        task_generator._create()
        task_generator._write()
Example #9
    def test_simulate_multidomain(self):
        '''Run HDC policy for domains in 2 multi domain dialogues
        '''
        Settings.init(config_file='./tests/test_configs/simulate_multiDomains_HDC.cfg')
        ContextLogger.createLoggingHandlers(config=Settings.config)
        logger = ContextLogger.getLogger('')
        logger.info("Starting HDC Multidomain Simulation")
        reload(Ontology.FlatOntologyManager)  # since this has a singleton class
        Ontology.init_global_ontology()
        simulator = Simulate.SimulationSystem(error_rate=0.1)
        simulator.run_dialogs(2)  # run 2 dialogues
Example #10
    def parse(self, query):
        for source in query["sources"]:
            try:
                document = json.load(source)
            except ValueError as e:
                self.log.warning(u"Failed to decode JSON document %s", query["remote url"])
                self.log.debug(u"Exception raised %s", unicode(e))
            else:
                if "process" in query["branch"]:
                    action = getattr(self, query["branch"]["process"], None)
                    if action is not None:
                        document = action(query, document)
                    else:
                        self.log.warning(u"Ignoring unknown process function %s", query["branch"]["process"])

                if query["branch"]["query type"] == "lookup":
                    entry = {
                        "branch": query["branch"],
                        "record": {
                            u"head": {u"genealogy": query["parameter"].project("ns.service.genealogy")},
                            u"body": {u"original": document},
                        },
                    }

                    if "namespace" in query["branch"]:
                        # make a canonical node
                        entry["record"]["body"]["canonical"] = Ontology(self.env, entry["branch"]["namespace"])
                        entry["record"]["body"]["canonical"].decode_all(entry["record"]["body"]["original"], self.name)

                        # Copy indexed values from the canonical node to the genealogy
                        if "index" in entry["branch"]:
                            for index in entry["branch"]["index"]:
                                if index in entry["record"]["body"]["canonical"]:
                                    entry["record"][u"head"][u"genealogy"][index] = entry["record"]["body"][
                                        "canonical"
                                    ][index]

                    # Append the entry to the query result
                    query["entires"].append(entry)

                elif query["branch"]["query type"] == "search":
                    for trigger in query["branch"]["resolve"]:
                        for element in document[query["branch"]["container"]]:
                            # Decode a reference
                            o = Ontology(self.env, trigger["namespace"])
                            o.decode_all(element, self.name)

                            # Make a URI and trigger a resolution
                            ref = o.project("ns.service.genealogy")
                            ref["language"]
                            uri = trigger["format"].format(**ref)
                            self.log.debug(u"Trigger %s resolution", uri)
                            self.resolver.resolve(uri)
Example #11
    def test_simulate_multidomain(self):
        '''Run 2 dialogues with the multidomain system and GP policies
        (NB: config needs to explicitly set each domain to use gp actually...)
        '''
        Settings.init(config_file='./tests/test_configs/simulate_multiDomains_GP.cfg')
        ContextLogger.createLoggingHandlers(config=Settings.config)
        logger = ContextLogger.getLogger('')
        logger.info("Starting GP Multidomain Simulation")
        reload(Ontology.FlatOntologyManager)  # since this has a singleton class
        Ontology.init_global_ontology()
        simulator = Simulate.SimulationSystem(error_rate=0.1)
        simulator.run_dialogs(2)  # run 2 dialogues
Example #12
    def test_simulate_singledomain_for_all_domains(self):
        '''Loop over all domains and runs 2 dialogues with GP policy in each
        '''
        import numpy.random as nprand
        # Not seeding this, so outcomes depend on the system clock and the
        # testing here is somewhat random. NOTE: this is really not in the
        # spirit of (unit) testing, which should never be random; it is done
        # to get a little more coverage without writing explicit permutations
        # of tests.

        Settings.init(config_file='./tests/test_configs/simulate_singledomain_for_all_domains.cfg')
        ContextLogger.createLoggingHandlers(config=Settings.config)
        logger = ContextLogger.getLogger('')
        domains = TestingUtils.get_loop_over_domains()
        for dstring in domains:
            Settings.config.set("GENERAL", 'domains', dstring)
            logger.info("Starting GP Single Domain Simulation for " + dstring)

            # [policy_DOMAINTAG]
            Settings.config.add_section('policy_' + dstring)
            Settings.config.set('policy_' + dstring, 'learning', 'True')
            Settings.config.set('policy_' + dstring, 'policytype', 'gp')
            # no inpolicy file given --> starts with empty file (random policy)
            Settings.config.set('policy_' + dstring, 'outpolicyfile',
                                'tests/test_gp/' + dstring)
            if nprand.uniform() > 0.5:
                Settings.config.set('policy_' + dstring, 'belieftype',
                                    'baseline')
            else:
                Settings.config.set('policy_' + dstring, 'belieftype', 'focus')

            # [gppolicy_DOMAINTAG]
            Settings.config.add_section('gppolicy_' + dstring)
            if nprand.uniform() > 0.5:
                Settings.config.set('gppolicy_' + dstring, 'kernel',
                                    'polysort')
            else:
                Settings.config.set('gppolicy_' + dstring, 'kernel',
                                    'gausssort')
            if nprand.uniform() > 0.5:
                Settings.config.set('gppolicy_' + dstring, 'actionkerneltype',
                                    'delta')
            else:
                Settings.config.set('gppolicy_' + dstring, 'actionkerneltype',
                                    'hdc')
            reload(Ontology.FlatOntologyManager)  # since this has a singleton class
            Ontology.init_global_ontology()
            simulator = Simulate.SimulationSystem(error_rate=0.1)
            simulator.run_dialogs(2)  # run 2 dialogues
Example #13
    def test_simulate_singledomain_for_all_domains(self):
        '''Loop over all domains and run 2 dialogues via HDC policy in each
        '''
        Settings.init(config_file='./tests/test_configs/simulate_singledomain_for_all_domains.cfg')
        ContextLogger.createLoggingHandlers(config=Settings.config)
        logger = ContextLogger.getLogger('')
        domains = TestingUtils.get_loop_over_domains()
        for dstring in domains:
            Settings.config.set("GENERAL", 'domains', dstring)
            # no policy settings given --> defaults to HDC
            logger.info("Starting HDC Single Domain Simulation for " + dstring)
            reload(Ontology.FlatOntologyManager)  # since this has a singleton class
            Ontology.init_global_ontology()
            simulator = Simulate.SimulationSystem(error_rate=0.1)
            simulator.run_dialogs(2)  # run 2 dialogues
Example #14
    def _load_mediainfo(self):
        command = self.env.initialize_command("mediainfo", self.log)
        if command:
            command.extend([u"--Language=raw", u"--Output=XML", u"--Full", self.ontology["path"]])
            proc_mediainfo = Popen(command, stdout=PIPE, stderr=PIPE)
            proc_grep = Popen([u"grep", u"-v", u"Cover_Data"], stdin=proc_mediainfo.stdout, stdout=PIPE)
            raw_xml = proc_grep.communicate()[0]

            # parse the DOM
            element = ElementTree.fromstring(raw_xml)
            if element is not None:
                for node in element.findall(u"File/track"):
                    if "type" in node.attrib:
                        mtype = self.env.enumeration["mediainfo stream type"].search(node.attrib["type"])
                        if mtype is not None:
                            if mtype.node["namespace"]:
                                # initialize an ontology with the correct namespace
                                o = Ontology(self.env, mtype.node["namespace"])

                                # iterate over the properties and populate the ontology
                                for item in list(node):
                                    text = item.text

                                    # decode base64 encoded element
                                    if "dt" in item.attrib and item.attrib["dt"] == "binary.base64":
                                        text = base64.b64decode(text)
                                        text = unicode(text, "utf8")

                                    # set the concept on the ontology
                                    o.decode(item.tag, text)

                                # fix the video encoder settings on video tracks
                                if mtype.key == "video":
                                    self._fix_mediainfo_encoder_settings(o)

                                # add the ontology to the stream stack
                                self._execution["crawl"]["stream"].append(o)

                            elif mtype.key == "menu":
                                menu = Menu(self.env)
                                for item in list(node):
                                    menu.add(Chapter.from_raw(item.tag, item.text, Chapter.MEDIAINFO))
                                menu.normalize()
                                if menu.valid:
                                    self._execution["crawl"]["menu"].append(menu)

            # Release resources held by the element, we no longer need it
            element.clear()
Example #15
    def _run(self):
        """
        Runs the build process to produce the ontology documentation.
        """
        fileoutinfos = self.getOutputFileInfos()

        # Create the destination directory, if needed.  We only need to check
        # this for in-source builds, since the BuildDirTarget dependency will
        # take care of this for out-of-source builds.
        if self.config.getDoInSourceBuilds():
            destdir = os.path.dirname(fileoutinfos[0].destpath)
            if not (os.path.isdir(destdir)):
                self._makeDirs(destdir)

        logger.info('Creating ontology documentation files...')

        ont = Ontology(self.mobt_reasoned.getOutputFilePath())

        # Create the documentation files.
        for foutinfo in fileoutinfos:
            # If the format is HTML, make sure the supporting CSS and
            # Javascript files are present.
            if foutinfo.formatstr == 'html':
                destdir = os.path.dirname(foutinfo.destpath)
                self._checkWebFiles(['documentation_styles.css', 'navtree.js'],
                                    destdir)

            writer = getDocumentationWriter(foutinfo.formatstr)
            documenter = Documenter(ont, writer)

            with open(self.config.getDocSpecificationFile()) as docspec:
                with open(foutinfo.destpath, 'w') as fout:
                    documenter.document(docspec, fout)
Example #16
    def transform(self, template):
        # apply overrides on the pivot location from the template
        if 'override' in template:
            for k, v in template['override'].iteritems():
                self.location[k] = v

        # apply track rules from the template
        if 'track' in template:
            for rule in template['track']:
                for branch in rule['branch']:
                    taken = False
                    for stream in self.resource.stream:
                        if stream.match(branch):
                            taken = True
                            s = Ontology.clone(stream)
                            s['resource path digest'] = self.resource.location['path digest']
                            if 'override' in rule:
                                for k, v in rule['override'].iteritems():
                                    s[k] = v
                            self.stream.append(s)

                            if rule['mode'] == 'choose':
                                break

                    if taken and rule['mode'] == 'choose':
                        break

        return self.taken
Example #17
    def _transcode_ac3(self, task):
        product = task.produce(task.ontology)
        if product:
            taken = False

            for pivot in task.transform.pivot.values():
                for stream in pivot.stream:
                    if not taken and stream['stream kind'] == 'audio':
                        taken = True

                        # Clone the hint ontology
                        product.hint = Ontology.clone(self.hint)

                        command = self.env.initialize_command('ffmpeg', self.log)
                        if command:
                            # make ffmpeg not check for overwrite, we already do this check
                            command.append(u'-y')

                            # set the number of processing threads
                            command.append(u'-threads')
                            command.append(unicode(self.env.system['threads']))

                            # set the input file
                            command.append(u'-i')
                            command.append(self.path)

                            for k, v in stream['ffmpeg parameters'].iteritems():
                                command.append(k)
                                if v is not None:
                                    command.append(unicode(v))

                if taken:
                    break
            if taken and self.env.check_path_available(product.path, task.ontology['overwrite']):
                command.append(product.path)
                message = u'Transcode {} --> {}'.format(self.path, product.path)
                self.env.execute(command, message, task.ontology['debug'], pipeout=True, pipeerr=False, log=self.log)
Example #18
def init(_boardname=None):

    global player, game

    name = _boardname if _boardname is not None else 'pathfindingWorld_MultiPlayer1'
    # Alternative boards, kept for reference ("no"/"yes" mark whether they work):
    #name = _boardname if _boardname is not None else 'pathfindingWorld_MultiPlayer2' # no
    #name = _boardname if _boardname is not None else 'pathfindingWorld_MultiPlayer3'
    #name = _boardname if _boardname is not None else '10x10'
    #name = _boardname if _boardname is not None else 'Lab_15x15' # no
    #name = _boardname if _boardname is not None else 'map' # no
    #name = _boardname if _boardname is not None else 'map2' # no
    #name = _boardname if _boardname is not None else 'pathfindingWorld_MultiPlayer_6x6' # no
    #name = _boardname if _boardname is not None else 'pathfindingWorld_MultiPlayer_bomberman' # yes
    #name = _boardname if _boardname is not None else 'pathfindingWorld_MultiPlayer_impossible'
    #name = _boardname if _boardname is not None else 'pathfindingWorld_MultiPlayer_incroyable'
    #name = _boardname if _boardname is not None else 'pathfindingWorld_MultiPlayer_labyrinthe2'
    #name = _boardname if _boardname is not None else 'pathfindingWorld_MultiPlayer_labyrinthe3' # no
    #name = _boardname if _boardname is not None else 'thirst' # no

    game = Game('Cartes/' + name + '.json', SpriteBuilder)
    game.O = Ontology(True, 'SpriteSheet-32x32/tiny_spritesheet_ontology.csv')
    game.populate_sprite_names(game.O)
    game.fps = 20  # frames per second
    game.mainiteration()
    game.mask.allow_overlaping_players = True
Example #19
def init(_boardname=None):
    global player, game
    name = _boardname if _boardname is not None else 'pathfindingWorld_multiPlayer'
    game = Game('Cartes/' + name + '.json', SpriteBuilder)
    game.O = Ontology(True, 'SpriteSheet-32x32/tiny_spritesheet_ontology.csv')
    game.populate_sprite_names(game.O)
    game.fps = 5  # frames per second
    game.mainiteration()
Example #20
    def _run(self):
        """
        Runs the build process and produces a new, modified version of the main
        OWL ontology file.
        """
        timer = BasicTimer()
        timer.start()

        self._retrieveAndCheckFilePaths()

        mainont = Ontology(self.obt.getOutputFilePath())

        if self.mergeimports:
            # Merge the axioms from each imported ontology directly into this
            # ontology (that is, do not use import statements).
            logger.info(
                'Merging all imported ontologies into the main ontology...')
            for importIRI in mainont.getImports():
                mainont.mergeOntology(importIRI,
                                      self.config.getAnnotateMerged())

        if self.prereason:
            logger.info('Running reasoner and adding inferred axioms...')
            inf_types = self.config.getInferenceTypeStrs()
            annotate_inferred = self.config.getAnnotateInferred()
            preprocess_inverses = self.config.getPreprocessInverses()
            iaa = InferredAxiomAdder(mainont, self.config.getReasonerStr())
            if self.config.getExcludedTypesFile() != '':
                iaa.loadExcludedTypes(self.config.getExcludedTypesFile())
            iaa.addInferredAxioms(inf_types, annotate_inferred,
                                  preprocess_inverses)

        fileoutpath = self.getOutputFilePath()

        # Set the ontology IRI.
        ontIRI = self.config.generateDevIRI(fileoutpath)
        mainont.setOntologyID(ontIRI)

        # Write the ontology to the output file.
        logger.info('Writing compiled ontology to ' + fileoutpath + '...')
        mainont.saveOntology(fileoutpath, self.config.getOutputFormat())

        if self.mergeimports and self.prereason:
            msgtxt = 'Merged and reasoned '
        elif self.mergeimports:
            msgtxt = 'Merged '
        else:
            msgtxt = 'Reasoned '

        logger.info((msgtxt + 'ontology build completed in {0} s.\n').format(
            timer.stop()))
Example #21
def init_game(_boardname=None):
    global player, game
    name = _boardname if _boardname is not None else 'kolkata_6_10'
    game = Game('Cartes/' + name + '.json', SpriteBuilder)
    game.O = Ontology(True, 'SpriteSheet-32x32/tiny_spritesheet_ontology.csv')
    game.populate_sprite_names(game.O)
    game.fps = 20  # frames per second
    game.mainiteration()
    game.mask.allow_overlaping_players = True
Example #22
def init(_boardname=None):
    global player, game
    # pathfindingWorld_MultiPlayer4
    name = _boardname if _boardname is not None else 'tictactoe'
    game = Game('Cartes/' + name + '.json', SpriteBuilder)
    game.O = Ontology(True, 'SpriteSheet-32x32/tiny_spritesheet_ontology.csv')
    game.populate_sprite_names(game.O)
    game.fps = 2  # frames per second
    game.mainiteration()
    game.mask.allow_overlaping_players = True
Example #23
def init(_boardname=None):
    global player, game, tailleV, tailleH
    name = _boardname if _boardname is not None else 'pathfindingWorld3'
    game = Game('Cartes/' + name + '.json', SpriteBuilder)
    game.O = Ontology(True, 'SpriteSheet-32x32/tiny_spritesheet_ontology.csv')
    game.populate_sprite_names(game.O)
    game.fps = 5  # frames per second
    game.mainiteration()
    player = game.player
    tailleV = game.spriteBuilder.rowsize
    tailleH = game.spriteBuilder.colsize
Example #24
def psims_example():
    ontology = Ontology(filename='psi-ms.obo', verbose=1)
    ontology.show()
    print("============================")
    term = ontology.terms["MS:1002286"]
    term.show()
    print("============================")
    name = 'QIT'
    print(f"Searching for '{name}'")
    if name in ontology.names:
        curies = ontology.names[name]
        for curie in curies:
            term = ontology.terms[curie]
            term.show()
    print("============================")
    name = 'bit'
    result_list = ontology.fuzzy_search(search_string=name)
    for item in result_list:
        print(item)
    print("============================")
    name = 'bit'
    result_list = ontology.fuzzy_search(search_string=name,
                                        children_of="MS:1000031")
    for item in result_list:
        print(item)
Example #26
    def _run(self):
        """
        Runs the inferencing pipeline.
        """
        #self._retrieveAndCheckFilePaths()

        if self.srcpath != '':
            sourceont = Ontology(self.srcpath)
        else:
            sourceont = Ontology(JavaSystem.in)

        logger.info('Running reasoner and adding inferred axioms...')
        inf_types = self.config.getInferenceTypeStrs()
        annotate_inferred = self.config.getAnnotateInferred()
        preprocess_inverses = self.config.getPreprocessInverses()
        iaa = InferredAxiomAdder(sourceont, self.config.getReasonerStr())
        if self.config.getExcludedTypesFile() != '':
            iaa.loadExcludedTypes(self.config.getExcludedTypesFile())
        iaa.addInferredAxioms(
            inf_types, annotate_inferred, preprocess_inverses
        )

        # Write the ontology to the output file or stdout.
        format_str = self.config.getOutputFormat()
        if self.outpath != '':
            logger.info('Writing compiled ontology to ' + self.outpath + '...')
            sourceont.saveOntology(self.outpath, format_str)
        else:
            sourceont.printOntology(format_str)
Example #27
    def tag(self, task):
        update = Ontology(self.env, 'ns.medium.resource.meta.tag')
        meta = self.meta.project('ns.medium.resource.meta.tag')
        knowledge = Ontology(self.env, 'ns.medium.resource.meta.tag', self.knowledge['body'])
        genealogy = Ontology(self.env, 'ns.service.genealogy', self.knowledge['head']['genealogy'])
        knowledge.merge_all(genealogy)

        # Everything that is in meta but doesn't fit knowledge
        # should be replaced with the value in knowledge
        for i in meta.keys():
            if meta[i] != knowledge[i]:
                update[i] = knowledge[i]

        # Everything that is in knowledge but not in meta
        # should be set to the value in knowledge
        for i in knowledge.keys():
            if i not in meta:
                update[i] = knowledge[i]

        modify = []
        for k, v in update.iteritems():
            prototype = update.namespace.find(k)
            if prototype and prototype.node['subler']:
                modify.append(u'{{{}:{}}}'.format(prototype.node['subler'], v))

        print unicode(modify).encode('utf-8')
Example #28
def run_cso_classifier(paper,
                       modules="both",
                       enhancement="first",
                       explanation=False):

    if modules not in ["syntactic", "semantic", "both"]:
        raise ValueError(
            "Error: Field modules must be 'syntactic', 'semantic' or 'both'")

    if enhancement not in ["first", "all", "no"]:
        raise ValueError(
            "Error: Field enhancement must be 'first', 'all' or 'no'")

    if not isinstance(explanation, bool):
        raise ValueError(
            "Error: Explanation must be set to either True or False")

    # Loading ontology and model
    cso = CSO()
    model = MODEL()
    t_paper = Paper(paper, modules)
    result = Result(explanation)

    # Passing parameters to the two classes (synt and sema) and actioning classifiers

    if modules == 'syntactic' or modules == 'both':
        synt_module = synt(cso, t_paper)
        result.set_syntactic(synt_module.classify_syntactic())
        if explanation:
            result.dump_temporary_explanation(synt_module.get_explanation())
    if modules == 'semantic' or modules == 'both':
        sema_module = sema(model, cso, t_paper)
        result.set_semantic(sema_module.classify_semantic())
        if explanation:
            result.dump_temporary_explanation(sema_module.get_explanation())

    result.set_enhanced(
        cso.climb_ontology(getattr(result, "union"), enhancement))

    return result.get_dict()
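A hedged usage sketch for run_cso_classifier. The input paper is a dict of "title", "abstract" and "keywords" fields (the field names follow the CSO classifier documentation but are an assumption here); the returned dict contains the syntactic, semantic, union and enhanced topic lists:

# Usage sketch; the field names below are assumed from the CSO classifier docs.
paper = {
    "title": "De-anonymizing Social Networks",
    "abstract": "We present a framework for analyzing privacy and anonymity "
                "in social networks and develop a new re-identification algorithm.",
    "keywords": "data mining, social networks, privacy, anonymity",
}
result = run_cso_classifier(paper, modules="both", enhancement="first")
print(result["syntactic"], result["semantic"], result["enhanced"])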
Example #29
    def _run(self):
        """
        Reads the source ontologies and looks for the search terms.
        """
        ef = EntityFinder()

        for search_ont in self.search_onts:
            logger.info('Reading source ontology {0}...'.format(search_ont))
            ontology = Ontology(search_ont)
            logger.info('Processing ontology entities...')
            ef.addOntologyEntities(ontology)

        if self.tfpath != '':
            termsin = open(self.tfpath)
        else:
            termsin = sys.stdin

        if self.outpath != '':
            logger.info('Writing search results to ' + self.outpath + '...')
            fout = open(self.outpath, 'w')
        else:
            fout = sys.stdout

        writer = csv.DictWriter(fout,
                                fieldnames=[
                                    'Search term', 'Matching entity',
                                    'Label(s)', 'Annotation', 'Value',
                                    'Match type', 'Definition(s)'
                                ])
        writer.writeheader()

        row = {}
        for searchterm in termsin:
            searchterm = searchterm.strip()
            results = ef.findEntities(searchterm)

            for result in results:
                entity = result[0]

                row['Search term'] = searchterm
                row['Matching entity'] = str(entity.getIRI())
                row['Label(s)'] = ','.join(entity.getLabels())
                row['Annotation'] = result[1]
                row['Value'] = result[2]
                row['Definition(s)'] = ','.join(entity.getDefinitions())

                if result[3] == MATCH_FULL:
                    row['Match type'] = 'Full'
                else:
                    row['Match type'] = 'Partial'

                writer.writerow(row)
Example #30
def init(_boardname=None):
    global player, game, nbreCol, nbreLig
    # pathfindingWorld_MultiPlayer4
    name = _boardname if _boardname is not None else 'pathfindingWorld_MultiPlayer5'
    game = Game('Cartes/' + name + '.json', SpriteBuilder)
    game.O = Ontology(True, 'SpriteSheet-32x32/tiny_spritesheet_ontology.csv')
    game.populate_sprite_names(game.O)
    game.fps = 5  # frames per second
    game.mainiteration()
    game.mask.allow_overlaping_players = True
    nbreCol = game.spriteBuilder.colsize - 1
    nbreLig = game.spriteBuilder.rowsize - 1
    player = game.player
Example #31
    def __init__(self, base_ont_path):
        # Load the base ontology.
        self.ontology = Ontology(base_ont_path)

        # Get a PrefixDocumentFormat/PrefixManager instance so that we can
        # generate prefix IRIs for label expansions in text entity definitions.
        owlont = self.ontology.getOWLOntology()
        ontman = self.ontology.getOntologyManager()
        self.prefix_df = ontman.getOntologyFormat(owlont).asPrefixOWLOntologyFormat()

        # A list (used as a stack) for caching information in _TableRow objects
        # and their associated ontology entity objects.  Each list entry is
        # stored as a tuple, (ontology entity instance, _TableRow instance).
        self.entity_trows = []

        # Create a delimited string parser for parsing multiple values out of
        # input fields.
        self.dsparser = DelimStrParser(delimchars=';', quotechars='"')

        # Create a delimited string parser for parsing the components of
        # strings with whitespace-separated components, such as individual
        # property assertions (facts).
        self.ws_dsparser = DelimStrParser(delimchars=' \t', quotechars='"\'')
Example #32
def checkOntology(domain):
    Settings.load_root(root)
    Settings.load_config(None)
    Settings.config.add_section("GENERAL")
    Settings.config.set("GENERAL", 'domains', domain)

    Ontology.init_global_ontology()

    gOnt = Ontology.global_ontology
    for dstring in gOnt.possible_domains:
        informable = gOnt.get_informable_slots(dstring)

        for slot in informable:
            valuesJson = gOnt.get_informable_slot_values(dstring, slot)
            results = gOnt.ontologyManagers[dstring].db.customQuery(
                'SELECT DISTINCT {} FROM {} ORDER BY {} ASC'.format(
                    slot, dstring, slot))
            valuesDB = [entry[slot] for entry in results]
            # valuesDB = map(lambda a : a.lower(),valuesDB)
            valuesJson.append("not available")
            difference = list(set(valuesDB) - set(valuesJson))
            if len(difference):
                print dstring + " " + slot + " " + str(difference)
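A minimal usage sketch, assuming the standard PyDial domain names (e.g. 'CamRestaurants'); any mismatch between the database values and the JSON value list is printed per slot:

# Usage sketch; 'CamRestaurants' is assumed to be an available domain.
checkOntology('CamRestaurants')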
Example #33
    def _run(self):
        """
        Checks for entailment errors in the main ontology.
        """
        mainont = Ontology(self.obt.getOutputFilePath())
        timer = BasicTimer()

        logger.info('Checking for entailment errors...')
        timer.start()
        entcheck_res = mainont.checkEntailmentErrors(
            self.config.getReasonerStr())
        logger.info('Logical error check completed in {0} s'.format(
            timer.stop()))

        if not (entcheck_res['is_consistent']):
            logger.info(
                '\nERROR: The ontology is inconsistent (that is, it has no '
                'models).  This is often caused by the presence of an '
                'individual (that is, a class instance) that is explicitly or '
                'implicitly a member of two disjoint classes.  It might also '
                'indicate an underlying modeling error.  Regardless, it is a '
                'serious problem because an inconsistent ontology cannot be '
                'used for logical inference.\n')
        else:
            class_list = entcheck_res['unsatisfiable_classes']
            if len(class_list) > 0:
                iri_strs = [ent.getIRI().toString() for ent in class_list]
                classes_str = '<' + '>\n<'.join(iri_strs) + '>'

                logger.info(
                    '\nERROR: The ontology is consistent but incoherent '
                    'because it contains one or more unsatisfiable classes.  '
                    'This usually indicates a modeling error.  The following '
                    'classes are unsatisfiable:\n' + classes_str + '\n')
            else:
                logger.info('\nThe ontology is consistent and coherent.  No '
                            'entailment problems were found.\n')
Example #34
    def produce(self, override=None):
        # copy the location ontology
        p = Ontology.clone(self.location)

        # allow the location to recalculate those concepts
        del p['volume path']
        del p['file name']
        del p['directory']

        # explicitly set the volume and host from the task
        p['host'] = self.env.host
        p['volume'] = self.ontology['volume']

        # for copy and move we try to set a profile from the source
        if self.ontology['action'] in set(('copy', 'move', 'pack')):
            if self.resource.meta['profile']:
                p['profile'] = self.resource.meta['profile']

        # for transcode we try to set the profile from the transform
        elif self.ontology['action'] == 'transcode':
            for pivot in self.transform.pivot.values():
                if 'profile' in pivot.location:
                    p['profile'] = pivot.location['profile']

        # whatever happened, if a profile has been explicitly provided by the task
        # it will override anything we set implicitly
        if self.ontology['profile']:
            p['profile'] = self.ontology['profile']

        # if an override was given, set some concepts from it
        if override:
            for i in set((
                'kind',
                'language',
                'stream order',
                'resource path digest',
                'routing type'
            )):
                if i in override:
                    p[i] = override[i]

        # try to produce a product
        product = self.resource.asset.locate_resource(p)
        if product:
            self.product.append(product)
        else:
            self.log.error(u'Could not determine destination path from:\n%s', self.env.encode_json(p))

        return product
Example #35
    def __init__(self, boardNumber=1, boardName=None, fps=5, iterations=100):
        """
        boardNumber: identifier of the game board to use.
        fps: number of frames per second.
        iterations: maximum number of iterations.
        """
        global game
        if boardName is None:
            game = Game(
                'Cartes/pathfindingWorld_MultiPlayer' + str(boardNumber) + '.json',
                SpriteBuilder)
        else:
            game = Game('Cartes/' + str(boardName) + '.json', SpriteBuilder)
        game.O = Ontology(True, 'SpriteSheet-32x32/tiny_spritesheet_ontology.csv')
        game.populate_sprite_names(game.O)
        game.fps = fps  # frames per second
        game.mainiteration()
        game.mask.allow_overlaping_players = False
        self.iterations = iterations
Example #36
def parse_tree(tree_filename):
    """Build tree given a JSON file.

    Args:
      tree_filename: A string, path of a JSON file describing the tree from
        leaf to root.

    Returns:
      tree_leaf2root: leaf-to-root dict.
      sumrule: sumrule dict.
    """
    with open(tree_filename, 'rb') as fp:
        tree_str = json.loads(fp.read())
        tree_leaf2root = {}
        for k, v in tree_str.items():
            if v is not None:
                tree_leaf2root[int(k)] = int(v)
            else:
                tree_leaf2root[int(k)] = None
        tree = Ontology(tree_leaf2root)
        sumrule = tree.sumrules
        return tree_leaf2root, sumrule
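A hedged sketch of the input format, inferred from the parsing loop above and assuming the repo's Ontology class is importable: keys are node ids, values are parent ids, and null marks the root.

import json

# Write a toy tree: leaves 0 and 1 share parent 2, which is the root.
with open('tree.json', 'w') as fp:
    json.dump({"0": "2", "1": "2", "2": None}, fp)

tree_leaf2root, sumrule = parse_tree('tree.json')
print(tree_leaf2root)  # {0: 2, 1: 2, 2: None}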
Example #37
    def __init__(self, _boardname=None, swap=False, affiche=True):
        name = _boardname if _boardname is not None else 'at2'
        self.game = Game('Cartes/' + name + '.json', SpriteBuilder)
        self.game.O = Ontology(
            True, 'SpriteSheet-32x32/tiny_spritesheet_ontology.csv')
        self.game.populate_sprite_names(self.game.O)
        self.game.fps = 2  # frames per second
        self.affiche = affiche
        if affiche:
            self.game.mainiteration()
        self.game.mask.allow_overlaping_players = True

        # player = game.player

        # locate all the initial states (player locations)
        self.initStates = [o.get_rowcol() for o in self.game.layers['joueur']]
        print("Init states:", self.initStates)

        # locate all the collectible objects
        self.goalStates = [
            o.get_rowcol() for o in self.game.layers['ramassable']
        ]
        print("Goal states:", self.goalStates)

        # locate all the walls
        self.wallStates = [
            w.get_rowcol() for w in self.game.layers['obstacle']
        ]
        # print("Wall states:", self.wallStates)

        self.nbIteration = 0

        if swap:
            self.goalStates[0], self.goalStates[1] = self.goalStates[1], self.goalStates[0]

        self.players = [o for o in self.game.layers['joueur']]
Example #38
    def _run(self):
        """
        Runs the build process and produces a compiled OWL ontology file.
        """
        # Get the imports modules IRIs from the imports build target.
        importinfos = self.ibt.getImportsInfo()

        self._retrieveAndCheckFilePaths()

        baseont = Ontology(self.base_ont_path)
        # Add an import declaration for each import module.
        for importinfo in importinfos:
            baseont.addImport(importinfo.iristr, True)

        # Write the base ontology to the output file.
        fileoutpath = self.getOutputFilePath()
        logger.info('Writing updated base ontology to ' + fileoutpath + '...')
        baseont.saveOntology(fileoutpath)
Example #39
    def __init__(self):
        Ontology.__init__(self, rdflib.term.URIRef(u'http://www.w3.org/2004/02/skos/core#'))
Example #40
def all_url_test():
    l = classgenerator.Library()
    l2 = classgenerator.Library()
    l2.base_path = ["gentest", "lib", "ontologies"]
    #print ("test of getting global data")
    g = test_global_data_files.get_global_data()
    #print (g)
    predicates = Counter()
    subjects = Counter()
    predicate_types = Counter()
    predicate_types2 = Counter()
    objects = Counter()
    rebind(g)
    for x in g:
        p = x[1]
        s = x[0]
        o = x[2]
        predicates[p] += 1
        subjects[s] += 1
        objects[o] += 1
    print "predicates"

    seen = {}
    libs = {}    
    for (p, v) in predicates.most_common(4230):
        if 'openlinksw.com' in p:
            continue
        if 'cc.rww.io' in p :
            continue
        
        p2 = g.namespace_manager.qname(p)

        (ns,term) = p2.split(':')
        m = g.namespace_manager.store.namespace(ns)

        # skip
        if str(m) =='http://www.w3.org/1999/xhtml/vocab#':
            continue
    
        if ns not in seen  :
            #print "NS",ns, m
            
            seen[ns]=1
            if 'ns' in ns:
                print "g.namespace_manager.bind(\"{prefix}\",\"{url}\",True)  ".format(prefix=ns,url=m)
                #pass
            path = l.get_module(ns,m)
            #print ns, m, path
            if path:
                importl = l.get_import_path(path)
                prefix = l.get_import_path(ns)
            
                #l.createpath(path)
                #l.create_module(path,prefix,url=m)
                #print "import {module} as {prefix}".format(module=importl,prefix=ns)
                replace= {
                    'http://purl.org/dc/dcam/' : 'https://raw.githubusercontent.com/dcmi/vocabtool/master/build/dcam.rdf'
                }
                
                if str(m) in replace :
                    o = replace[str(m)]
                    #print "replacing " ,m,"with", o
                    m = o

                _format = 'guess'
                turtles = [
                    'http://www.w3.org/ns/ldp#',
                    'http://usefulinc.com/ns/doap#',
                    
                ]
                
                if str(m) in turtles  :
                    _format = 'turtle'
                xmls = [
                    'http://xmlns.com/foaf/0.1/',
                    'http://www.w3.org/ns/auth/cert#',
                    'http://www.w3.org/ns/auth/acl#',
                    'http://www.w3.org/2000/10/swap/pim/doc#',
                    'http://www.w3.org/2003/06/sw-vocab-status/ns#',
                ]
                
                if str(m) in xmls  :
                    _format = 'xml'
                o = Ontology(url=m,prefix=prefix,_format=_format)
                o.set_path(path)
                #print "prefix", prefix, m
                libs[prefix]=o
                
    ## now revisit the graph and link it
    #pprint.pprint(libs)
        
    for p in libs:

        o = libs[p]
        prefix = o.prefix
        
        #print "Lib", p, o.path
        og = o.fetch(g.namespace_manager)
        rebind(og)
        od = o.extract_graph(og,l, libs)

        ours = od[0]
        others = od[2]
        prefixs = od[2]
        code = []

        importcode = []

        
        # create members
        used_prefixes= {}
        for x in ours :
            if 'http' in x :
                pass
            else:
                types=[]
                
                # lets try and create a class
                attrs = ours[x]
                for y in attrs:
                    p = y[0]
                    s = y[1]
                    p1 = resolve(p)
                    s1 = resolve(s)
                    if p1 == 'rdf.type' :
                        if s1 == 'owl.Restriction' :
                            pass
                        else:
                            types.append(s1)
                            ## append to used types
                            #print "check prefix for import",s,s1
                            
                            used_prefixes[s[0]] =1 
                        #print "\t","pred",p1,s1

                if len(types) > 0:
                    caml= convert2(x)
                    short= convert(x)
                    if caml.startswith('Ub'):
                        pass
                    else:
                    
                        classcode = ast.parse("class {_class}({base}):\n    term=\"{name}\"\n".format(
                            _class=caml,
                            name=x,
                            base=",".join(types)))
                        used_prefixes[prefix]=1
                        alias_code =  ast.parse("{alias} = {_class}()\n".format(
                            prefix=prefix,
                            alias=short,
                            _class=caml))

                        code.append(classcode)
                        code.append(alias_code)


        ##### create prefixes
        for x in prefixs:
            m = prefixs[x]

            if x not in used_prefixes:
                continue # remove unused prefixes
        
            if x == o.prefix :
                continue
            
            import_module = Module(body=[ImportFrom(
                module=m.module_name(),
                names=[alias(
                name='ontology',
                    asname=x)],
                level=0)])

            #code = "from {module} import ontology as {alias}".format(module=m.module_name(), alias=x)
            # x = Import(names=[alias(
            #     name=m.module_name(),
            #     asname=None)]),
            #print(astunparse.dump(ast.parse(code)))
            importcode.append(import_module)

        
        ###
        if True:
            npath= "gentest/" + o.path
            #path = l.get_module(ns,m)
            #print ns, m, path
            importl = l.get_import_path(npath)
            #prefix = l.get_import_path(ns)
            l.createpath(npath)
            print "npath",npath
            #print code
            l.create_module(npath,o.prefix,url=o.base,members=code,imports=importcode)
Example #41
    def __init__(self):
        Ontology.__init__(self, rdflib.term.URIRef(u'http://purl.org/dc/dcam/'))
Example #42
    def __init__(self):
        Ontology.__init__(self, rdflib.term.URIRef(u'http://www.w3.org/1999/xhtml/vocab#'))
Example #43
    def parse(self, query):
        for source in query['sources']:
            try:
                document = json.load(source)
            except ValueError as e:
                self.log.warning(u'Failed to decode JSON document %s', query['remote url'])
                self.log.debug(u'Exception raised %s', unicode(e))
            else:
                if 'process' in query['branch']:
                    # Preprocessing the entry.
                    # Method should return a document similar to normal itunes api calls
                    action = getattr(self, query['branch']['process'], None)
                    if action is not None:
                        document = action(document)
                    else:
                        self.log.warning(u'Ignoring unknown process function %s', query['branch']['process'])

                if not document['resultCount'] > 0:
                    self.log.debug(u'No results found for query %s', query['remote url'])
                else:
                    if query['branch']['query type'] == 'lookup':
                        for element in document['results']:
                            for product in query['branch']['produce']:
                                if satisfies(element, product['condition']):

                                    entry = {
                                        'branch': product['branch'],
                                        'record': {
                                            u'head': {u'genealogy': Ontology(self.env, 'ns.service.genealogy')},
                                            u'body': {u'original': element},
                                        }
                                    }

                                    # make a canonical node
                                    entry['record']['body']['canonical'] = Ontology(self.env, entry['branch']['namespace'])
                                    entry['record']['body']['canonical'].decode_all(entry['record']['body']['original'], self.name)

                                    # Copy indexed values from the canonical node to the genealogy
                                    if 'index' in entry['branch']:
                                        for index in entry['branch']['index']:
                                            if index in entry['record']['body']['canonical']:
                                                entry['record'][u'head'][u'genealogy'][index] = entry['record']['body']['canonical'][index]

                                    # Only produce once for each element
                                    query['entires'].append(entry)
                                    break

                    elif query['branch']['query type'] == 'search':
                        for trigger in query['branch']['resolve']:
                            for element in document['results']:
                                if satisfies(element, trigger['condition']):

                                    # Decode concepts from the element and populate the ontology
                                    o = Ontology(self.env, trigger['namespace'])
                                    o.decode_all(element, self.name)

                                    # Make a URI and trigger a resolution
                                    ref = o.project('ns.service.genealogy')
                                    ref['language']
                                    uri = trigger['format'].format(**ref)
                                    self.log.debug(u'Trigger %s resolution', uri)
                                    self.resolver.resolve(uri)
Example #44
    def __init__(self):
        Ontology.__init__(self, rdflib.term.URIRef(u'http://www.w3.org/2002/12/cal/icalSpec#'))
Example #45
    def __init__(self):
        Ontology.__init__(self, rdflib.term.URIRef(u'http://www.w3.org/ns/adms#'))
Example #46
    def __init__(self):
        Ontology.__init__(self, rdflib.term.URIRef(u'https://cc.rww.io/vocab#'))
Example #47
    def __init__(self):
        Ontology.__init__(self, rdflib.term.URIRef(u'http://purl.org/dc/elements/1.1/'))
Example #48
    def __init__(self):
        Ontology.__init__(self, rdflib.term.URIRef(u'http://www.w3.org/2007/05/powder-s#'))
Example #49
    def __init__(self):
        Ontology.__init__(self, rdflib.term.URIRef(u'http://purl.org/vocab/changeset/schema#'))
Example #50
    def __init__(self):
        Ontology.__init__(self, rdflib.term.URIRef(u'http://www.rddl.org/purposes#'))
Example #51
    def __init__(self):
        Ontology.__init__(self, rdflib.term.URIRef(u'http://rdfs.org/ns/void#'))
Example #52
    def __init__(self):
        Ontology.__init__(self, rdflib.term.URIRef(u'http://www.loc.gov/mads/rdf/v1#'))
Example #53
    def __init__(self):
        Ontology.__init__(self, rdflib.term.URIRef(u'http://webns.net/mvcb/'))
Example #54
    def __init__(self):
        Ontology.__init__(self, rdflib.term.URIRef(u'http://creativecommons.org/ns#'))
Example #55
    def __init__(self):
        Ontology.__init__(self, rdflib.term.URIRef(u'http://www.w3.org/2003/06/sw-vocab-status/ns#'))
Example #56
    def __init__(self):
        Ontology.__init__(self, Namespace(u'http://www.w3.org/2000/01/rdf-schema#'))
Example #57
    def __init__(self):
        Ontology.__init__(self, Namespace(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#'))
Example #58
    def __init__(self):
        Ontology.__init__(self, rdflib.term.URIRef(u'http://www.w3.org/2003/01/geo/wgs84_pos#'))
Example #59
    def __init__(self):
        Ontology.__init__(self, rdflib.term.URIRef(u'http://id.loc.gov/vocabulary/identifiers/'))