Example #1
def main():
    # Re-create turtle examples from template
    recompute_all_ex.main()
    # Convert turtle to provn and upload to Prov Store
    UpdateExampleReadmes.main()
    # Update terms README
    UpdateTermReadme.main()
    # Update specifications
    create_results_specification.main("dev")
    create_expe_specification.main()
Example #3
def main():
    # --- NIDM-Experiment
    # Update terms README
    UpdateExpTermReadme.main()

    # --- NIDM-Results
    # Re-create turtle examples from template
    recompute_all_ex.main()
    # Convert turtle to provn and upload to Prov Store
    UpdateExampleReadmes.main()
    # Update terms README
    UpdateTermReadme.main()
    # Update specifications
    create_results_specification.main("dev")
    create_expe_specification.main()
    # Update csv file of preferred prefixes
    create_prefixes.main()
Example #4
    def create_release(self):
        owl_file = os.path.join(TERMS_FOLDER, 'nidm-results.owl')
        assert os.path.exists(owl_file)

        # Copy the owl file to the release folder
        release_owl_file = os.path.join(
            RELEASED_TERMS_FOLDER, "nidm-results_%s.owl" % (self.nidm_version))
        shutil.copyfile(owl_file, release_owl_file)

        with open(release_owl_file, 'r') as fp:
            owl_txt = fp.read()

        # Remove imports and copy the import directly in the release file
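        # (The pattern matches the bracketed block listing the imported
        # ontologies and captures their <IRI> entries in the "imports" group)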
        match = re.search(
            r'\[[\w:;\n\s]' + r'*(?P<imports>(\s*<.*>\s*,?\s*\n)*)\s*\] \.',
            owl_txt)
        if match:
            owl_txt = owl_txt.replace(match.group(), "")

            owl_imports = re.findall(r"<.*>", match.group("imports"))

            for im in owl_imports:
                im = im.replace("<", "").replace(">", "")
                im_name = self.get_import_name(im)
                name = im_name.split("-")[0].replace("_import", "")

                im_file = os.path.join(IMPORT_FOLDER, im_name + ".ttl")

                if os.path.exists(im_file):
                    with open(im_file, 'r') as fp:
                        im_txt = fp.read()
                else:
                    response = urllib2.urlopen(im + '.ttl')
                    im_txt = response.read()

                # Replace prefix ":" by named namespace in import
                default_match = re.search(r'@prefix : <.*>', im_txt)
                if default_match:
                    im_txt = im_txt.replace(" :", " " + name + ":")
                    im_txt = im_txt.replace("\n:", "\n" + name + ":")

                # Remove base prefix
                base_match = re.search(r'@base <.*>', im_txt)
                if base_match:
                    im_txt = im_txt.replace(base_match.group(), "")

                # Copy missing prefixes in nidm-results owl file
                prefixes = re.findall(r'@prefix \w+: <.*> \.\n', im_txt)
                for prefix in prefixes:
                    if prefix not in owl_txt:
                        owl_txt = prefix + owl_txt
                    im_txt = im_txt.replace(prefix, "")

                owl_txt = owl_txt + "\n\n##### Imports from %s #####" % (name)
                owl_txt = owl_txt + im_txt

        # Remove AFNI-related terms (not ready for release yet)
        if int(self.nidm_version) <= 110:
            owl_txt = owl_txt.replace(
                "@prefix afni: <http://purl.org/nidash/afni#> .\n", "")
            # Remove terms: nidm:'Legendre Polynomial Order', afni:'BLOCK',
            # afni:'GammaHRF' and afni:'LegendrePolynomialDriftModel'
            terms_under_development = [
                NIDM['NIDM_0000014'], AFNI['BLOCK'], AFNI['GammaHRF'],
                AFNI['LegendrePolynomialDriftModel']
            ]
            for term in terms_under_development:
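                # The owl file is expected to precede each term's definitions
                # with a "###  <IRI>" comment; match from that header up to
                # the next "#" to drop the whole block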
                m = re.search(
                    re.escape("###  " + str(term)) + r"[^\#]*\.", owl_txt)
                owl_txt = owl_txt.replace(m.group(), "")

        with open(release_owl_file, 'w') as fp:
            fp.write(owl_txt)

        # Create the specification (before the example addresses are updated,
        # to avoid issues with examples pointing to a tag not yet pushed)
        create_spec(self.nidm_original_version)
        if self.nidm_version == "100":
            image_dir_prev = os.path.join(SPECSPATH, "img", "nidm-results_100")
        image_dir = os.path.join(SPECSPATH, "img",
                                 "nidm-results_" + self.nidm_version)
        if not os.path.isdir(image_dir):
            shutil.copytree(image_dir_prev, image_dir)

        # Update version number in examples and in script files
        script_files = glob.glob(
            os.path.join(NIDMRESULTSPATH, "scripts", "*.py"))
        for script in script_files:
            if not script.endswith("release_nidm_results.py"):
                with open(script, 'r') as fp:
                    script_txt = fp.read()
                with open(script, 'w') as fp:
                    fp.write(
                        script_txt.replace(
                            'version="dev"',
                            'version="' + self.nidm_original_version + '"'))
        # Re-create turtle examples from template
        recompute_all_ex.main()
        # Convert turtle to provn and upload to Prov Store
        UpdateExampleReadmes.main()
        # Update terms README
        UpdateTermReadme.main()
        # Update specifications

        # Replace address of examples
        owl_txt = owl_txt.replace(
            "https://raw.githubusercontent.com/incf-nidash/nidm/\
master/", "https://raw.githubusercontent.com/incf-nidash/nidm/\
NIDM-Results_" + self.nidm_original_version + "/")

        with open(release_owl_file, 'w') as fp:
            fp.write(owl_txt)
Example #5
    def create_release(self):
        owl_file = os.path.join(TERMS_FOLDER, 'nidm-results.owl')
        assert os.path.exists(owl_file)

        # Copy the owl file to the release folder
        release_owl_file = os.path.join(
            RELEASED_TERMS_FOLDER, "nidm-results_%s.owl" % (self.nidm_version))
        shutil.copyfile(owl_file, release_owl_file)

        with open(release_owl_file, 'r') as fp:
            owl_txt = fp.read()

        # Remove imports and copy the import directly in the release file
        match = re.search(
            r'\[[\w:;\n\s]' + r'*(?P<imports>(\s*<.*>\s*,?\s*\n)*)\s*\] \.',
            owl_txt)
        if match:
            owl_txt = owl_txt.replace(match.group(), "")

            owl_imports = re.findall(r"<.*>", match.group("imports"))

            for im in owl_imports:
                im = im.replace("<", "").replace(">", "")
                im_name = self.get_import_name(im)
                name = im_name.split("-")[0].replace("_import", "")

                im_file = os.path.join(IMPORT_FOLDER, im_name + ".ttl")

                if os.path.exists(im_file):
                    with open(im_file, 'r') as fp:
                        im_txt = fp.read()
                else:
                    response = urllib2.urlopen(im + '.ttl')
                    im_txt = response.read()

                # Replace prefix ":" by named namespace in import
                default_match = re.search(r'@prefix : <.*>', im_txt)
                if default_match:
                    im_txt = im_txt.replace(" :", " " + name + ":")
                    im_txt = im_txt.replace("\n:", "\n" + name + ":")

                # Remove base prefix
                base_match = re.search(r'@base <.*>', im_txt)
                if base_match:
                    im_txt = im_txt.replace(base_match.group(), "")

                # Remove description of subset ontology
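                # (i.e. drop the import's own "<IRI> rdf:type owl:Ontology"
                # declaration, up to the next "###" section comment)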
                this_ontology_match = re.search(
                    r'<http://[^\n]*?> rdf:type owl:Ontology .*?\..*?###',
                    im_txt, re.DOTALL)
                if this_ontology_match:
                    im_txt = im_txt.replace(this_ontology_match.group(), "###")

                # Copy missing prefixes in nidm-results owl file
                prefixes = re.findall(r'@prefix \w+: <.*> \.\n', im_txt)
                for prefix in prefixes:
                    if prefix not in owl_txt:
                        owl_txt = prefix + owl_txt
                    im_txt = im_txt.replace(prefix, "")

                owl_txt = owl_txt + "\n\n##### Imports from %s #####" % (name)
                owl_txt = owl_txt + im_txt

        # Remove AFNI-related terms (not ready for release yet)
        if int(self.nidm_version.split("-rc")[0]) <= 130:
            owl_txt = owl_txt.replace(
                "@prefix afni: <http://purl.org/nidash/afni#> .\n", "")
            # Remove terms: nidm:'Legendre Polynomial Order', afni:'BLOCK',
            # afni:'GammaHRF' and afni:'LegendrePolynomialDriftModel'
            # and 'vertices' terms not yet in use
            terms_under_development = [
                NIDM['NIDM_0000014'], AFNI['BLOCK'], AFNI['GammaHRF'],
                AFNI['LegendrePolynomialDriftModel'], NIDM['NIDM_0000083'],
                NIDM['NIDM_0000137'], NIDM['NIDM_0000142'],
                NIDM['NIDM_0000158'], SPM['SPM_0000011'], SPM['SPM_0000012']
            ]

            # Remove the reification property (to be further discussed with
            # STATO)
            terms_under_development += [
                OBO['IAO_0000136'], OBO['STATO_0000088'], OBO['STATO_0000129'],
                NIDM['NumberOfSubjectsReification']
            ]
            it = 0
            for term in terms_under_development:
                m = True
                # Loop to ensure that we replace all occurrences of the term
                # (this can happen if a term is defined in an import and
                # further relations are added in the nidm-results.owl file)
                while m:
                    m = re.search(
                        re.escape("###  " + str(term)) + r"[^\#]*\.", owl_txt)

                    if m:
                        owl_txt = owl_txt.replace(m.group(), "")
                        it = it + 1
                    elif it == 0:
                        raise Exception(str(term) + " not found")

        with open(release_owl_file, 'w') as fp:
            fp.write(owl_txt)

        # Create the specification (before the example addresses are updated,
        # to avoid issues with examples pointing to a tag not yet pushed)
        create_spec(self.nidm_original_version)
        image_dir_prev = os.path.join(SPECSPATH, "img", "nidm-results_dev")
        image_dir = os.path.join(SPECSPATH, "img",
                                 "nidm-results_" + self.nidm_version)
        if not os.path.isdir(image_dir):
            shutil.copytree(image_dir_prev, image_dir)

        # Update version number in examples and in script files
        script_files = glob.glob(
            os.path.join(NIDMRESULTSPATH, "scripts", "*.py"))
        for script in script_files:
            if not script.endswith("release_nidm_results.py"):
                with open(script, 'r') as fp:
                    script_txt = fp.read()
                with open(script, 'w') as fp:
                    fp.write(
                        script_txt.replace(
                            'version="dev"',
                            'version="' + self.nidm_original_version + '"'))
        # Re-create turtle examples from template
        recompute_all_ex.main()
        # Upload to Prov Store
        UpdateExampleReadmes.main()
        # Update terms README
        UpdateTermReadme.main()
        # Update specifications

        # Replace address of examples
        owl_txt = owl_txt.replace(
            "https://raw.githubusercontent.com/incf-nidash/nidm/\
master/", "https://raw.githubusercontent.com/incf-nidash/nidm/\
NIDM-Results_" + self.nidm_original_version + "/")

        with open(release_owl_file, 'w') as fp:
            fp.write(owl_txt)

        create_pref(release_owl_file)
Example #6
    def create_release(self):
        owl_file = os.path.join(TERMS_FOLDER, "nidm-results.owl")
        assert os.path.exists(owl_file)

        # Copy the owl file to the release folder
        release_owl_file = os.path.join(RELEASED_TERMS_FOLDER, "nidm-results_%s.owl" % (self.nidm_version))
        shutil.copyfile(owl_file, release_owl_file)

        with open(release_owl_file, "r") as fp:
            owl_txt = fp.read()

        # Remove imports and copy the import directly in the release file
        match = re.search(r"\[[\w:;\n\s]" + r"*(?P<imports>(\s*<.*>\s*,?\s*\n)*)\s*\] \.", owl_txt)
        if match:
            owl_txt = owl_txt.replace(match.group(), "")

            owl_imports = re.findall(r"<.*>", match.group("imports"))

            for im in owl_imports:
                im = im.replace("<", "").replace(">", "")
                im_name = self.get_import_name(im)
                name = im_name.split("-")[0].replace("_import", "")

                im_file = os.path.join(IMPORT_FOLDER, im_name + ".ttl")

                if os.path.exists(im_file):
                    with open(im_file, "r") as fp:
                        im_txt = fp.read()
                else:
                    response = urllib2.urlopen(im + ".ttl")
                    im_txt = response.read()

                # Replace prefix ":" by named namespace in import
                default_match = re.search(r"@prefix : <.*>", im_txt)
                if default_match:
                    im_txt = im_txt.replace(" :", " " + name + ":")
                    im_txt = im_txt.replace("\n:", "\n" + name + ":")

                # Remove base prefix
                base_match = re.search(r"@base <.*>", im_txt)
                if base_match:
                    im_txt = im_txt.replace(base_match.group(), "")

                # Remove description of subset ontology
                this_ontology_match = re.search(
                    r"<http://[^\n]*?> rdf:type owl:Ontology .*?\..*?###", im_txt, re.DOTALL
                )
                if this_ontology_match:
                    im_txt = im_txt.replace(this_ontology_match.group(), "###")

                # Copy missing prefixes in nidm-results owl file
                prefixes = re.findall(r"@prefix \w+: <.*> \.\n", im_txt)
                for prefix in prefixes:
                    if prefix not in owl_txt:
                        owl_txt = prefix + owl_txt
                    im_txt = im_txt.replace(prefix, "")

                owl_txt = owl_txt + "\n\n##### Imports from %s #####" % (name)
                owl_txt = owl_txt + im_txt

        # Remove AFNI-related terms (not ready for release yet)
        if int(self.nidm_version.split("-rc")[0]) <= 130:
            owl_txt = owl_txt.replace("@prefix afni: <http://purl.org/nidash/afni#> .\n", "")
            # Remove terms: nidm:'Legendre Polynomial Order', afni:'BLOCK',
            # afni:'GammaHRF' and afni:'LegendrePolynomialDriftModel'
            # and 'vertices' terms not yet in use
            terms_under_development = [
                NIDM["NIDM_0000014"],
                AFNI["BLOCK"],
                AFNI["GammaHRF"],
                AFNI["LegendrePolynomialDriftModel"],
                NIDM["NIDM_0000083"],
                NIDM["NIDM_0000137"],
                NIDM["NIDM_0000142"],
                NIDM["NIDM_0000158"],
                SPM["SPM_0000011"],
                SPM["SPM_0000012"],
            ]

            # Remove the reification property (to be further discussed with
            # STATO)
            terms_under_development += [
                OBO["IAO_0000136"],
                OBO["STATO_0000088"],
                OBO["STATO_0000129"],
                NIDM["NumberOfSubjectsReification"],
            ]
            it = 0
            for term in terms_under_development:
                m = True
                # Loop to ensure that we replace all occurrences of the term
                # (this can happen if a term is defined in an import and
                # further relations are added in the nidm-results.owl file)
                while m:
                    m = re.search(re.escape("###  " + str(term)) + r"[^\#]*\.", owl_txt)

                    if m:
                        owl_txt = owl_txt.replace(m.group(), "")
                        it = it + 1
                    elif it == 0:
                        raise Exception(str(term) + " not found")

        with open(release_owl_file, "w") as fp:
            fp.write(owl_txt)

        # Create the specification (before the example addresses are updated,
        # to avoid issues with examples pointing to a tag not yet pushed)
        create_spec(self.nidm_original_version)
        image_dir_prev = os.path.join(SPECSPATH, "img", "nidm-results_dev")
        image_dir = os.path.join(SPECSPATH, "img", "nidm-results_" + self.nidm_version)
        if not os.path.isdir(image_dir):
            shutil.copytree(image_dir_prev, image_dir)

        # Update version number in examples and in script files
        script_files = glob.glob(os.path.join(NIDMRESULTSPATH, "scripts", "*.py"))
        for script in script_files:
            if not script.endswith("release_nidm_results.py"):
                with open(script, "r") as fp:
                    script_txt = fp.read()
                with open(script, "w") as fp:
                    fp.write(script_txt.replace('version="dev"', 'version="' + self.nidm_original_version + '"'))
        # Re-create turtle examples from template
        recompute_all_ex.main()
        # Upload to Prov Store
        UpdateExampleReadmes.main()
        # Update terms README
        UpdateTermReadme.main()
        # Update specifications

        # Replace address of examples
        owl_txt = owl_txt.replace(
            "https://raw.githubusercontent.com/incf-nidash/nidm/\
master/",
            "https://raw.githubusercontent.com/incf-nidash/nidm/\
NIDM-Results_"
            + self.nidm_original_version
            + "/",
        )

        with open(release_owl_file, "w") as fp:
            fp.write(owl_txt)

        create_pref(release_owl_file)
Example #7
    def create_release(self):
        owl_file = os.path.join(TERMS_FOLDER, 'nidm-results.owl')
        assert os.path.exists(owl_file)

        # Copy the owl file to the release folder
        release_owl_file = os.path.join(
            RELEASED_TERMS_FOLDER,
            "nidm-results_%s.owl" % (self.nidm_version))
        shutil.copyfile(owl_file, release_owl_file)

        with open(release_owl_file, 'r') as fp:
            owl_txt = fp.read()

        # Remove imports and copy the import directly in the release file
        match = re.search(
            r'\[[\w:;\n\s]' +
            r'*(?P<imports>(\s*<.*>\s*,?\s*\n)*)\s*\] \.', owl_txt)
        if match:
            owl_txt = owl_txt.replace(match.group(), "")

            owl_imports = re.findall(r"<.*>", match.group("imports"))

            for im in owl_imports:
                im = im.replace("<", "").replace(">", "")
                im_name = self.get_import_name(im)
                name = im_name.split("-")[0].replace("_import", "")

                im_file = os.path.join(IMPORT_FOLDER, im_name+".ttl")

                if os.path.exists(im_file):
                    with open(im_file, 'r') as fp:
                        im_txt = fp.read()
                else:
                    response = urllib2.urlopen(im+'.ttl')
                    im_txt = response.read()

                # Replace prefix ":" by named namespace in import
                default_match = re.search(r'@prefix : <.*>', im_txt)
                if default_match:
                    im_txt = im_txt.replace(" :", " "+name+":")
                    im_txt = im_txt.replace("\n:", "\n"+name+":")

                # Remove base prefix
                base_match = re.search(r'@base <.*>', im_txt)
                if base_match:
                    im_txt = im_txt.replace(base_match.group(), "")

                # Copy missing prefixes in nidm-results owl file
                prefixes = re.findall(r'@prefix \w+: <.*> \.\n', im_txt)
                for prefix in prefixes:
                    if prefix not in owl_txt:
                        owl_txt = prefix+owl_txt
                    im_txt = im_txt.replace(prefix, "")

                owl_txt = owl_txt + "\n\n##### Imports from %s #####" % (name)
                owl_txt = owl_txt + im_txt

        # Remove AFNI-related terms (not ready for release yet)
        if int(self.nidm_version) <= 110:
            owl_txt = owl_txt.replace(
                "@prefix afni: <http://purl.org/nidash/afni#> .\n", "")
            # Remove terms: nidm:'Legendre Polynomial Order', afni:'BLOCK',
            # afni:'GammaHRF' and afni:'LegendrePolynomialDriftModel'
            terms_under_development = [
                NIDM['NIDM_0000014'], AFNI['BLOCK'], AFNI['GammaHRF'],
                AFNI['LegendrePolynomialDriftModel']]
            for term in terms_under_development:
                m = re.search(
                    re.escape("###  "+str(term))+r"[^\#]*\.", owl_txt)
                owl_txt = owl_txt.replace(m.group(), "")

        with open(release_owl_file, 'w') as fp:
            fp.write(owl_txt)

        # Create the specification (before the example addresses are updated,
        # to avoid issues with examples pointing to a tag not yet pushed)
        create_spec(self.nidm_original_version)
        if self.nidm_version == "100":
            image_dir_prev = os.path.join(
                SPECSPATH, "img", "nidm-results_100")
        image_dir = os.path.join(
            SPECSPATH, "img", "nidm-results_"+self.nidm_version)
        if not os.path.isdir(image_dir):
            shutil.copytree(image_dir_prev, image_dir)

        # Update version number in examples and in script files
        script_files = glob.glob(
            os.path.join(NIDMRESULTSPATH, "scripts", "*.py"))
        for script in script_files:
            if not script.endswith("release_nidm_results.py"):
                with open(script, 'r') as fp:
                    script_txt = fp.read()
                with open(script, 'w') as fp:
                    fp.write(
                        script_txt.replace(
                            'version="dev"',
                            'version="' + self.nidm_original_version + '"'))
        # Re-create turtle examples from template
        recompute_all_ex.main()
        # Convert turtle to provn and upload to Prov Store
        UpdateExampleReadmes.main()
        # Update terms README
        UpdateTermReadme.main()
        # Update specifications

        # Replace address of examples
        owl_txt = owl_txt.replace(
            "https://raw.githubusercontent.com/incf-nidash/nidm/\
master/",
            "https://raw.githubusercontent.com/incf-nidash/nidm/\
NIDM-Results_"+self.nidm_original_version+"/")

        with open(release_owl_file, 'w') as fp:
            fp.write(owl_txt)
Example #8
"""
Re-generate examples (based on templates), specification documents (based on owl
files) and term README (based on owl files) 
@author: Camille Maumet <*****@*****.**>
@copyright: University of Warwick 2015
"""
import os, sys

REL_PATH = os.path.dirname(os.path.abspath(__file__))
NIDM_PATH = os.path.join(REL_PATH, os.pardir, "nidm")

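# Make each component's scripts folder importable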
for component in ["nidm-results", "nidm-experiment"]:
	COMPONENT_SCRIPTS = os.path.join(NIDM_PATH, component, "scripts")
	sys.path.append(COMPONENT_SCRIPTS)

import recompute_all_ex
import UpdateExampleReadmes
import UpdateTermReadme
import create_results_specification
import create_expe_specification

if __name__ == '__main__':
	# Re-create turtle examples from template
	recompute_all_ex.main()
	# Convert turtle to provn and upload to Prov Store
	UpdateExampleReadmes.main()
	# Update terms README
	UpdateTermReadme.main()
	# Update specifications
	create_results_specification.main("dev")
	create_expe_specification.main()