Example #1
    def handle(self, *args, **options):
        from rdflib.graph import Graph
        from rdflib import RDFS, RDF, OWL
        from rdflib.term import URIRef, BNode, Literal
        import yaml

        OboDefinition = URIRef('http://purl.obolibrary.org/obo/IAO_0000115')
        part_of_some = URIRef('http://purl.obolibrary.org/obo/BFO_0000050_some')
        Synonym = URIRef(u'http://www.geneontology.org/formats/oboInOwl#hasSynonym')

        g = Graph()
        g.parse(args[1], format='xml')

        Model = MuscleOwl if args[0] == 'm' else BehaviorOwl

        # first pass, add the things
        for subject in nonblankthings(g, g.subjects()):
            slabel = g.label(subject)
            m = get_model_instance(Model, unicode(subject))
            m.uri = unicode(subject)
            m.label = unicode(slabel)

            m.rdfs_is_class = (subject, RDF.type, OWL.Class) in g
            # g.value() returns None when the property is absent; avoid storing u'None'
            obo_definition = g.value(subject, OboDefinition, None)
            m.obo_definition = unicode(obo_definition) if obo_definition is not None else None
            rdfs_comment = g.value(subject, RDFS.comment, None)
            m.rdfs_comment = unicode(rdfs_comment) if rdfs_comment is not None else None
            synonyms = [unicode(syn) for syn in g.objects(subject, Synonym)]
            if synonyms:
                m.synonyms_comma_separated = ", ".join(synonyms)
            else:
                m.synonyms_comma_separated = None

            m.save()

        # second pass, add the relationships
        for subject in nonblankthings(g, g.subjects()):
            slabel = g.label(subject)
            m = Model.objects.get(uri=unicode(subject))

            m.rdfs_subClassOf_ancestors.clear()
            # add all super-classes to m.rdfs_subClassOf_ancestors
            for obj in nonblankthings(g, g.transitive_objects(subject, RDFS.subClassOf)):
                if obj != subject:
                    a = Model.objects.get(uri=unicode(obj))
                    m.rdfs_subClassOf_ancestors.add(a)

            m.bfo_part_of_some.clear()
            # add all things that this thing is part of to m.bfo_part_of_some
            for obj in nonblankthings(g, g.objects(subject, part_of_some)):
                if obj != subject:
                    a = Model.objects.get(uri=unicode(obj))
                    m.bfo_part_of_some.add(a)

            # add only direct super-classes to m.rdfs_subClassOf
            #for obj in nonblankthings(g, g.objects(subject, RDFS.subClassOf)):
            #    if obj != subject:
            #        a = Model.objects.get(uri=unicode(obj))
            #        m.rdfs_subClassOf.add(a)

            m.save()
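
The command above relies on two helpers defined elsewhere in the module, nonblankthings and get_model_instance. A minimal sketch of what they are assumed to do (skip RDF blank nodes, and get-or-create a model row keyed by URI); the real implementations may differ:

from rdflib.term import BNode

def nonblankthings(g, things):
    # Yield only named resources, skipping RDF blank nodes.
    return (t for t in things if not isinstance(t, BNode))

def get_model_instance(Model, uri):
    # Return the existing row for this URI, or a new unsaved instance.
    try:
        return Model.objects.get(uri=uri)
    except Model.DoesNotExist:
        return Model(uri=uri)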
Example #2
import re

from rdflib import Graph, URIRef
from rfc3987 import match  # assumed source of match(uri, rule='IRI_reference')


def ExtractTextFeatures():
    id2text = open("../dataset/FGraph_ID_Text.txt", "a", encoding="utf-8")
    NodeID = open("../dataset/FGraph_Origin_Node_ID.txt",
                  "r",
                  encoding="utf-8")
    count = 0
    for line in NodeID.readlines():
        if count < 36661:
            count += 1
            continue
        if count >= 81000: break
        uri, id = line.strip().split("\t\t")
        text = ""
        if match(uri, rule='IRI_reference') is None: text = uri
        elif len(uri.split("#")) > 1:
            text = re.sub(r"(\w)([A-Z])", r"\1 \2", (uri.split("#"))[-1])
        else:
            g = Graph()
            try:
                g.parse(uri)
                text = g.label(URIRef(uri))
            except Exception:
                print("Cannot parse!")
            if text == "":
                uri_s1 = uri.split("resource/")
                if len(uri_s1) > 1: text = uri_s1[-1]
                else:
                    uri_s2 = (uri.split("/"))[-1]
                    if uri_s2 != "": text = uri_s2
                    else: text = uri
        id2text.write(id + "\t\t" + text + "\n")
        print(count)
        count += 1
    id2text.close()
    NodeID.close()
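
For URIs with a fragment, the re.sub call simply inserts a space before each capital letter, so a CamelCase local name becomes readable text. For example:

import re

# "hasBodyPart" -> "has Body Part"
print(re.sub(r"(\w)([A-Z])", r"\1 \2", "hasBodyPart"))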
Example #3
import re

from rdflib import Graph, URIRef
from rfc3987 import match  # assumed source of match(uri, rule='IRI_reference')


def ExtractTextFeatures():
    uri2label = GetEList()
    id2text = open("../dataset/Graph_ID_Text.txt", "w", encoding="utf-8")
    NodeID = open("../dataset/Graph_Origin_Node_ID.txt", "r", encoding="utf-8")
    count = 0
    for line in NodeID.readlines():
        uri, id = line.strip().split("\t\t")
        text = ""
        if match(uri, rule='IRI_reference') is None: text = uri
        elif uri in uri2label: text = uri2label[uri]
        elif len(uri.split("#")) > 1:
            text = re.sub(r"(\w)([A-Z])", r"\1 \2", (uri.split("#"))[-1])
        else:
            g = Graph()
            try:
                g.parse(uri)
                text = g.label(URIRef(uri))
            except Exception:
                print("Cannot parse!")
            if text == "":
                uri_s = uri.split("resource/")
                if len(uri_s) > 1: text = uri_s[-1]
                else: text = (uri.split("/"))[-1]
        id2text.write(id + "\t\t" + text + "\n")
        print(count)
        count += 1
    id2text.close()
    NodeID.close()
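
GetEList() is not shown; judging by its use it returns a dict mapping entity URIs to preferred labels. A hypothetical sketch (the file name and tab-separated format are assumptions):

def GetEList():
    # Hypothetical: build {uri: label} from a tab-separated entity list.
    uri2label = {}
    with open("../dataset/Entity_List.txt", "r", encoding="utf-8") as f:
        for line in f:
            parts = line.strip().split("\t\t")
            if len(parts) == 2:
                uri2label[parts[0]] = parts[1]
    return uri2label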
Example #4
                if covar == NIDM_INDEPENDENT_ERROR:
                    covar = ""
                else:
                    if covar_spatial == NIDM_SPATIALLY_LOCAL_MODEL:
                        covar_spatial = "local"
                    elif covar_spatial == NIDM_SPATIALLY_GLOBAL_MODEL:
                        covar_spatial = "global"
                    elif covar_spatial == NIDM_SPATIALLY_REGULARIZED_MODEL:
                        covar_spatial = "spatially regularized"
                    else:
                        raise Exception(
                            'Unknown spatial variance estimation: ' +
                            str(covar_spatial))
                    covar = " and a " + covar_spatial + " " + \
                        owl_graph.label(covar)

                if drift_model:
                    drift_model = "Drift was fit with a " + \
                        owl_graph.label(drift_model).lower()
                    if spm_drift_cutoff:
                        drift_model = drift_model + \
                            " (" + spm_drift_cutoff + "s cut-off)."
                    if fsl_drift_cutoff:
                        drift_model = drift_model + \
                            " (" + fsl_drift_cutoff + "s FWHM)."
                else:
                    drift_model = ""

                print "-------------------"
                print "%s-level analysis was performed with %s (version %s). \
Example #5
                clus_thresh, unused = threshold_txt(owl_graph,
                                                    height_thresh_type,
                                                    height_value, stat_type)
                thresh += " with a cluster defining threshold " + clus_thresh

            else:
                inference_type = "Voxel-wise"
                thresh, multiple_compa = threshold_txt(owl_graph,
                                                       height_thresh_type,
                                                       height_value, stat_type)
                thresh += " and clusters smaller than %d were discarded" \
                          % int(extent_value)

            if homoscedasticity:
                variance = 'equal'
            else:
                variance = 'unequal'

            print "-------------------"
            print "Group statistic was performed in %s (version %s). \
%s was performed assuming %s variances. %s inference \
was performed %susing a threshold %s. The search volume was %d cm^3 \
(%d voxels)." % (owl_graph.label(software), soft_version,
                 owl_graph.label(est_method).capitalize(),
                 variance, inference_type, multiple_compa, thresh,
                 float(search_vol_units) / 1000, int(search_vol_vox))
            print "-------------------"

    else:
        print "Query returned no results."