Example #1
def serialize_as_n3():
    dest = '../WormData.n3'
    # XXX: Properties aren't initialized until the first object of a class is created,
    #      so we create them here

    P.config('rdf.graph').serialize(dest, format='n3')
    print('serialized to n3 file')
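Note: the XXX comment above refers to an object-creation step that this variant does not include; Example #5 shows the same function with the creation loop in place.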
Example #4
    def setUpClass(cls):
        import csv

        cls.neurons = []  # list that holds the names of the 302 neurons at class-level scope

        if not USE_BINARY_DB:
            PyOpenWorm.connect(conf=Data())  # Connect for integrity tests that use PyOpenWorm functions
            cls.g = PyOpenWorm.config('rdf.graph')  # declare class-level scope for the database
            cls.g.parse("OpenWormData/WormData.n3", format="n3")  # load in the database
        else:
            conf = Configure(**{"rdf.source": "ZODB", "rdf.store_conf": BINARY_DB})
            PyOpenWorm.connect(conf=conf)
            cls.g = PyOpenWorm.config('rdf.graph')

        # Grab the list of the names of the 302 neurons
        with open('OpenWormData/aux_data/neurons.csv', 'r') as csvfile:
            reader = csv.reader(csvfile, delimiter=';', quotechar='|')
            for row in reader:
                if row and row[0]:  # Only save valid neuron names
                    cls.neurons.append(row[0])
Example #5
def serialize_as_n3():
    dest = '../WormData.n3'
    # XXX: Properties aren't initialized until the first object of a class is created,
    #      so we create them here

    for x in dir(P):
        if isinstance(getattr(P, x), type) and issubclass(getattr(P, x), P.DataObject):
            c = getattr(P, x)
            if x == 'values':
                c("dummy")
            else:
                c()
    P.config('rdf.graph').serialize(dest, format='n3')
    print('serialized to n3 file')
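A minimal way to exercise this helper (a sketch, not part of the original examples): connect, call the function, then disconnect. The 'default.conf' path is an assumption borrowed from the connection snippet further below; adjust it to your setup.

import PyOpenWorm as P

P.connect('default.conf')  # assumed config file path; see the connection snippet below
serialize_as_n3()          # writes ../WormData.n3 and prints a confirmation
P.disconnect()             # close the connection opened above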
Example #10
def do_insert(ident,
              config="default.conf",
              logging=False,
              imports_context_ident=None,
              basedir=aux_data()):

    sources = init_sources()
    extras = init_extra_sources(basedir)
    data_sources_by_key = {x.key: x for x in sources + extras}
    trans_map = init_translators() + init_extra_neuron_data_translators(extras)
    P.connect(configFile=config, do_logging=logging)
    P.config()

    CTX = Context(ident=ident + '-data',
                  imported=(P.CONTEXT, ),
                  conf=P.config())

    EVCTX = Context(ident=ident + '-evidence',
                    imported=(P.CONTEXT, ),
                    conf=P.config())

    IWCTX = Context(ident=ident, imported=(CTX, EVCTX), conf=P.config())

    imports_context = Context(ident=imports_context_ident, conf=P.config())

    try:
        t0 = time()
        translators = {}
        remaining = list(trans_map)
        last_remaining = None
        saved_contexts = set()
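        # Fixed-point sweep: a translation runs only once all of its input
        # sources are available, so re-scan the remaining entries until a
        # full pass makes no progress.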
        while remaining != last_remaining:
            next_remaining = []
            for t in remaining:
                if not isinstance(t[0], (list, tuple)):
                    source_keys = (t[0], )
                else:
                    source_keys = t[0]

                sources = tuple(
                    data_sources_by_key.get(s) for s in source_keys)
                if None in sources:
                    next_remaining.append(t)
                    continue
                translator_class = t[1]
                if len(t) > 2:
                    output_key = t[2]
                else:
                    output_key = None
                translator = translators.get(translator_class, None)
                if not translator:
                    translator = translator_class()
                    translators[translator_class] = translator

                print('\n'.join(
                    'Input({}/{}): {}'.format(i + 1, len(sources), s)
                    for i, s in enumerate(sources)))
                print('Translating with {}'.format(translator))
                orig_wd = os.getcwd()
                os.chdir(basedir)
                try:
                    res = translator(*sources, output_key=output_key)
                finally:
                    os.chdir(orig_wd)

                print('Result: {}'.format(res))
                if isinstance(res, DataWithEvidenceDataSource):
                    res.data_context.save_context(
                        inline_imports=True, saved_contexts=saved_contexts)
                    res.data_context.save_imports(imports_context)
                    res.evidence_context.save_context(
                        inline_imports=True, saved_contexts=saved_contexts)
                    res.evidence_context.save_imports(imports_context)
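                    # Any contexts left on the result here are unexpected; fail loudly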
                    for ctx in res.contexts:
                        raise Exception()

                if res:
                    if res.key:
                        data_sources_by_key[res.key] = res
                    else:
                        data_sources_by_key[res.identifier] = res
            last_remaining = list(remaining)
            remaining = next_remaining
        for x in remaining:
            warn("Failed to process: {}".format(x))

        # attach_neuromlfiles_to_channel()

        t1 = time()
        print("Saving data...")
        graph = P.config('rdf.graph')
        for src in data_sources_by_key.values():
            if isinstance(src, DataWithEvidenceDataSource):
                print('saving', src)
                CTX.add_import(src.data_context)
                EVCTX.add_import(src.evidence_context)
                for ctx in src.contexts:
                    IWCTX.add_import(ctx)
        IWCTX.save_context(graph, saved_contexts=saved_contexts)
        IWCTX.save_imports(imports_context)
        print('imports context size', len(imports_context))
        print("Saved %d triples." % IWCTX.triples_saved)
        t2 = time()

        print("Serializing...")
        serialize_as_nquads()
        t3 = time()
        print("generating objects took", t1 - t0, "seconds")
        print("saving objects took", t2 - t1, "seconds")
        print("serializing objects took", t3 - t2, "seconds")

    except Exception:
        traceback.print_exc()
    finally:
        P.disconnect()
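A sketch of how this entry point might be invoked; the identifier URI below is illustrative, not taken from the original examples.

do_insert('http://example.org/my-worm-import',  # hypothetical identifier for the saved contexts
          config='default.conf',                # config file, as in the other examples
          logging=True)                         # enable PyOpenWorm logging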
Example #11
def serialize_as_nquads():
    P.config('rdf.graph').serialize('WormData.n4', format='nquads')
    print('serialized to nquads file')
Example #12
    def config(self):
        return PyOpenWorm.config()

    def setUp(self):
        PyOpenWorm.connect(configFile='tests/data_integrity_test.conf')
        self.g = PyOpenWorm.config("rdf.graph")
        self.context = Context()
        self.qctx = self.context.stored
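The next example performs the same kind of connection, but builds the configuration in code with Configure instead of loading it from a file.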
Example #14
    def config(self):
        return PyOpenWorm.config()

    def setUp(self):
        PyOpenWorm.connect(
            conf=Configure(
                **{'rdf.store_conf': 'tests/test.db', 'rdf.source': 'ZODB'}))
        self.g = PyOpenWorm.config("rdf.graph")
import PyOpenWorm as P
P.connect('default.conf')
P.config()['rdf.graph'].serialize('../out.n3', format='n3')
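Note that P.config('rdf.graph') (as in Example #1) and P.config()['rdf.graph'] (as here) retrieve the same graph object; the examples use the two forms interchangeably.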