示例#1
0
 def test_save_import(self):
     """Saving a context's imports into a target context adds one triple."""
     target = Context(ident='http://example.com/context_0')
     importer = Context(ident='http://example.com/context_1')
     imported = Context(ident='http://example.com/context_1')
     importer.add_import(imported)
     importer.save_imports(target)
     self.assertEqual(len(target), 1)
示例#2
0
 def test_clear(self):
     """clear() drops every previously staged statement."""
     uri = 'http://example.com/context_1'
     context = Context(ident=uri)
     for index in range(5):
         context.add_statement(create_mock_statement(uri, index))
     context.clear()
     self.assertEqual(len(context), 0)
示例#3
0
def test_translate_data_source_loader(self):
    """End-to-end check of `pow translate`: register an LFDS data source and
    a pass-through translator, save the contexts, then assert the command's
    output mentions the source's zip file name."""
    with connect(p(self.testdir, '.pow', 'pow.conf')) as conn:
        with transaction.manager:
            # Create data sources
            ctx = Context(ident='http://example.org/context', conf=conn.conf)
            ctx(LFDS)(
                ident='http://example.org/lfds',
                file_name='Merged_Nuclei_Stained_Worm.zip',
                torrent_file_name='d9da5ce947c6f1c127dfcdc2ede63320.torrent')

            # Pass-through translator: prints the input's path, returns it.
            # NOTE(review): 'translate' lacks 'self' -- confirm that
            # DataTranslator invokes it unbound before changing it.
            class DT(DataTranslator):
                class_context = ctx.identifier
                input_type = LFDS
                output_type = LFDS
                translator_identifier = 'http://example.org/trans1'

                def translate(source):
                    print(source.full_path())
                    return source

            ctx.mapper.process_class(DT)
            dt = ctx(DT)()
            # Create a translator
            ctx_id = conn.conf['data_context_id']
            DT.definition_context.save(conn.conf['rdf.graph'])
            main_ctx = Context(ident=ctx_id, conf=conn.conf)
            main_ctx.add_import(ctx)
            main_ctx.save_imports()
            ctx.save()

    # Do translation
    assertRegexpMatches(
        self.sh(
            'pow translate http://example.org/trans1 http://example.org/lfds'),
        r'Merged_Nuclei_Stained_Worm.zip')
示例#4
0
def test_translator_list(self):
    """`pow translator list` prints the registered translator's URI on a
    line of its own."""
    expected = URIRef('http://example.org/trans1')
    with connect(p(self.testdir, '.pow', 'pow.conf')) as conn:
        with transaction.manager:
            # Create data sources
            ctx = Context(ident='http://example.org/context', conf=conn.conf)

            # NOTE(review): 'translate' lacks 'self' -- confirm that
            # DataTranslator invokes it unbound before changing it.
            class DT(DataTranslator):
                class_context = ctx.identifier
                translator_identifier = expected

                def translate(source):
                    pass

            ctx.mapper.process_class(DT)

            DT.definition_context.save(conn.conf['rdf.graph'])
            # Create a translator
            dt = ctx(DT)()

            ctx_id = conn.conf['data_context_id']
            main_ctx = Context(ident=ctx_id, conf=conn.conf)
            main_ctx.add_import(ctx)
            main_ctx.save_imports()
            ctx.save()

    # List translators; the N3-formatted URI must match a whole line
    assertRegexpMatches(
        self.sh('pow translator list'),
        re.compile('^' + expected.n3() + '$', flags=re.MULTILINE))
示例#5
0
 def test_save_context(self):
     """save_context() writes every staged statement into the target graph."""
     destination = set()
     uri = 'http://example.com/context_1'
     context = Context(ident=uri)
     for index in range(5):
         context.add_statement(create_mock_statement(uri, index))
     context.save_context(destination)
     self.assertEqual(len(destination), 5)
示例#6
0
    def test_context_store(self):
        """An object created through a context is retrievable by query."""
        class A(DataObject):
            pass

        context = Context(ident='http://example.com/context_1')
        context(A)(ident='anA')
        loaded_ids = tuple(obj.identifier for obj in context.query(A)().load())
        self.assertIn(URIRef('anA'), loaded_ids)
示例#7
0
    def __enter__(self):
        """Connect to the store and load the worm plus its neuron network."""
        P.connect(POW_CONF_PATH)
        self.ctx = Context(ident=WORM_IDENT)
        self.worm = self.ctx.stored(Worm)()
        self.net = self.worm.neuron_network()
        return self
示例#8
0
 def test_contents_triples(self):
     """Every triple from contents_triples() was staged beforehand."""
     uri = 'http://example.com/context_1'
     context = Context(ident=uri)
     expected = []
     for index in range(5):
         statement = create_mock_statement(uri, index)
         context.add_statement(statement)
         expected.append(statement.to_triple())
     for emitted in context.contents_triples():
         self.assertTrue(emitted in expected)
示例#9
0
 def test_contents_triples(self):
     """contents_triples() yields only triples that were added."""
     uri = 'http://example.com/context_1'
     context = Context(ident=uri)
     staged = [create_mock_statement(uri, n) for n in range(5)]
     wanted = []
     for stmt in staged:
         context.add_statement(stmt)
         wanted.append(stmt.to_triple())
     for triple in context.contents_triples():
         self.assertTrue(triple in wanted)
示例#10
0
 def test_triples_saved_noundef_triples_counted(self):
     """A triple containing an unbound Variable is not counted as saved."""
     sink = set()
     uri = 'http://example.com/context_1'
     context = Context(ident=uri)
     mock_stmt = MagicMock()
     mock_stmt.context.identifier = rdflib.term.URIRef(uri)
     mock_stmt.to_triple.return_value = (Variable('var'), 1, 2)
     context.add_statement(mock_stmt)
     context.save_context(sink)
     self.assertEqual(context.triples_saved, 0)
示例#11
0
    def test_context_store(self):
        """A mapped object stored via a context shows up in mixed queries."""
        class A(DataObject):
            pass

        mapper = Mapper(base_class_names=(FCN(A),))
        mapper.add_class(A)
        context = Context(ident='http://example.com/context_1', mapper=mapper)
        context(A)(ident='anA')
        identifiers = tuple(o.identifier for o in context.mixed(A)().load())
        self.assertIn(URIRef('anA'), identifiers)
示例#12
0
class _DataTest(unittest.TestCase):
    """Base TestCase that provisions a fresh RDF store for each test.

    Subclasses may set `ctx_classes` (a dict or an iterable of classes) to
    have those classes contextualized into `self.ctx` during setUp.
    """

    def delete_dir(self):
        """Remove the test store's on-disk data; a missing path is fine."""
        self.path = self.TestConfig['rdf.store_conf']
        try:
            if self.TestConfig['rdf.source'] == "Sleepycat":
                subprocess.call("rm -rf " + self.path, shell=True)
            elif self.TestConfig['rdf.source'] == "ZODB":
                delete_zodb_data_store(self.path)
        except OSError as e:
            if e.errno == 2:
                # The file may not exist and that's fine
                pass
            else:
                # Re-raise without resetting the traceback
                raise

    @classmethod
    def setUpClass(cls):
        pass

    def setUp(self):
        # Set do_logging to True if you like walls of text
        self.TestConfig = Data.open(TEST_CONFIG)
        td = '__tempdir__'
        z = self.TestConfig['rdf.store_conf']
        if z.startswith(td):
            # Relocate the store under a fresh temporary directory
            x = z[len(td):]
            h = tempfile.mkdtemp()
            self.TestConfig['rdf.store_conf'] = h + x
        self.delete_dir()
        PyOpenWorm.connect(conf=self.TestConfig, do_logging=False)
        self.context = Context(ident='http://example.org/test-context',
                               conf=self.TestConfig)
        typ = type(self)
        if hasattr(typ, 'ctx_classes'):
            # BUG FIX: isinstance arguments were reversed; we must check
            # whether ctx_classes IS a dict, not whether the dict type is
            # an instance of ctx_classes.
            if isinstance(typ.ctx_classes, dict):
                self.ctx = self.context(typ.ctx_classes)
            else:
                self.ctx = self.context(
                    {x.__name__: x
                     for x in typ.ctx_classes})

    def save(self):
        """Persist the test context's staged statements."""
        self.context.save_context()

    def tearDown(self):
        PyOpenWorm.disconnect()
        self.delete_dir()

    @property
    def config(self):
        return PyOpenWorm.config()
示例#13
0
 def test_save_import(self):
     """An imported context is recorded as a single import triple."""
     dest = Context(ident='http://example.com/context_0')
     importing = Context(ident='http://example.com/context_1')
     duplicate_ident = Context(ident='http://example.com/context_1')
     importing.add_import(duplicate_ident)
     importing.save_imports(dest)
     self.assertEqual(len(dest), 1)
示例#14
0
    def test_statements_with_no_evidence(self):
        """Statements never written in setUp yield no supporting Evidence."""
        # Stage the statements we will look for; they are not in the store
        query_ctx = Context()
        query_ctx(Neuron)('AVAR').innexin('UNC-7')

        # Query against what was written to the IOMemory store provided by
        # _DataTest in self.conf['rdf.graph']
        stored_ctx = Context(conf=self.conf).stored

        found = evidence_for(query_ctx, stored_ctx)
        self.assertEqual(len(found), 0)
示例#15
0
    def test_decontextualize(self):
        """decontextualize() detaches an object from its context."""
        class A(DataObject):
            pass

        context = Context(ident='http://example.com/context_1')
        contextualized = context(A)(ident='anA')
        self.assertIsNone(contextualized.decontextualize().context)
示例#16
0
    def test_distinct_evidence_context(self):
        """Evidence is found when queried from its own metadata context."""
        query_ctx = Context()
        query_ctx(Neuron)('AVAL').innexin('UNC-7')

        # The statements live in one stored context...
        statements = Context(ident='http://example.org/statements', conf=self.conf).stored
        # ...while the Evidence lives in a separate metadata context
        metadata = Context(ident='http://example.org/metadata', conf=self.conf).stored

        found = evidence_for(query_ctx, statements, metadata)
        self.assertEqual(len(found), 1)
示例#17
0
    def test_statements_but_no_evidence(self):
        """No Evidence is found when the evidence context holds none."""
        query_ctx = Context()
        query_ctx(Neuron)('AVAL').innexin('UNC-7')

        # Statements are in the store, but we point the evidence lookup at
        # a context that holds no Evidence.supports statements
        statements = Context(ident='http://example.org/statements', conf=self.conf).stored
        empty_evctx = Context(ident='http://example.org/somerandomcontext', conf=self.conf).stored

        found = evidence_for(query_ctx, statements, empty_evctx)
        self.assertEqual(len(found), 0)
示例#18
0
def evidence_for(qctx, ctx, evctx=None):
    """
    Return a list of Evidence objects supporting the statements in `qctx`.

    Parameters
    ----------
    qctx : object
        an object supported by evidence. If the object is a
        :class:`~PyOpenWorm.context.Context` with no identifier, then the query
        considers statements 'staged' (rather than stored) in the context
    ctx : Context
        Context that bounds where we look for statements about `qctx`. The
        contexts for statements found in this context are the actual targets of
        Evidence.supports statements.
    evctx : Context, optional
        if the Evidence.supports statements should be looked for somewhere
        other than `ctx`, that can be specified in evctx
    """
    if not evctx:
        evctx = ctx
    # Find the contexts whose statements match the query context
    ctxs = query_context(ctx.rdf_graph(), qctx)
    ev_objs = []
    for c in ctxs:
        # Look up Evidence that 'supports' each matching context
        ev = evctx(Evidence)()
        ev.supports(Context(ident=c.identifier).rdf_object)
        ev_objs.extend(ev.load())
    return ev_objs
示例#19
0
 def test_connection_context(self):
     """A connection staged in one context is invisible from another."""
     neuron_a = self.ctx.Neuron(name='NEURON0')
     neuron_b = self.ctx.Neuron(name='NEURON1')
     other_ctx = Context(ident='http://example.org/ctx1')
     neuron_a.connection(
         self.ctx.Connection(pre_cell=neuron_a, post_cell=neuron_b, syntype='send'))
     self.assertEqual(set(), set(other_ctx(neuron_a).connection()))
 def setUp(self):
     """Open the shared data-integrity store and a stored query context."""
     xfail_without_db()
     conn = PyOpenWorm.connect(
         configFile='tests/data_integrity_test.conf')
     self.conn = conn
     self.g = conn.conf["rdf.graph"]
     context = Context()
     self.context = context
     self.qctx = context.stored
    def test_adding_data(self):
        """Run the 'adding_data' docs example with demo fixtures in scope."""
        # Setup a class imported by docs for demonstration purposes
        from PyOpenWorm.dataObject import DataObject, DatatypeProperty
        from PyOpenWorm.context import Context

        # Stand-in loader returning one fixed record. A def is used rather
        # than a lambda assignment (PEP 8, E731); behavior is identical.
        def Load(*args, **kwargs):
            return [
                namedtuple('Record', ('pnum', 'flns', 'hrds'))(12, 1.0, 100)
            ]

        class Widget(DataObject):
            hardiness = DatatypeProperty()
            fullness = DatatypeProperty()
            part_number = DatatypeProperty()

            def identifier_augment(self):
                return self.make_identifier_direct(
                    str(self.part_number.onedef()))

            def defined_augment(self):
                return self.part_number.has_defined_value()

        ctx = Context(
            ident='http://example.org/data/imports/BDW_Widgets_2018-2019')
        ctx.mapper.process_class(Widget)

        ctx(Widget)(part_number=15)
        ctx(Widget)(part_number=17)
        ctx(Widget)(part_number=20)

        self.execute('adding_data',
                     extraglobs={
                         'Load': Load,
                         'Widget': Widget,
                         'ctx18': ctx
                     })
示例#22
0
    def test_retrieve(self):
        """Exactly one Evidence object supports the stored AVAL statement."""
        # The query context is only a container for staged statements; its
        # identifier and database connection don't matter here
        query_ctx = Context()
        query_ctx(Neuron)('AVAL').innexin('UNC-7')

        # Query against what was written to the IOMemory store provided by
        # _DataTest in self.conf['rdf.graph']
        stored_ctx = Context(conf=self.conf).stored

        found = evidence_for(query_ctx, stored_ctx)
        self.assertEqual(len(found), 1)
示例#23
0
 def main(argument_namespace_callback, **kwargs):
     """Force JSON output mode and return a mocked Context result."""
     from PyOpenWorm.context import Context
     argument_namespace_callback.output_mode = 'json'
     result = Mock(name='context_result', spec=Context())
     result.identifier = 'ident'
     result.base_namespace = 'base_namespace'
     return result
示例#24
0
 def test_add_remove_statement(self):
     """Removing a statement restores the prior statement count."""
     uri = 'http://example.com/context_1'
     context = Context(ident=uri)
     extra = create_mock_statement(uri, 42)
     for n in range(5):
         context.add_statement(create_mock_statement(uri, n))
     context.add_statement(extra)
     context.remove_statement(extra)
     self.assertEqual(len(context), 5)
示例#25
0
class _DataTest(unittest.TestCase):
    """Base TestCase that provisions a fresh RDF store for each test.

    Subclasses may set `ctx_classes` (a dict or an iterable of classes) to
    have those classes contextualized into `self.ctx` during setUp.
    """

    def delete_dir(self):
        """Remove the test store's on-disk data; a missing path is fine."""
        self.path = self.TestConfig['rdf.store_conf']
        try:
            if self.TestConfig['rdf.source'] == "Sleepycat":
                subprocess.call("rm -rf " + self.path, shell=True)
            elif self.TestConfig['rdf.source'] == "ZODB":
                delete_zodb_data_store(self.path)
        except OSError as e:
            if e.errno == 2:
                # The file may not exist and that's fine
                pass
            else:
                # Re-raise without resetting the traceback
                raise

    def setUp(self):
        # Set do_logging to True if you like walls of text
        self.TestConfig = Data.open(TEST_CONFIG)
        td = '__tempdir__'
        z = self.TestConfig['rdf.store_conf']
        if z.startswith(td):
            # Relocate the store under a fresh temporary directory
            x = z[len(td):]
            h = tempfile.mkdtemp()
            self.TestConfig['rdf.store_conf'] = h + x
        self.delete_dir()
        self.connection = PyOpenWorm.connect(conf=self.TestConfig, do_logging=False)
        self.context = Context(ident='http://example.org/test-context',
                               conf=self.TestConfig)
        typ = type(self)
        if hasattr(typ, 'ctx_classes'):
            # BUG FIX: isinstance arguments were reversed; we must check
            # whether ctx_classes IS a dict, not whether the dict type is
            # an instance of ctx_classes.
            if isinstance(typ.ctx_classes, dict):
                self.ctx = self.context(typ.ctx_classes)
            else:
                self.ctx = self.context({x.__name__: x for x in typ.ctx_classes})

    def save(self):
        """Persist the test context's staged statements."""
        self.context.save_context()

    def tearDown(self):
        PyOpenWorm.disconnect(self.connection)
        self.delete_dir()

    @property
    def config(self):
        return self.TestConfig
    conf = config
 def setUp(self):
     """Connect to the data-integrity store and build a stored context."""
     conn = PyOpenWorm.connect(
         configFile='tests/data_integrity_test.conf')
     self.conn = conn
     self.conf = conn.conf
     self.g = self.conf["rdf.graph"]
     context = Context(ident="http://openworm.org/data",
                       conf=self.conf)
     self.context = context
     self.qctx = context.stored
示例#27
0
    def test_inverse_property_context(self):
        """Asserting a.a(b) stages the inverse b->a triple in a's context."""
        class A(DataObject):
            def __init__(self, **kwargs):
                super(A, self).__init__(**kwargs)
                self.a = A.ObjectProperty(value_type=B)

        class B(DataObject):
            def __init__(self, **kwargs):
                super(B, self).__init__(**kwargs)
                self.b = B.ObjectProperty(value_type=A)

        InverseProperty(B, 'b', A, 'a')
        first_ctx = Context(ident='http://example.org/context_1')
        second_ctx = Context(ident='http://example.org/context_2')
        obj_a = first_ctx(A)(ident='a')
        obj_b = second_ctx(B)(ident='b')
        obj_a.a(obj_b)
        inverse_triple = (URIRef('b'),
                          URIRef('http://openworm.org/entities/B/b'),
                          URIRef('a'))
        self.assertIn(inverse_triple, list(first_ctx.contents_triples()))
示例#28
0
 def setUp(self):
     """Save the statements and the Evidence the tests will query for."""
     super(EvidenceForTest, self).setUp()
     statements = Context(ident='http://example.org/statements', conf=self.conf)
     statements(Neuron)('AVAL').innexin('UNC-7')
     metadata = Context(ident='http://example.org/metadata', conf=self.conf)
     evidence = metadata(Evidence)(key='js2019')
     evidence.supports(statements.rdf_object)
     # Persist both contexts so the tests query stored data
     statements.save_context()
     metadata.save_context()
示例#29
0
 def setUp(self):
     """Provision a fresh store, connect, and contextualize ctx_classes."""
     # Set do_logging to True if you like walls of text
     self.TestConfig = Data.open(TEST_CONFIG)
     td = '__tempdir__'
     z = self.TestConfig['rdf.store_conf']
     if z.startswith(td):
         # Relocate the store under a fresh temporary directory
         x = z[len(td):]
         h = tempfile.mkdtemp()
         self.TestConfig['rdf.store_conf'] = h + x
     self.delete_dir()
     self.connection = PyOpenWorm.connect(conf=self.TestConfig, do_logging=False)
     self.context = Context(ident='http://example.org/test-context',
                            conf=self.TestConfig)
     typ = type(self)
     if hasattr(typ, 'ctx_classes'):
         # BUG FIX: isinstance arguments were reversed; we must check
         # whether ctx_classes IS a dict, not whether the dict type is an
         # instance of ctx_classes.
         if isinstance(typ.ctx_classes, dict):
             self.ctx = self.context(typ.ctx_classes)
         else:
             self.ctx = self.context({x.__name__: x for x in typ.ctx_classes})
示例#30
0
 def test_add_remove_statement(self):
     """Add then remove of the same statement leaves the count unchanged."""
     uri = 'http://example.com/context_1'
     context = Context(ident=uri)
     transient = create_mock_statement(uri, 42)
     for index in range(5):
         context.add_statement(create_mock_statement(uri, index))
     context.add_statement(transient)
     context.remove_statement(transient)
     self.assertEqual(len(context), 5)
示例#31
0
    def test_inverse_property_context(self):
        """The inverse of a.a(b) appears among a's context triples."""
        class A(DataObject):
            def __init__(self, **kwargs):
                super(A, self).__init__(**kwargs)
                self.a = A.ObjectProperty(value_type=B)

        class B(DataObject):
            def __init__(self, **kwargs):
                super(B, self).__init__(**kwargs)
                self.b = B.ObjectProperty(value_type=A)

        InverseProperty(B, 'b', A, 'a')
        ctx_one = Context(ident='http://example.org/context_1')
        ctx_two = Context(ident='http://example.org/context_2')
        instance_a = ctx_one(A)(ident='a')
        instance_b = ctx_two(B)(ident='b')
        instance_a.a(instance_b)
        wanted = (URIRef('b'),
                  URIRef('http://openworm.org/entities/B/b'),
                  URIRef('a'))
        self.assertIn(wanted, list(ctx_one.contents_triples()))
示例#32
0
 def test_clear(self):
     """After clear(), the context contains no statements."""
     uri = 'http://example.com/context_1'
     context = Context(ident=uri)
     for n in range(5):
         context.add_statement(create_mock_statement(uri, n))
     context.clear()
     self.assertEqual(len(context), 0)
示例#33
0
 def test_save_context(self):
     """All five staged statements end up in the destination graph."""
     sink = set()
     uri = 'http://example.com/context_1'
     context = Context(ident=uri)
     for n in range(5):
         context.add_statement(create_mock_statement(uri, n))
     context.save_context(sink)
     self.assertEqual(len(sink), 5)
示例#34
0
    def test_defined(self):
        """An object whose identifier is augmented from a.a becomes defined
        once a value is assigned to that property."""
        class A(DataObject):
            def __init__(self, **kwargs):
                super(A, self).__init__(**kwargs)
                self.a = A.ObjectProperty(value_type=B)

            def defined_augment(self):
                return self.a.has_defined_value()

            def identifier_augment(self):
                return self.make_identifier(self.a.onedef().identifier.n3())

        class B(DataObject):
            def __init__(self, **kwargs):
                super(B, self).__init__(**kwargs)
                self.b = B.ObjectProperty(value_type=A)

        InverseProperty(B, 'b', A, 'a')
        ctx_one = Context(ident='http://example.org/context_1')
        ctx_two = Context(ident='http://example.org/context_2')
        instance_a = ctx_one(A)()
        instance_b = ctx_two(B)(ident='b')
        instance_a.a(instance_b)
        self.assertTrue(instance_a.defined)
示例#35
0
 def test_triples_saved_noundef_triples_counted(self):
     """Triples with an unbound Variable subject don't count as saved."""
     destination = set()
     uri = 'http://example.com/context_1'
     context = Context(ident=uri)
     undefined_stmt = MagicMock()
     undefined_stmt.context.identifier = rdflib.term.URIRef(uri)
     undefined_stmt.to_triple.return_value = (Variable('var'), 1, 2)
     context.add_statement(undefined_stmt)
     context.save_context(destination)
     self.assertEqual(context.triples_saved, 0)
示例#36
0
class POW:
    """Context manager pairing a PyOpenWorm connection with handles to the
    stored Worm and its neuron network."""

    def __init__(self):
        self.ctx: Context = None
        self.worm: Worm = None
        self.net: Network = None

    def __enter__(self):
        """Connect to the store and load the worm and network handles."""
        P.connect(POW_CONF_PATH)
        self.ctx = Context(ident=WORM_IDENT)
        self.worm = self.ctx.stored(Worm)()
        self.net = self.worm.neuron_network()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Drop the handles and close the store connection."""
        self.ctx = None
        self.worm = None
        self.net = None
        P.disconnect()
示例#37
0
def read_data(include_nonconnected_cells=False):
    """Read the connectome from the OpenWorm data context.

    Returns a pair ``(cells, conns)``: ``conns`` is a list of ConnectionInfo
    for synapses whose pre- and post-cells are both in the model and whose
    post-cell is a Neuron; ``cells`` is every model cell name when
    ``include_nonconnected_cells`` is true, otherwise only the cells that
    appear in at least one returned connection.
    """
    print_("Initialising OpenWormReader")

    with pyopenworm_connect() as conn:
        ctx = Context(ident="http://openworm.org/data", conf=conn.conf).stored
        # Extract the network object from the worm object.
        net = ctx(Worm)().neuron_network()
        all_connections = net.synapses()

        conns = []
        cells = []

        cell_names = get_cells_in_model(net)

        for s in all_connections:
            pre = str(s.pre_cell().name())
            post = str(s.post_cell().name())

            # Keep only neuron-to-neuron connections between modeled cells
            if isinstance(s.post_cell(),
                          Neuron) and pre in cell_names and post in cell_names:
                # Capitalize the synapse type's first letter
                syntype = str(s.syntype())
                syntype = syntype[0].upper() + syntype[1:]
                num = int(s.number())
                synclass = str(s.synclass())
                ci = ConnectionInfo(pre, post, num, syntype, synclass)
                conns.append(ci)
                if pre not in cells:
                    cells.append(pre)
                if post not in cells:
                    cells.append(post)

        print_("Total cells %i (%i with connections)" %
               (len(cell_names), len(cells)))
        print_("Total connections found %i " % len(conns))

        if include_nonconnected_cells:
            return cell_names, conns
        else:
            return cells, conns
示例#38
0
def do_insert(ident,
              config="default.conf",
              logging=False,
              imports_context_ident=None,
              basedir=aux_data()):
    """Run every registered data translator and save the results.

    Builds data/evidence/top-level contexts rooted at `ident`, repeatedly
    applies translators (re-queuing those whose input sources are not yet
    available) until no more progress is made, imports each result's
    contexts, saves everything to the configured graph, and serializes the
    store as N-Quads. Always disconnects from the store on exit.
    """
    sources = init_sources()
    extras = init_extra_sources(basedir)
    data_sources_by_key = {x.key: x for x in sources + extras}
    trans_map = init_translators() + init_extra_neuron_data_translators(extras)
    P.connect(configFile=config, do_logging=logging)
    P.config()

    # Data, evidence, and combined top-level contexts for this run
    CTX = Context(ident=ident + '-data',
                  imported=(P.CONTEXT, ),
                  conf=P.config())

    EVCTX = Context(ident=ident + '-evidence',
                    imported=(P.CONTEXT, ),
                    conf=P.config())

    IWCTX = Context(ident=ident, imported=(CTX, EVCTX), conf=P.config())

    imports_context = Context(ident=imports_context_ident, conf=P.config())

    try:
        t0 = time()
        translators = dict()
        remaining = list(trans_map)
        last_remaining = None
        saved_contexts = set([])
        # Loop until a full pass makes no progress
        while remaining != last_remaining:
            next_remaining = []
            for t in remaining:
                # t is (source_key(s), translator_class[, output_key])
                if not isinstance(t[0], (list, tuple)):
                    source_keys = (t[0], )
                else:
                    source_keys = t[0]

                sources = tuple(
                    data_sources_by_key.get(s) for s in source_keys)
                if None in sources:
                    # An input isn't available yet; retry on the next pass
                    next_remaining.append(t)
                    continue
                translator_class = t[1]
                if len(t) > 2:
                    output_key = t[2]
                else:
                    output_key = None
                # Translator instances are cached per class
                translator = translators.get(translator_class, None)
                if not translator:
                    translator = translator_class()
                    translators[translator_class] = translator

                print('\n'.join(
                    'Input({}/{}): {}'.format(i + 1, len(sources), s)
                    for i, s in enumerate(sources)))
                print('Translating with {}'.format(translator))
                # Translators expect to run from basedir
                orig_wd = os.getcwd()
                os.chdir(basedir)
                try:
                    res = translator(*sources, output_key=output_key)
                finally:
                    os.chdir(orig_wd)

                print('Result: {}'.format(res))
                if isinstance(res, DataWithEvidenceDataSource):
                    res.data_context.save_context(
                        inline_imports=True, saved_contexts=saved_contexts)
                    res.data_context.save_imports(imports_context)
                    res.evidence_context.save_context(
                        inline_imports=True, saved_contexts=saved_contexts)
                    res.evidence_context.save_imports(imports_context)
                    # NOTE(review): this raises on ANY extra context in the
                    # result -- looks like an unfinished branch; confirm
                    for ctx in res.contexts:
                        raise Exception()

                if res:
                    # Index the new source so later translators can find it
                    if res.key:
                        data_sources_by_key[res.key] = res
                    else:
                        data_sources_by_key[res.identifier] = res
            last_remaining = list(remaining)
            remaining = next_remaining
        for x in remaining:
            warn("Failed to process: {}".format(x))

        # attach_neuromlfiles_to_channel()

        t1 = time()
        print("Saving data...")
        graph = P.config('rdf.graph')
        for src in data_sources_by_key.values():
            if isinstance(src, DataWithEvidenceDataSource):
                print('saving', src)
                CTX.add_import(src.data_context)
                EVCTX.add_import(src.evidence_context)
                for ctx in src.contexts:
                    IWCTX.add_import(ctx)
        IWCTX.save_context(graph, saved_contexts=saved_contexts)
        IWCTX.save_imports(imports_context)
        print('imports context size', len(imports_context))
        print("Saved %d triples." % IWCTX.triples_saved)
        t2 = time()

        print("Serializing...")
        serialize_as_nquads()
        t3 = time()
        print("generating objects took", t1 - t0, "seconds")
        print("saving objects took", t2 - t1, "seconds")
        print("serializing objects took", t3 - t2, "seconds")

    except Exception:
        traceback.print_exc()
    finally:
        P.disconnect()
示例#39
0
 def setUp(self):
     """Connect to the shared test store and expose a stored query context."""
     PyOpenWorm.connect(configFile='tests/data_integrity_test.conf')
     self.g = PyOpenWorm.config("rdf.graph")
     context = Context()
     self.context = context
     self.qctx = context.stored
示例#40
0
            lctx = frozenset(ctxs)
            continue
        if len(lctx) == 0:
            return frozenset()
        else:
            lctx = frozenset(ctxs) & lctx
            if len(lctx) == 0:
                return lctx
    return frozenset() if lctx is None else lctx


# Stage the statement we want evidence for
qctx = Context()
qctx(Neuron)('AVAL').innexin('UNC-7')
# Find the stored contexts containing matching statements
ctxs = query_context(conn.conf['rdf.graph'], qctx)
for c in ctxs:
    mqctx = Context(conf=conn.conf)
    print('CONTEXT', c.identifier)
    # Load Evidence objects that 'support' this context
    ev = mqctx.stored(Evidence)()
    ev.supports(Context(ident=c.identifier, conf=conn.conf).rdf_object)
    for x in ev.load():
        ref = x.reference()
        # Print whatever citation metadata the reference type carries
        if isinstance(ref, Document):
            print(ref)
            print('AUTHOR:', ref.author())
            print('URI:', ref.uri())
            print('DOI:', ref.doi())
            print('PMID:', ref.pmid())
            print('WBID:', ref.wbid())
            print()
        elif isinstance(ref, Website):
            print(ref)
示例#41
0
 def test_triples_saved(self):
     """Saving with inline imports counts each context's triple once."""
     sink = set()
     uri1 = 'http://example.com/context_1'
     uri2 = 'http://example.com/context_2'
     uri2_1 = 'http://example.com/context_2_1'
     uri3 = 'http://example.com/context_3'
     uri4 = 'http://example.com/context_4'
     # Import chain: outer -> third -> first -> {second, second_child}
     first = Context(ident=uri1)
     second = Context(ident=uri2)
     second_child = Context(ident=uri2_1)
     first.add_import(second)
     first.add_import(second_child)
     third = Context(ident=uri3)
     third.add_import(first)
     outer = Context(ident=uri4)
     outer.add_import(third)
     # One statement per context, five in total
     first.add_statement(create_mock_statement(uri1, 1))
     second.add_statement(create_mock_statement(uri2, 2))
     second_child.add_statement(create_mock_statement(uri2_1, 2.1))
     third.add_statement(create_mock_statement(uri3, 3))
     outer.add_statement(create_mock_statement(uri4, 4))
     outer.save_context(sink, True)
     self.assertEqual(outer.triples_saved, 5)
示例#42
0
def do_insert(ident, config="default.conf", logging=False, imports_context_ident=None, basedir=aux_data()):
    """Load data sources, run every registered translator over them, and save
    the resulting data/evidence/combined contexts to the RDF store.

    Parameters
    ----------
    ident : str
        Base identifier for the combined output context; '-data' and
        '-evidence' suffixed context identifiers are derived from it.
    config : str
        Configuration file path passed to ``P.connect``.
    logging : bool
        Whether to enable logging in the PyOpenWorm connection.
    imports_context_ident : str, optional
        Identifier of the context that records import relationships.
    basedir : str
        Directory switched to while translators run.
        NOTE(review): ``basedir=aux_data()`` is evaluated once at function
        definition time, not per call — confirm this is intended.
    """

    sources = init_sources()
    extras = init_extra_sources(basedir)
    data_sources_by_key = {x.key: x for x in sources + extras}
    trans_map = init_translators() + init_extra_neuron_data_translators(extras)
    P.connect(configFile=config, do_logging=logging)
    P.config()

    # Context for translated data, importing the core PyOpenWorm context.
    CTX = Context(ident=ident + '-data', imported=(P.CONTEXT,), conf=P.config())

    # Context for evidence attached to the translated data.
    EVCTX = Context(ident=ident + '-evidence', imported=(P.CONTEXT,), conf=P.config())

    # Combined context importing both of the above.
    IWCTX = Context(ident=ident, imported=(CTX, EVCTX), conf=P.config())

    # Records which contexts import which others.
    imports_context = Context(ident=imports_context_ident, conf=P.config())

    try:
        t0 = time()
        translators = dict()
        remaining = list(trans_map)
        last_remaining = None
        saved_contexts = set([])
        # Fixed-point iteration: a translator whose inputs are not yet
        # available is retried on the next pass (an earlier translator may
        # produce them). The loop stops when a pass makes no progress.
        while remaining != last_remaining:
            next_remaining = []
            for t in remaining:
                # Each entry is (source_key_or_keys, translator_class[, output_key]).
                if not isinstance(t[0], (list, tuple)):
                    source_keys = (t[0],)
                else:
                    source_keys = t[0]

                # NOTE(review): this rebinds the earlier `sources` list; harmless
                # here since that list is no longer used after building
                # `data_sources_by_key`, but a distinct name would be clearer.
                sources = tuple(data_sources_by_key.get(s) for s in source_keys)
                if None in sources:
                    # At least one input is missing — retry on the next pass.
                    next_remaining.append(t)
                    continue
                translator_class = t[1]
                if len(t) > 2:
                    output_key = t[2]
                else:
                    output_key = None
                # Translator instances are cached so each class is built once.
                translator = translators.get(translator_class, None)
                if not translator:
                    translator = translator_class()
                    translators[translator_class] = translator

                print('\n'.join('Input({}/{}): {}'.format(i + 1, len(sources), s) for i, s in enumerate(sources)))
                print('Translating with {}'.format(translator))
                # Translators expect `basedir` as the working directory;
                # always restore the original afterwards.
                orig_wd = os.getcwd()
                os.chdir(basedir)
                try:
                    res = translator(*sources, output_key=output_key)
                finally:
                    os.chdir(orig_wd)

                print('Result: {}'.format(res))
                if isinstance(res, DataWithEvidenceDataSource):
                    res.data_context.save_context(inline_imports=True, saved_contexts=saved_contexts)
                    res.data_context.save_imports(imports_context)
                    res.evidence_context.save_context(inline_imports=True, saved_contexts=saved_contexts)
                    res.evidence_context.save_imports(imports_context)
                    for ctx in res.contexts:
                        # NOTE(review): unconditionally raises if the result has
                        # any extra contexts — looks like a guard or placeholder;
                        # confirm whether these should be saved instead.
                        raise Exception()

                if res:
                    # Register the result so later translators can consume it.
                    if res.key:
                        data_sources_by_key[res.key] = res
                    else:
                        data_sources_by_key[res.identifier] = res
            last_remaining = list(remaining)
            remaining = next_remaining
        for x in remaining:
            warn("Failed to process: {}".format(x))

        # attach_neuromlfiles_to_channel()

        t1 = time()
        print("Saving data...")
        graph = P.config('rdf.graph')
        # Import every produced context into the top-level output contexts.
        for src in data_sources_by_key.values():
            if isinstance(src, DataWithEvidenceDataSource):
                print('saving', src)
                CTX.add_import(src.data_context)
                EVCTX.add_import(src.evidence_context)
                for ctx in src.contexts:
                    IWCTX.add_import(ctx)
        IWCTX.save_context(graph, saved_contexts=saved_contexts)
        IWCTX.save_imports(imports_context)
        print('imports context size', len(imports_context))
        print("Saved %d triples." % IWCTX.triples_saved)
        t2 = time()

        print("Serializing...")
        serialize_as_nquads()
        t3 = time()
        print("generating objects took", t1 - t0, "seconds")
        print("saving objects took", t2 - t1, "seconds")
        print("serializing objects took", t3 - t2, "seconds")

    except Exception:
        # Report the failure but fall through so the connection is closed.
        traceback.print_exc()
    finally:
        P.disconnect()
示例#43
0
 def test_add_statement_with_different_context(self):
     """add_statement rejects a statement bound to some other context."""
     ctx = Context(ident='http://example.com/context_1')
     foreign_stmt = create_mock_statement('http://example.com/context_2', 1)
     with self.assertRaises(ValueError):
         ctx.add_statement(foreign_stmt)
示例#44
0
 def test_save_context_no_graph(self):
     """Saving with no configured graph raises, mentioning 'graph'."""
     ctx = Context()
     with patch('PyOpenWorm.data.ALLOW_UNCONNECTED_DATA_USERS', False), \
             self.assertRaisesRegexp(Exception, r'graph'):
         ctx.save_context()
示例#45
0
 def test_context_setter(self):
     """The `context` attribute stores and returns whatever is assigned."""
     subject = Context(ident='http://example.com/context_1')
     subject.context = 42
     self.assertEqual(subject.context, 42)
示例#46
0
    def test_triples_saved_multi(self):
        """A diamond of imports saves each context's triple exactly once."""
        graph = set()
        uri_root = 'http://example.com/context_1'
        uri_top = 'http://example.com/context_11'
        uri_mid = 'http://example.com/context_12'
        root = Context(ident=uri_root)
        top = Context(ident=uri_top)
        mid = Context(ident=uri_mid)
        mid.add_import(root)
        top.add_import(mid)
        top.add_import(root)

        # One mock statement per context, same order as before.
        for ctx, uri, tag in ((root, uri_root, 1), (top, uri_top, 3),
                              (mid, uri_mid, 2)):
            ctx.add_statement(create_mock_statement(uri, tag))
        top.save_context(graph, inline_imports=True)
        self.assertEqual(top.triples_saved, 3)
示例#47
0
 def test_zero_imports(self):
     """A context with no imports contributes nothing to the target."""
     target = Context(ident='http://example.com/context_0')
     source = Context(ident='http://example.com/context_1')
     source.save_imports(target)
     self.assertEqual(len(target), 0)
示例#48
0
from __future__ import absolute_import
from __future__ import print_function
import PyOpenWorm as P
from PyOpenWorm.evidence import Evidence
from PyOpenWorm.neuron import Neuron
from PyOpenWorm.document import Document
from PyOpenWorm.data import Data
from PyOpenWorm.context import Context

# Dummy database configuration backed by ZODB.
dummy_conf = Data({'rdf.source': 'ZODB'})

# Connect to the database using the dummy configuration.
P.connect(conf=dummy_conf)

data_ctx = Context(ident='http://example.org/data')
meta_ctx = Context(ident='http://example.org/meta')

# A Neuron object to attach statements to.
neuron = data_ctx(Neuron)(name='AVAL')

# An Evidence object whose `doi` and `pmid` fields are populated.
# See `PyOpenWorm/evidence.py` for the other available fields.
doc = meta_ctx(Document)(key='Anonymous2011', doi='125.41.3/ploscompbiol', pmid='12345678')
evidence = meta_ctx(Evidence)(key='Anonymous2011', reference=doc)

# The Evidence object asserts something about the enclosed dataObject.
# Here a receptor is added to the Neuron made earlier -- e.g. asserting the
# existence of receptor UNC-8 on neuron AVAL.
neuron.receptor('UNC-8')
示例#49
0
 def test_add_import(self):
     """save_imports records the whole transitive import graph (4 edges)."""
     target = Context(ident='http://example.com/context_0')
     base = Context(ident='http://example.com/context_1')
     child_a = Context(ident='http://example.com/context_2')
     child_b = Context(ident='http://example.com/context_2_1')
     base.add_import(child_a)
     base.add_import(child_b)
     wrapper = Context(ident='http://example.com/context_3')
     wrapper.add_import(base)
     final_ctx = Context(ident='http://example.com/context_1', imported=(wrapper,))
     final_ctx.save_imports(target)
     self.assertEqual(len(target), 4)