async def test_synsplice_remote(self):
    '''
    Verify the feed tool can push syn.splice events to a remote (telepath) cortex.
    '''
    async with self.getTestDmon(mirror='dmoncoreauth') as dmon:

        host, port = dmon.addr
        curl = f'tcp://*****:*****@{host}:{port}/core'

        # Grant the root user the creator role so the feed may add nodes.
        async with await s_telepath.openurl(curl) as core:
            await self.addCreatorDeleterRoles(core)
            await core.addUserRole('root', 'creator')

        with self.getTestDir() as dirn:

            def testmain():

                # Write a single node:add splice to a msgpack file for the feed tool.
                mesg = ('node:add', {'ndef': ('teststr', 'foo')})

                splicefp = s_common.genpath(dirn, 'splice.mpk')
                with s_common.genfile(splicefp) as fd:
                    fd.write(s_msgpack.en(mesg))

                argv = ['--cortex', curl,
                        '--format', 'syn.splice',
                        '--modules', 'synapse.tests.utils.TestModule',
                        splicefp]

                outp = self.getTestOutp()
                self.eq(s_feed.main(argv, outp=outp), 0)

                # NOTE(review): pconf is not defined in this block — presumably a
                # shared fixture holding root credentials; confirm it is in scope.
                with self.getTestProxy(dmon, 'core', **pconf) as core:
                    self.len(1, list(core.eval('teststr=foo')))

            # BUG FIX: the executor future was never awaited, so the test could
            # finish (and its assertions be skipped) before testmain() ever ran.
            # s_feed.main() is synchronous, so it must run off the ioloop thread.
            await s_coro.executor(testmain)
async def test_link_file(self):
    '''
    Exercise linkfile() in both read and write modes.
    '''
    # Read mode: bytes sent over the link appear on the file object.
    rlink, rfile = await s_link.linkfile('rb')

    def drain(fileobj):
        # Consume everything from the file side, then close it.
        data = fileobj.read()
        fileobj.close()
        return data

    task = s_coro.executor(drain, rfile)

    await rlink.send(b'asdf')
    await rlink.send(b'qwer')
    await rlink.fini()

    self.eq(b'asdfqwer', await task)

    # Write mode: bytes written to the file object arrive on the link.
    wlink, wfile = await s_link.linkfile('wb')

    def fill(fileobj):
        fileobj.write(b'asdf')
        fileobj.write(b'qwer')
        fileobj.close()

    task = s_coro.executor(fill, wfile)

    parts = []
    while True:
        chunk = await wlink.recv(1000000)
        if not chunk:
            break
        parts.append(chunk)

    await task
    self.eq(b'asdfqwer', b''.join(parts))
async def test_linksock(self):
    '''
    Exercise linksock() passing bytes in both directions between a Link and a socket.
    '''
    link0, sock0 = await s_link.linksock()
    self.isinstance(link0, s_link.Link)
    self.isinstance(sock0, socket.socket)

    def drain(sock):
        # Pull bytes off the socket until the peer closes.
        parts = []
        while True:
            byts = sock.recv(1024)
            if not byts:
                break
            parts.append(byts)
        return b''.join(parts)

    task = s_coro.executor(drain, sock0)

    # Link -> socket direction.
    await link0.send(b'part1')
    await link0.send(b'qwer')
    await link0.fini()
    self.eq(b'part1qwer', await task)
    sock0.close()

    # Socket -> link direction.
    link1, sock1 = await s_link.linksock()

    def fill(sock):
        sock.sendall(b'part2')
        sock.sendall(b'qwer')
        sock.shutdown(socket.SHUT_WR)

    await s_coro.executor(fill, sock1)

    self.eq(b'part2qwer', await link1.recvsize(9))

    await link1.fini()
    sock1.close()
async def genCaCert(self, network):
    '''
    Return the PEM encoded CA certificate for a network, generating and
    persisting a new one if none exists yet.
    '''
    certpath = self.certdir.getCaCertPath(network)
    if certpath is not None:
        # Already on disk -- just hand back the PEM text.
        with open(certpath, 'rb') as fd:
            return fd.read().decode()

    logger.info(f'Generating CA certificate for {network}')

    # Key/cert generation is CPU bound; run it off the ioloop thread.
    pkey, cert = await s_coro.executor(self.certdir.genCaCert, network, save=False)

    cakey = self.certdir._pkeyToByts(pkey).decode()
    cacert = self.certdir._certToByts(cert).decode()

    # nexusify storage..
    await self.saveCaCert(network, cakey, cacert)

    return cacert
async def test_executor(self):
    '''
    Verify s_coro.executor() runs work off the ioloop thread and
    forwards positional/keyword arguments unchanged.
    '''
    def capture(*args, **kwargs):
        # Report which thread ran us, along with the arguments received.
        return threading.get_ident(), args, kwargs

    thrdid, posargs, kwvals = await s_coro.executor(capture, 1, key='valu')

    # Work must NOT run on the ioloop thread...
    self.ne(thrdid, s_glob._glob_thrd.ident)

    # ...and arguments must pass through untouched.
    self.eq(posargs, (1, ))
    self.eq(kwvals, {'key': 'valu'})

    async def whichthread():
        return threading.get_ident()

    # A directly awaited coroutine runs on the ioloop thread itself.
    self.eq(s_glob._glob_thrd.ident, await whichthread())
async def test_executor(self):
    '''
    Confirm executor() offloads callables from the ioloop thread while
    preserving args/kwargs.
    '''
    loopident = s_glob._glob_thrd.ident

    def worker(*args, **kwargs):
        return threading.get_ident(), args, kwargs

    fut = s_coro.executor(worker, 1, key='valu')
    wtid, wargs, wkwargs = await fut

    # Executor work happens on a pool thread, not the ioloop thread.
    self.ne(wtid, loopident)

    # Positional and keyword arguments arrive intact.
    self.eq(wargs, (1,))
    self.eq(wkwargs, {'key': 'valu'})

    async def coroident():
        return threading.get_ident()

    # Plain coroutines are serviced by the ioloop thread itself.
    self.eq(loopident, await coroident())
async def __anit__(self, path, **kwargs):
    '''
    Async init for an LMDB slab.

    Args:
        path (str): directory path for the LMDB environment.
        **kwargs: lmdb.open() options, plus slab-specific options
            (maxsize, growsize, lockmemory) which are popped off
            before the remainder is handed to lmdb.open().

    Raises:
        s_exc.BadArg: if no map_size is available after defaults/persisted opts.
    '''
    await s_base.Base.__anit__(self)

    kwargs.setdefault('map_size', self.DEFAULT_MAPSIZE)
    kwargs.setdefault('lockmemory', False)
    kwargs.setdefault('map_async', True)

    opts = kwargs

    self.path = pathlib.Path(path)
    self.optspath = self.path.with_suffix('.opts.yaml')

    # Options persisted from a previous run override the caller's kwargs.
    if self.optspath.exists():
        opts.update(s_common.yamlload(self.optspath))

    initial_mapsize = opts.get('map_size')
    if initial_mapsize is None:
        raise s_exc.BadArg('Slab requires map_size')

    mdbpath = self.path / 'data.mdb'
    if mdbpath.exists():
        # Never map smaller than the existing database file.
        mapsize = max(initial_mapsize, os.path.getsize(mdbpath))
    else:
        mapsize = initial_mapsize

    # save the transaction deltas in case of error...
    self.xactops = []
    self.recovering = False

    opts.setdefault('max_dbs', 128)
    opts.setdefault('writemap', True)

    # Slab-only options are popped so lmdb.open() never sees them.
    self.maxsize = opts.pop('maxsize', None)
    self.growsize = opts.pop('growsize', self.DEFAULT_GROWSIZE)

    self.readonly = opts.get('readonly', False)
    self.lockmemory = opts.pop('lockmemory', False)

    opts.setdefault('map_async', True)

    # Apply _mapsizeround() sizing, capped by maxsize when set.
    self.mapsize = _mapsizeround(mapsize)
    if self.maxsize is not None:
        self.mapsize = min(self.mapsize, self.maxsize)

    # Persist the effective options so the next open sees the same config.
    self._saveOptsFile()

    self.lenv = lmdb.open(path, **opts)

    # Active scan cursors, tracked so they can react to env changes.
    self.scans = set()

    self.dirty = False
    if self.readonly:
        # Read-only slabs do not keep a long-lived write transaction.
        self.xact = None
        self.txnrefcount = 0
    else:
        self._initCoXact()

    self.resizeevent = threading.Event()  # triggered when a resize event occurred
    self.lockdoneevent = asyncio.Event()  # triggered when a memory locking finished

    # LMDB layer uses these for status reporting
    self.locking_memory = False
    self.prefaulting = False
    self.max_could_lock = 0
    self.lock_progress = 0
    self.lock_goal = 0

    if self.lockmemory:
        async def memlockfini():
            # Wake the locking loop so it can observe the fini and exit.
            self.resizeevent.set()
            await self.memlocktask
        self.memlocktask = s_coro.executor(self._memorylockloop)
        self.onfini(memlockfini)

    self.dbnames = {None: (None, False)}  # prepopulate the default DB for speed

    self.onfini(self._onCoFini)
    if not self.readonly:
        self.schedCoro(self._runSyncLoop())
async def __anit__(self, path, **kwargs):
    '''
    Async init for an LMDB slab.

    Args:
        path (str): directory path for the LMDB environment.
        **kwargs: lmdb.open() options, plus slab-specific options
            (maxsize, growsize, lockmemory, max_replay_log) which are
            popped off before the remainder is handed to lmdb.open().

    Raises:
        s_exc.SlabAlreadyOpen: if a slab is already open at this path.
        s_exc.BadArg: if no map_size is available after defaults/persisted opts.
    '''
    await s_base.Base.__anit__(self)

    kwargs.setdefault('map_size', self.DEFAULT_MAPSIZE)
    kwargs.setdefault('lockmemory', False)
    kwargs.setdefault('map_async', True)

    opts = kwargs

    self.path = path
    self.optspath = s_common.switchext(path, ext='.opts.yaml')

    # Make sure we don't have this lmdb DB open already. (This can lead to seg faults)
    if path in self.allslabs:
        raise s_exc.SlabAlreadyOpen(mesg=path)

    # Options persisted from a previous run override the caller's kwargs.
    if os.path.isfile(self.optspath):
        opts.update(s_common.yamlload(self.optspath))

    initial_mapsize = opts.get('map_size')
    if initial_mapsize is None:
        raise s_exc.BadArg('Slab requires map_size')

    mdbpath = s_common.genpath(path, 'data.mdb')
    if os.path.isfile(mdbpath):
        # Never map smaller than the existing database file.
        mapsize = max(initial_mapsize, os.path.getsize(mdbpath))
    else:
        mapsize = initial_mapsize

    # save the transaction deltas in case of error...
    self.xactops = []
    self.max_xactops_len = opts.pop('max_replay_log', 10000)
    self.recovering = False

    opts.setdefault('max_dbs', 128)
    opts.setdefault('writemap', True)

    # Slab-only options are popped so lmdb.open() never sees them.
    self.maxsize = opts.pop('maxsize', None)
    self.growsize = opts.pop('growsize', self.DEFAULT_GROWSIZE)

    self.readonly = opts.get('readonly', False)
    self.lockmemory = opts.pop('lockmemory', False)

    if self.lockmemory:
        # Operator escape hatch: disable memory locking via environment variable.
        lockmem_override = s_common.envbool('SYN_LOCKMEM_DISABLE')
        if lockmem_override:
            logger.info(f'SYN_LOCKMEM_DISABLE envar set, skipping lockmem for {self.path}')
            self.lockmemory = False

    self.mapasync = opts.setdefault('map_async', True)

    # Apply _mapsizeround() sizing, capped by maxsize when set.
    self.mapsize = _mapsizeround(mapsize)
    if self.maxsize is not None:
        self.mapsize = min(self.mapsize, self.maxsize)

    # Persist the effective options so the next open sees the same config.
    self._saveOptsFile()

    self.lenv = lmdb.open(str(path), **opts)

    # Register in the class-level open-slab registry (checked above).
    self.allslabs[path] = self

    # Active scan cursors, tracked so they can react to env changes.
    self.scans = set()

    self.dirty = False
    if self.readonly:
        # Read-only slabs do not keep a long-lived write transaction.
        self.xact = None
        self.txnrefcount = 0
    else:
        self._initCoXact()

    self.resizeevent = threading.Event()  # triggered when a resize event occurred
    self.lockdoneevent = asyncio.Event()  # triggered when a memory locking finished

    # LMDB layer uses these for status reporting
    self.locking_memory = False
    self.prefaulting = False
    self.memlocktask = None
    self.max_could_lock = 0
    self.lock_progress = 0
    self.lock_goal = 0

    if self.lockmemory:
        async def memlockfini():
            # Wake the locking loop so it can observe the fini and exit.
            self.resizeevent.set()
            await self.memlocktask
        self.memlocktask = s_coro.executor(self._memorylockloop)
        self.onfini(memlockfini)
    else:
        # No lock loop will run; mark locking as already "done".
        self.lockdoneevent.set()

    self.dbnames = {None: (None, False)}  # prepopulate the default DB for speed

    self.onfini(self._onSlabFini)

    self.commitstats = collections.deque(maxlen=1000)  # stores Tuple[time, replayloglen, commit time delta]

    if not self.readonly:
        await Slab.initSyncLoop(self)