def test_send_data(self):
    # Push 10 MiB through a spawned 'catn' process and verify the byte
    # counts on the write and read sides both match the target.
    proc = Process()
    proc.spawn('catn', stdin=PIPE, stdout=PIPE)
    chunk = b'x' * 1024
    total = 10 * 1024 * 1024  # Send 10MB
    counters = [0, 0]  # [bytes written, bytes read]
    def feed_stdin():
        while counters[0] < total:
            size = min(1024, total - counters[0])
            proc.stdin.write(chunk[:size])
            counters[0] += size
        proc.stdin.flush()
        proc.stdin.write_eof()
    def drain_stdout():
        while True:
            nread = len(proc.stdout.read1(1024))
            if nread == 0:
                break
            counters[1] += nread
    gruvi.spawn(feed_stdin)
    gruvi.spawn(drain_stdout).join()
    proc.wait()
    self.assertEqual(proc.returncode, 0)
    self.assertEqual(counters[0], total)
    self.assertEqual(counters[1], total)
    proc.close()
def test_echo_data(self):
    # Stream ~1 MiB of random data through an echo server and compare
    # SHA-256 digests of what was sent and what came back.
    server = StreamServer(echo_handler)
    server.listen(('127.0.0.1', 0))
    client = StreamClient()
    client.connect(server.addresses[0])
    sent_md = hashlib.sha256()
    recv_md = hashlib.sha256()
    def producer():
        for _ in range(1000):
            block = os.urandom(1024)
            client.write(block)
            sent_md.update(block)
        client.write_eof()
    def consumer():
        while True:
            data = client.read1(1024)
            if not data:
                break
            recv_md.update(data)
    fprod = gruvi.spawn(producer)
    fcons = gruvi.spawn(consumer)
    fprod.join()
    fcons.join()
    self.assertEqual(sent_md.digest(), recv_md.digest())
    server.close()
    client.close()
def create_vault(self, template):
    """Create a new vault from *template*.

    The *template* must be a dict of vault properties; this method adds
    a freshly generated "id" key to it. The vault will start unlocked.

    Vault creation is asynchronous. This method starts the process in
    the background and returns immediately with the vault's uuid. When
    the vault's creation is complete, a "VaultCreationComplete"
    notification is sent with arguments (uuid, success, message).
    """
    uuid = crypto.random_uuid()
    template["id"] = uuid
    def complete_create_vault(send_notification):
        # Runs in a separate fiber; always emits exactly one
        # "VaultCreationComplete" notification, success or failure.
        try:
            vault = self._model.create_vault(template)
        except Exception as e:
            self._log.error(str(e))
            success = False
            # Map the exception to a user-presentable message; empty
            # string when no error info is registered for it.
            info = errors.get_error_info(e)
            message = info.message if info is not None else ""
        else:
            success = True
            message = "vault created successfully"
        send_notification("VaultCreationComplete", uuid, success, message)
    # Pass reference to send_notification because self.protocol is fiber-local
    gruvi.spawn(complete_create_vault, self.protocol.send_notification)
    return uuid
def test_isolation(self):
    # Two fibers writing to the same fiber-local object must never see
    # each other's values, and execution must interleave as recorded.
    local = gruvi.local()
    order = []
    def first():
        local.foo = 10
        order.append(1)
        gruvi.sleep(0)
        self.assertEqual(local.foo, 10)
        local.foo = 30
        order.append(1)
        gruvi.sleep(0)
        self.assertEqual(local.foo, 30)
    def second():
        self.assertFalse(hasattr(local, 'foo'))
        local.foo = 20
        order.append(2)
        gruvi.sleep(0)
        self.assertEqual(local.foo, 20)
        local.foo = 40
        order.append(2)
        gruvi.sleep(0)
        self.assertEqual(local.foo, 40)
    fib1 = gruvi.spawn(first)
    fib2 = gruvi.spawn(second)
    fib1.join()
    fib2.join()
    # The main fiber never set 'foo', so it must not see one.
    self.assertFalse(hasattr(local, 'foo'))
    self.assertEqual(order, [1, 2, 1, 2])
def test_produce_consume(self):
    # Ensure that there's no deadlocks when pushing a large number of items
    # through a queue with a fixed size.
    # Fix: the helper names were swapped in the original (the "consumer"
    # put items and the "producer" got them); roles are now named correctly.
    queue = gruvi.Queue(maxsize=10)
    result = []
    sizes = []
    def producer(n):
        # Put *n* items; put() blocks whenever the queue is at maxsize.
        for i in range(n):
            queue.put(i)
            sizes.append(queue.qsize())
    def consumer(n):
        # Get *n* items; items must come out in FIFO order.
        for i in range(n):
            result.append(queue.get())
            sizes.append(queue.qsize())
    ni = 2000
    fprod = gruvi.spawn(producer, ni)
    fcons = gruvi.spawn(consumer, ni)
    fprod.join()
    fcons.join()
    self.assertEqual(len(result), ni)
    self.assertEqual(result, list(range(ni)))
    # The queue size must never have exceeded its bound.
    self.assertLessEqual(max(sizes), 10)
def test_send_data(self):
    # Write 10 MiB into a 'catn' subprocess and verify that every byte
    # is read back out the other end.
    proc = Process()
    proc.spawn('catn', stdin=PIPE, stdout=PIPE)
    payload = b'x' * 1024
    target = 10 * 1024 * 1024  # Send 10MB
    progress = [0, 0]  # bytes written / bytes read
    def producer():
        remaining = target
        while remaining > 0:
            size = min(1024, remaining)
            proc.stdin.write(payload[:size])
            remaining -= size
            progress[0] = target - remaining
        proc.stdin.flush()
        proc.stdin.write_eof()
    def consumer():
        while True:
            data = proc.stdout.read1(1024)
            if not data:
                break
            progress[1] += len(data)
    gruvi.spawn(producer)
    gruvi.spawn(consumer).join()
    proc.wait()
    self.assertEqual(proc.returncode, 0)
    self.assertEqual(progress[0], target)
    self.assertEqual(progress[1], target)
    proc.close()
def test_echo_data(self):
    # Send 1000 random 1 KiB chunks to an echo server; the digest of the
    # received stream must equal the digest of the sent stream.
    server = StreamServer(echo_handler)
    server.listen(('127.0.0.1', 0))
    client = StreamClient()
    client.connect(server.addresses[0])
    digest_out = hashlib.sha256()
    digest_in = hashlib.sha256()
    def writer():
        for _ in range(1000):
            data = os.urandom(1024)
            client.write(data)
            digest_out.update(data)
        client.write_eof()
    def reader():
        while True:
            piece = client.read1(1024)
            if not piece:
                break
            digest_in.update(piece)
    workers = [gruvi.spawn(writer), gruvi.spawn(reader)]
    for fib in workers:
        fib.join()
    self.assertEqual(digest_out.digest(), digest_in.digest())
    server.close()
    client.close()
def test_get_wait(self):
    # Queue.get() should block until another fiber puts an item.
    queue = gruvi.Queue()
    def delayed_put(item):
        gruvi.sleep(0.01)
        queue.put(item)
    gruvi.spawn(delayed_put, 'foo')
    self.assertEqual(queue.get(), 'foo')
def test_partial_decode_wait(self):
    # read() on the text wrapper must block while the final byte of a
    # multi-byte UTF-8 sequence is missing, then complete once it arrives.
    reader = StreamReader()
    wrapped = TextIOWrapper(reader, 'utf-8')
    encoded = u'20 \u20ac'.encode('utf-8')
    reader.feed(encoded[:-1])  # withhold the euro sign's last byte
    def feed_tail():
        gruvi.sleep(0.01)
        reader.feed(encoded[-1:])
    gruvi.spawn(feed_tail)
    self.assertEqual(wrapped.read(4), u'20 \u20ac')
def test_partial_decode_wait(self):
    # A wrapped stream read() must wait on an incomplete UTF-8 sequence
    # until the remaining byte is fed into the buffer.
    stream = Stream(None)
    wrapped = stream.wrap('utf-8')
    encoded = u'20 \u20ac'.encode('utf-8')
    stream.buffer.feed(encoded[:-1])  # hold back the last byte
    def feed_tail():
        gruvi.sleep(0.01)
        stream.buffer.feed(encoded[-1:])
    gruvi.spawn(feed_tail)
    self.assertEqual(wrapped.read(4), u'20 \u20ac')
def test_read_wait(self):
    # read() blocks until EOF and then returns everything that was fed;
    # a second read() at EOF returns an empty bytes object.
    reader = StreamReader()
    reader.feed(b'foo')
    def feeder():
        gruvi.sleep(0.01)
        reader.feed(b'bar')
        gruvi.sleep(0.01)
        reader.feed_eof()
    gruvi.spawn(feeder)
    self.assertEqual(reader.read(), b'foobar')
    self.assertEqual(reader.read(), b'')
def test_read_wait_error(self):
    # read() first returns all data buffered before the error; the next
    # read() raises the error that was fed.
    reader = StreamReader()
    reader.feed(b'foo')
    def feeder():
        gruvi.sleep(0.01)
        reader.feed(b'bar')
        gruvi.sleep(0.01)
        reader.feed_error(RuntimeError)
    gruvi.spawn(feeder)
    self.assertEqual(reader.read(), b'foobar')
    self.assertRaises(RuntimeError, reader.read)
def test_read_wait_error(self):
    # Stream.read() returns data buffered ahead of a fed error; reading
    # again raises that error.
    stream = Stream(None)
    stream.buffer.feed(b'foo')
    def feeder():
        gruvi.sleep(0.01)
        stream.buffer.feed(b'bar')
        gruvi.sleep(0.01)
        stream.buffer.feed_error(RuntimeError)
    gruvi.spawn(feeder)
    self.assertEqual(stream.read(), b'foobar')
    self.assertRaises(RuntimeError, stream.read)
def test_readlines_wait_eof(self):
    # readlines() waits for EOF and returns every buffered line; once at
    # EOF it returns an empty list.
    stream = Stream(None)
    stream.buffer.feed(b'foo\n')
    def feeder():
        gruvi.sleep(0.01)
        stream.buffer.feed(b'bar\n')
        gruvi.sleep(0.01)
        stream.buffer.feed_eof()
    gruvi.spawn(feeder)
    self.assertEqual(stream.readlines(), [b'foo\n', b'bar\n'])
    self.assertEqual(stream.readlines(), [])
def test_read_wait(self):
    # Stream.read() blocks until EOF, then returns the full contents;
    # reading again at EOF yields b''.
    stream = Stream(None)
    stream.buffer.feed(b'foo')
    def feeder():
        gruvi.sleep(0.01)
        stream.buffer.feed(b'bar')
        gruvi.sleep(0.01)
        stream.buffer.feed_eof()
    gruvi.spawn(feeder)
    self.assertEqual(stream.read(), b'foobar')
    self.assertEqual(stream.read(), b'')
def test_readlines_wait_eof(self):
    # readlines() blocks until EOF arrives, returning all lines fed so
    # far; a second call at EOF returns [].
    reader = StreamReader()
    reader.feed(b'foo\n')
    def feeder():
        gruvi.sleep(0.01)
        reader.feed(b'bar\n')
        gruvi.sleep(0.01)
        reader.feed_eof()
    gruvi.spawn(feeder)
    self.assertEqual(reader.readlines(), [b'foo\n', b'bar\n'])
    self.assertEqual(reader.readlines(), [])
def test_wait(self):
    # Event.wait() blocks until set() is called and then returns True.
    event = gruvi.Event()
    progress = []
    def waiter():
        progress.append(False)
        progress.append(event.wait())
    gruvi.spawn(waiter)
    gruvi.sleep(0)
    # The waiter ran up to wait() but is now blocked.
    self.assertEqual(progress, [False])
    event.set()
    gruvi.sleep(0)
    self.assertEqual(progress, [False, True])
def test_iter_wait_error(self):
    # Iterating a reader yields lines as they arrive and raises the fed
    # error once the buffered lines are exhausted.
    reader = StreamReader()
    reader.feed(b'foo\n')
    def feeder():
        gruvi.sleep(0.01)
        reader.feed(b'bar\n')
        gruvi.sleep(0.01)
        reader.feed_error(RuntimeError)
    gruvi.spawn(feeder)
    lines = iter(reader)
    self.assertEqual(six.next(lines), b'foo\n')
    self.assertEqual(six.next(lines), b'bar\n')
    self.assertRaises(RuntimeError, six.next, lines)
def test_join_wait(self):
    # Ensure that join() waits until all unfinished tasks are done.
    queue = gruvi.Queue()
    self.assertEqual(queue.unfinished_tasks, 0)
    queue.put('foo')
    def worker():
        queue.get()
        gruvi.sleep(0.01)
        queue.task_done()
    gruvi.spawn(worker)
    self.assertEqual(queue.unfinished_tasks, 1)
    queue.join()
    self.assertEqual(queue.unfinished_tasks, 0)
def test_iter_wait_eof(self):
    # Iteration yields lines as they become available and raises
    # StopIteration at EOF.
    reader = StreamReader()
    reader.feed(b'foo\n')
    def feeder():
        gruvi.sleep(0.01)
        reader.feed(b'bar\n')
        gruvi.sleep(0.01)
        reader.feed_eof()
    gruvi.spawn(feeder)
    lines = iter(reader)
    self.assertEqual(six.next(lines), b'foo\n')
    self.assertEqual(six.next(lines), b'bar\n')
    self.assertRaises(StopIteration, six.next, lines)
def test_iter_wait_error(self):
    # Iterating a stream yields each line as it arrives; after the last
    # buffered line, the fed error is raised.
    stream = Stream(None)
    stream.buffer.feed(b'foo\n')
    def feeder():
        gruvi.sleep(0.01)
        stream.buffer.feed(b'bar\n')
        gruvi.sleep(0.01)
        stream.buffer.feed_error(RuntimeError)
    gruvi.spawn(feeder)
    lines = iter(stream)
    self.assertEqual(six.next(lines), b'foo\n')
    self.assertEqual(six.next(lines), b'bar\n')
    self.assertRaises(RuntimeError, six.next, lines)
def test_wait_for_timeout(self):
    # When a timeout occurs, wait_for() should return False.
    cond = gruvi.Condition()
    nwaiters = [0]
    def notifier():
        with cond:
            nwaiters[0] += 1
            cond.notify()
            nwaiters[0] -= 1
    gruvi.spawn(notifier)
    with cond:
        self.assertEqual(nwaiters[0], 0)
        # Predicate never holds, so this must time out and return False.
        self.assertFalse(cond.wait_for(lambda: False, timeout=0.1))
        self.assertEqual(nwaiters[0], 0)
def pair_neighbor_step1(self, node, source):
    """Start a new pairing process.

    A pairing process is started with node *node* residing in source
    *source*. The return value is a string containing a random cookie
    that identifies the current request.

    Raises NotFound if the neighbor is unknown and PairingError if it
    is not visible or its vault already exists locally. The network
    part of the exchange runs in a background fiber; its outcome is
    reported via a "PairNeighborStep1Completed" notification with
    arguments (cookie, success, message).
    """
    locator = instance(Locator)
    neighbor = locator.get_neighbor(node, source)
    if neighbor is None:
        raise NotFound("No such neighbor: {0}/{1}".format(source, node))
    visible = neighbor["properties"].get("visible")
    if not visible:
        raise PairingError("Node is not visible")
    vault = neighbor["vault"]
    model = instance(Model)
    if model.get_vault(vault):
        raise PairingError("Vault already exists: {0}".format(vault))
    cookie = crypto.random_cookie()
    def complete_pair_neighbor_step1(send_notification):
        # Background fiber: try each advertised address in order until
        # one connects, perform step #1 of the key exchange there, and
        # report the outcome.
        # NOTE(review): if no address connects at all, no completion
        # notification is ever sent — confirm this is intended.
        name = util.gethostname()
        for addr in neighbor["addresses"]:
            client = SyncApiClient()
            addr = addr["addr"]
            try:
                client.connect(addr)
            except SyncApiError as e:
                continue  # try next address
            try:
                kxid = client.pair_step1(vault, name)
                # Remember the exchange so pair_neighbor_step2() can
                # complete it later using the same cookie.
                self._pairings[cookie] = (kxid, neighbor, addr)
            except Exception as e:
                self._log.exception("exception in step #1 of pairing")
                success = False
                info = errors.get_error_info(e)
                message = info.message if info is not None else ""
            else:
                success = True
                message = "OK"
            send_notification("PairNeighborStep1Completed", cookie, success, message)
            client.close()
            break
    # Pass send_notification explicitly: self.protocol is fiber-local.
    gruvi.spawn(complete_pair_neighbor_step1, self.protocol.send_notification)
    return cookie
def test_fiber_safety_timeout(self):
    # Test correctness of the lock in case of timeouts.
    lock = self.Lock()
    order = []
    def run_test(ix):
        # Record the order in which fibers actually obtain the lock.
        lock.acquire()
        order.append(ix)
        lock.release()
    fibers = []
    for i in range(self.nfibers):
        fibers.append(gruvi.spawn(run_test, i))
    # There's 5 elements in lock._waiter now. Kill the first one, and
    # schedule a cancel for the second one. Both conditions should be
    # handled appropriately and no deadlocks should happen.
    # NOTE(review): the waiter count is actually self.nfibers, which may
    # not be 5 — confirm against the test-class attribute.
    lock.acquire()
    gruvi.sleep(0)  # make sure all fibers are waiting on lock.acquire()
    fibers[0].cancel()
    gruvi.sleep(0)  # first one will be gone
    self.assertFalse(fibers[0].is_alive())
    fibers[1].cancel()
    # a Cancelled is now scheduled for number two; it has not run yet.
    self.assertTrue(fibers[1].is_alive())
    lock.release()
    for fib in fibers:
        fib.join()
    # All fibers should have gotten the lock except 1 and 2.
    self.assertEqual(order, list(range(2, len(fibers))))
def thread_get(nfibers, count):
    # Spawn *nfibers* fibers each running get_queue(count), wait for all
    # of them, then close this thread's hub.
    fibers = [gruvi.spawn(get_queue, count) for _ in range(nfibers)]
    for fib in fibers:
        fib.join()
    gruvi.get_hub().close()
def test_timeout(self):
    # Ensure that the timeout argument to acquire() works.
    hub = get_hub()
    lock = gruvi.RLock()
    sync = gruvi.Lock()
    def hold_rlock():
        # Hold the RLock until the main fiber releases `sync`. A second
        # fiber is required because the same fiber *can* lock the same
        # RLock twice.
        lock.acquire()
        sync.acquire()
        lock.release()
    sync.acquire()
    fiber = gruvi.spawn(hold_rlock)
    gruvi.sleep(0)
    self.assertTrue(lock.locked())
    start = hub.loop.now()
    self.assertFalse(lock.acquire(timeout=0.01))
    end = hub.loop.now()
    # Internally the event loop uses timestamps with a 1ms granularity,
    # so allow for that.
    self.assertGreaterEqual(end - start, 10)
    sync.release()
    fiber.join()
    self.assertFalse(lock._callbacks)
def test_read_write_flow_control(self):
    # Test the read and write flow control of a stream transport.
    transport = MockTransport()
    protocol = StreamProtocol()
    transport.start(protocol)
    # Small buffer limits so flow control triggers quickly.
    protocol.stream.buffer.set_buffer_limits(100)
    transport.set_write_buffer_limits(50)
    def reader():
        # Echo everything read from the stream back onto it until EOF.
        while True:
            buf = protocol.stream.read(20)
            if not buf:
                break
            protocol.stream.write(buf)
    fib = gruvi.spawn(reader)
    buf = b'x' * 20
    interrupted = 0
    for i in range(100):
        protocol.data_received(buf)
        if transport._reading:
            continue
        # Reading was paused by the read buffer's high-water mark.
        interrupted += 1
        self.assertGreater(protocol.stream.buffer.get_buffer_size(), 0)
        # Switch to the reader() fiber which will fill up the transport
        # write buffer.
        gruvi.sleep(0)
        # The transport write buffer should be full but the protocol read
        # buffer should still contain something.
        self.assertGreater(protocol.stream.buffer.get_buffer_size(), 0)
        self.assertFalse(transport._can_write.is_set())
        # Drain write buffer and resume writing
        transport.drain()
    self.assertGreater(interrupted, 30)
    fib.cancel()
    gruvi.sleep(0)
def test_basic(self):
    # Ensure that a basic wait/notify works.
    cond = gruvi.Condition()
    nwaiting = [0]
    def waiter():
        with cond:
            nwaiting[0] += 1
            cond.wait()
            nwaiting[0] -= 1
    gruvi.spawn(waiter)
    gruvi.sleep(0)
    self.assertEqual(nwaiting[0], 1)
    with cond:
        cond.notify()
    gruvi.sleep(0)
    self.assertEqual(nwaiting[0], 0)
def test_notify_multiple(self):
    # Multiple fibers can be notified, and they wake up in the order in
    # which they started waiting.
    cond = gruvi.Condition()
    nwaiting = [0]
    finished = []
    def waiter(ix):
        with cond:
            nwaiting[0] += 1
            cond.wait()
            nwaiting[0] -= 1
            finished.append(ix)
    fibers = [gruvi.spawn(waiter, i) for i in range(10)]
    gruvi.sleep(0)
    self.assertEqual(nwaiting[0], 10)
    with cond:
        cond.notify(1)
    gruvi.sleep(0)
    self.assertEqual(nwaiting[0], 9)
    with cond:
        cond.notify(3)
    gruvi.sleep(0)
    self.assertEqual(nwaiting[0], 6)
    with cond:
        cond.notify_all()
    gruvi.sleep(0)
    self.assertEqual(nwaiting[0], 0)
    self.assertEqual(finished, list(range(10)))
def pair_neighbor_step2(self, cookie, pin, name, password):
    """Complete a pairing process.

    The *cookie* argument is the value returned by
    :meth:`pair_neighbor_step1`. The *pin* argument is the PIN code
    that the remote Bluepass instance showed to its user. The *name*
    and *password* arguments specify the name and password of the
    paired vault that is created in the local instance.

    Paired vaults will automatically be kept up to date. Changes made
    in a paired vault in one Bluepass instance will automatically be
    synced to other instances by the Bluepass backend. To get notified
    of new secrets that were added, listen for the ``VersionsAdded``
    signal.

    Raises NotFound when *cookie* does not identify a pending key
    exchange. The outcome is reported asynchronously via a
    "PairNeighborStep2Completed" notification with arguments
    (cookie, success, message).
    """
    if cookie not in self._pairings:
        raise NotFound("No such key exchange ID")
    # pop(): each cookie can be used to complete a pairing only once.
    kxid, neighbor, addr = self._pairings.pop(cookie)
    def complete_pair_neighbor_step2(send_notification):
        # Background fiber: create the local vault, exchange
        # certificates with the peer, then do an initial sync.
        # NOTE(review): create_vault() and connect() are outside the
        # try block; an exception there would escape this fiber without
        # a notification — confirm intended.
        template = {"id": neighbor["vault"], "name": name, "password": password}
        vault = self._model.create_vault(template)
        certinfo = {"node": vault["node"], "name": util.gethostname()}
        certinfo["keys"] = self._model.get_public_keys(vault["id"])
        client = SyncApiClient()
        client.connect(addr)
        try:
            peercert = client.pair_step2(vault["id"], kxid, pin, certinfo)
            self._model.create_certificate(vault["id"], peercert)
        except Exception as e:
            self._log.exception("exception in step #2 of pairing")
            success = False
            info = errors.get_error_info(e)
            message = info.message if info else ""
            # Roll back the vault created above on failure.
            self._model.delete_vault(vault["id"])
        else:
            success = True
            message = "OK"
            # Best-effort initial sync; failure here does not fail the
            # pairing itself.
            try:
                client.sync(vault["id"], self._model)
            except SyncApiError:
                pass
            self._model.raise_event("VaultAdded", vault)
        send_notification("PairNeighborStep2Completed", cookie, success, message)
        client.close()
    # Pass send_notification explicitly: self.protocol is fiber-local.
    gruvi.spawn(complete_pair_neighbor_step2, self.protocol.send_notification)
def test_cancel(self):
    # Ensure that it's possible to cancel() a fiber.
    def long_sleeper():
        gruvi.sleep(1000)
    fiber = gruvi.spawn(long_sleeper)
    gruvi.sleep(0)
    self.assertTrue(fiber.alive)
    fiber.cancel()
    gruvi.sleep(0)
    self.assertFalse(fiber.alive)
def test_cleanup_on_fiber_exit(self):
    # Fiber-local storage must be released once the owning fiber is
    # garbage collected.
    local = gruvi.local()
    def setter():
        local.foo = 10
    fib = gruvi.spawn(setter)
    fib.join()
    # Access the local object as if access was from the finished fiber.
    self.assertIn('foo', local._keys[fib])
    self.assertEqual(local._keys[fib]['foo'], 10)
    del fib
    gc.collect()
    self.assertEqual(len(local._keys), 0)
def test_produce_consume(self):
    # Ensure that there's no deadlocks when pushing a large number of items
    # through a queue with a fixed size.
    # Fix: the original labeled the putting fiber "consumer" and the
    # getting fiber "producer"; the names now reflect the actual roles.
    queue = gruvi.Queue(maxsize=10)
    result = []
    sizes = []
    def producer(n):
        # Puts *n* items; blocks whenever the queue reaches maxsize.
        for i in range(n):
            queue.put(i)
            sizes.append(queue.qsize())
    def consumer(n):
        # Gets *n* items; FIFO order is asserted below.
        for i in range(n):
            result.append(queue.get())
            sizes.append(queue.qsize())
    ni = 2000
    fprod = gruvi.spawn(producer, ni)
    fcons = gruvi.spawn(consumer, ni)
    fprod.join()
    fcons.join()
    self.assertEqual(len(result), ni)
    self.assertEqual(result, list(range(ni)))
    # The bounded queue must never have grown past its limit.
    self.assertLessEqual(max(sizes), 10)
def test_spawn_nested(self):
    # Spawn fibers from fibers, 1000 levels deep; each level joins its
    # child before recording, so levels complete innermost-first.
    joined = []
    def recurse(depth):
        if depth == 0:
            return
        child = gruvi.spawn(recurse, depth - 1)
        child.join()
        joined.append(depth - 1)
    top = gruvi.spawn(recurse, 1000)
    top.join()
    self.assertEqual(joined, list(range(1000)))
def test_spawn_nested(self):
    # Fibers may spawn fibers. Each level spawns one child, joins it,
    # then records its own (child) level, giving ascending order.
    joined = []
    def nested(level):
        if level == 0:
            return
        inner = gruvi.spawn(nested, level - 1)
        inner.join()
        joined.append(level - 1)
    outer = gruvi.spawn(nested, 1000)
    outer.join()
    self.assertEqual(joined, list(range(1000)))
def test_basic(self):
    # Spawn 1000 fibers, check they are all alive, join them all, and
    # verify each one ran exactly once.
    counter = [0]
    def work():
        counter[0] += 1
        gruvi.sleep(0.1)
    fibers = [gruvi.spawn(work) for _ in range(1000)]
    for fib in fibers:
        self.assertTrue(fib.alive)
    for fib in fibers:
        fib.join()
    self.assertEqual(counter[0], 1000)
    for fib in fibers:
        self.assertFalse(fib.alive)
def test_wait_for(self):
    # Ensure that wait_for() only wakes a fiber when its predicate holds.
    cond = gruvi.Condition()
    nwaiting = [0]
    unblock = []
    finished = []
    def waiter(ix):
        with cond:
            nwaiting[0] += 1
            cond.wait_for(lambda: ix in unblock)
            nwaiting[0] -= 1
            finished.append(ix)
    fibers = [gruvi.spawn(waiter, i) for i in range(10)]
    gruvi.sleep(0)
    self.assertEqual(nwaiting[0], 10)
    with cond:
        cond.notify(1)  # no predicate matches
    gruvi.sleep(0)
    self.assertEqual(nwaiting[0], 10)
    unblock.append(0)
    with cond:
        cond.notify(1)  # one predicate matches
    gruvi.sleep(0)
    self.assertEqual(nwaiting[0], 9)
    unblock.extend([2, 3])
    with cond:
        cond.notify(3)  # two match
    gruvi.sleep(0)
    self.assertEqual(nwaiting[0], 7)
    unblock.append(1)
    with cond:
        cond.notify_all()  # one match
    gruvi.sleep(0)
    self.assertEqual(nwaiting[0], 6)
    unblock.extend(range(10))
    with cond:
        cond.notify_all()  # everything else matches now
    gruvi.sleep(0)
    self.assertEqual(nwaiting[0], 0)
    # Wake order follows when each predicate became true.
    self.assertEqual(finished, [0, 2, 3, 1, 4, 5, 6, 7, 8, 9])
def test_non_blocking(self):
    # Ensure that the blocking argument to acquire() works.
    lock = gruvi.RLock()
    sync = gruvi.Lock()
    def hold_rlock():
        # A separate fiber must hold the RLock, since the same fiber
        # *can* lock the same RLock twice.
        lock.acquire()
        sync.acquire()
        lock.release()
    sync.acquire()
    fiber = gruvi.spawn(hold_rlock)
    gruvi.sleep(0)
    self.assertTrue(lock.locked())
    # A non-blocking acquire on a held lock must fail immediately.
    self.assertFalse(lock.acquire(blocking=False))
    sync.release()
    fiber.join()
    self.assertFalse(lock._callbacks)
def test_lock_order(self):
    # Locks are fair: waiters are granted the lock in FIFO order.
    lock = self.Lock()
    acquired = []
    def grab(ix):
        lock.acquire()
        acquired.append(ix)
        lock.release()
    fibers = [gruvi.spawn(grab, i) for i in range(self.nfibers)]
    lock.acquire()
    gruvi.sleep(0)
    # Everyone is queued behind us; nobody has the lock yet.
    self.assertEqual(acquired, [])
    lock.release()
    for fib in fibers:
        fib.join()
    self.assertEqual(acquired, list(range(len(fibers))))