async def read(self, deserializers=("cuda", "dask", "pickle", "error")):
    """Receive one framed message from the endpoint and deserialize it.

    Wire format of the header message:
      - bytes [0:8):            unsigned 64-bit frame count ``nframes``
      - bytes [8:8+nframes):    one bool per frame — True if the frame is GPU data
      - bytes [8+nframes:]:     one unsigned 64-bit size per frame

    Each frame is then received individually (CUDA-aware receive when the
    flag says so) and the collection is handed to ``from_frames``.

    Parameters
    ----------
    deserializers : tuple or None
        Deserializer names forwarded to ``from_frames``; ``None`` falls back
        to the full default set.

    Returns
    -------
    The deserialized message produced by ``from_frames``.
    """
    with log_errors():
        if deserializers is None:
            # Callers may explicitly pass None; restore the default set.
            deserializers = ("cuda", "dask", "pickle", "error")
        resp = await self.ep.recv_future()
        obj = ucp.get_obj_from_msg(resp)
        (nframes, ) = struct.unpack(
            "Q", obj[:8])  # first eight bytes for number of frames

        gpu_frame_msg = obj[
            8:8 + nframes]  # next nframes bytes for if they're GPU frames
        is_gpus = struct.unpack("{}?".format(nframes), gpu_frame_msg)

        sized_frame_msg = obj[8 + nframes:]  # then the rest for frame sizes
        sizes = struct.unpack("{}Q".format(nframes), sized_frame_msg)

        frames = []

        for i, (is_gpu, size) in enumerate(zip(is_gpus, sizes)):
            if size > 0:
                # Sized receive; cuda=True allocates/receives on the GPU.
                resp = await self.ep.recv_obj(size, cuda=is_gpu)
            else:
                # Zero-sized frames still need a matching blind receive.
                resp = await self.ep.recv_future()
            frame = ucp.get_obj_from_msg(resp)
            frames.append(frame)

        msg = await from_frames(frames,
                                deserialize=self.deserialize,
                                deserializers=deserializers)
        return msg
async def talk_to_server(ip, port):
    """Client side of the two-endpoint exchange.

    Opens two endpoints to the server, posts receives on both (sized or
    blind depending on ``args.blind_recv``), sends one message per endpoint,
    then validates or prints the traffic and tears the endpoints down.
    """
    global args
    global max_msg_log

    start_string = "in talk_to_server using " + args.object_type
    if args.blind_recv:
        start_string += " + blind recv"
    print(start_string)

    ep1 = ucp.get_endpoint(ip, port)
    ep2 = ucp.get_endpoint(ip, port)

    if not args.blind_recv:
        recv_string1 = "hello from ucx client @" + socket.gethostname()
        if args.validate:
            recv_string1 = 'c' * (2 ** max_msg_log)
        recv_string2 = "hello from ucx client @" + socket.gethostname()
        if args.validate:
            recv_string2 = 'c' * (2 ** max_msg_log)
        recv_msg1 = get_msg(recv_string1, args.object_type)
        recv_msg2 = get_msg(recv_string2, args.object_type)
        # NOTE(review): sys.getsizeof includes Python object overhead, not
        # just payload bytes — mirrors the rest of this file; confirm upstream.
        recv_req1 = await ep1.recv_obj(recv_msg1, sys.getsizeof(recv_msg1))
        recv_req2 = await ep2.recv_obj(recv_msg2, sys.getsizeof(recv_msg2))
    else:
        recv_req1 = await ep1.recv_future()
        recv_req2 = await ep2.recv_future()
        recv_msg1 = ucp.get_obj_from_msg(recv_req1)
        recv_msg2 = ucp.get_obj_from_msg(recv_req2)

    print("about to send")

    send_string1 = "hello from ucx client ep2 @" + socket.gethostname()
    if args.validate:
        # BUG FIX: previously assigned to `send_string`, leaving the
        # validation payload unused and sending the greeting instead.
        send_string1 = 'd' * (2 ** max_msg_log)
    send_string2 = "hello from ucx client ep1 @" + socket.gethostname()
    if args.validate:
        # BUG FIX: same wrong-variable assignment as above.
        send_string2 = 'd' * (2 ** max_msg_log)

    send_msg1 = get_msg(send_string1, args.object_type)
    send_msg2 = get_msg(send_string2, args.object_type)
    send_req1 = await ep1.send_obj(send_msg1, sys.getsizeof(send_msg1))
    send_req2 = await ep2.send_obj(send_msg2, sys.getsizeof(send_msg2))

    if not args.validate:
        print_msg("client sent: ", send_msg1, args.object_type)
        print_msg("client sent: ", send_msg2, args.object_type)
        print_msg("client received: ", recv_msg1, args.object_type)
        print_msg("client received: ", recv_msg2, args.object_type)
    else:
        # Server sends 'a' * 2**max_msg_log on each endpoint.
        assert(recv_msg1 == get_msg('a' * (2 ** max_msg_log),
                                    args.object_type))
        assert(recv_msg2 == get_msg('a' * (2 ** max_msg_log),
                                    args.object_type))

    ucp.destroy_ep(ep1)
    ucp.destroy_ep(ep2)
    print('talk_to_server done')
async def test_send_recv_numba(size, dtype):
    """Round-trip a numba device array of `size` elements through echo_pair."""
    numba = pytest.importorskip("numba")
    pytest.importorskip("numba.cuda")
    import numpy as np

    cuda_info = {"shape": [size], "typestr": dtype}

    # Build the host payload: `size` copies of "a" as bytes, viewed zero-copy.
    host_view = memoryview(bytes("a" * size, encoding="utf-8"))
    host_arr = np.array(host_view, dtype=dtype)
    device_msg = numba.cuda.to_device(host_arr)
    gpu_alloc_size = device_msg.dtype.itemsize * device_msg.size

    async with echo_pair(cuda_info) as (_, client):
        # Announce the byte count, then ship the device buffer.
        await client.send_obj(bytes(str(gpu_alloc_size), encoding="utf-8"))
        await client.send_obj(device_msg)

        resp = await client.recv_obj(gpu_alloc_size, cuda=True)
        echoed = ucp.get_obj_from_msg(resp)

    assert hasattr(echoed, "__cuda_array_interface__")
    # Patch metadata so the raw buffer can be reinterpreted as the sent array.
    echoed.typestr = device_msg.__cuda_array_interface__["typestr"]
    echoed.shape = device_msg.shape

    device_result = numba.cuda.as_cuda_array(echoed)
    assert isinstance(device_result, numba.cuda.devicearray.DeviceNDArray)

    np.testing.assert_array_equal(
        np.asarray(device_msg, dtype=dtype),
        np.asarray(device_result, dtype=dtype),
    )
async def test_send_recv_numba():
    """Echo a tiny 2-byte numba device array and verify the payload."""
    numba = pytest.importorskip('numba')
    pytest.importorskip('numba.cuda')
    import numpy as np

    cuda_info = {
        'shape': [2],
        'typestr': '|u1'
    }
    async with echo_pair(cuda_info) as (_, client):
        arr = np.array(memoryview(b"hi"), dtype='u1')
        msg = numba.cuda.to_device(arr)
        # BUG FIX: the size header was sent without `await`, so the
        # coroutine was created but never executed (sibling tests await it).
        await client.send_obj(b'2')
        await client.send_obj(msg)
        resp = await client.recv_obj(len(msg), cuda=True)
        result = ucp.get_obj_from_msg(resp)

    assert hasattr(result, '__cuda_array_interface__')
    # Restore the typestr so the raw buffer can be viewed as the sent array.
    result.typestr = msg.__cuda_array_interface__['typestr']
    result = numba.cuda.as_cuda_array(result)
    assert isinstance(result, numba.cuda.devicearray.DeviceNDArray)

    result = np.asarray(result, dtype='|u1')
    msg = np.asarray(msg, dtype='|u1')
    np.testing.assert_array_equal(msg, result)
async def test_send_recv_bytes():
    """Round-trip a small bytes payload through the echo pair."""
    async with echo_pair() as (_, client):
        payload = b"hi"
        await client.send_obj(b'2')          # length header
        await client.send_obj(payload)
        reply = await client.recv_obj(len(payload))
        echoed = ucp.get_obj_from_msg(reply)
        assert echoed.tobytes() == payload
async def test_send_recv_memoryview():
    """Round-trip a memoryview payload through the echo pair."""
    async with echo_pair() as (_, client):
        payload = memoryview(b"hi")
        await client.send_obj(b'2')          # length header
        await client.send_obj(payload)
        reply = await client.recv_obj(len(payload))
        echoed = ucp.get_obj_from_msg(reply)
        assert echoed == payload
async def test_send_recv_numpy():
    """Round-trip a tiny uint8 numpy array through the echo pair."""
    np = pytest.importorskip('numpy')
    async with echo_pair() as (_, client):
        payload = np.frombuffer(memoryview(b"hi"), dtype='u1')
        await client.send_obj(b'2')          # length header
        await client.send_obj(payload)
        reply = await client.recv_obj(len(payload))
        echoed = np.frombuffer(ucp.get_obj_from_msg(reply), 'u1')
        np.testing.assert_array_equal(echoed, payload)
async def test_send_recv_python_things(thing):
    """Round-trip an arbitrary Python object via msgpack serialization."""
    import msgpack

    payload = msgpack.dumps(thing)
    header = bytes(str(len(payload)), encoding="utf-8")
    async with echo_pair() as (_, client):
        await client.send_obj(header)        # announce payload size
        await client.send_obj(payload)
        reply = await client.recv_obj(len(payload))
        echoed = ucp.get_obj_from_msg(reply)
        assert echoed.tobytes() == payload
async def test_send_recv_bytes(size):
    """Round-trip a `size`-byte payload of 'a's through the echo pair."""
    payload = bytes("a" * size, encoding="utf-8")
    async with echo_pair() as (_, client):
        await client.send_obj(bytes(str(size), encoding="utf-8"))  # size header
        await client.send_obj(payload)
        reply = await client.recv_obj(len(payload))
        echoed = ucp.get_obj_from_msg(reply)
        assert echoed.tobytes() == payload
async def test_send_recv_numpy(size, dtype):
    """Round-trip an arange array of `size` x `dtype` through the echo pair."""
    np = pytest.importorskip("numpy")

    payload = np.arange(size, dtype=dtype)
    nbytes = payload.nbytes
    async with echo_pair() as (_, client):
        await client.send_obj(bytes(str(nbytes), encoding="utf-8"))  # size header
        await client.send_obj(payload)
        reply = await client.recv_obj(nbytes)
        echoed = np.frombuffer(ucp.get_obj_from_msg(reply), dtype)
        np.testing.assert_array_equal(echoed, payload)
async def serve(ep, lf):
    """Server callback: optionally echo one message, then shut down.

    Parameters
    ----------
    ep : endpoint accepted by the listener
    lf : the listener handle to stop when done

    NOTE(review): relies on module globals `message`, `client_msg`,
    `server_msg`, and `box` defined elsewhere in the file — `box` is
    presumably the bytes/memoryview wrapper chosen by `connect`; confirm.
    """
    print("5. Starting serve")
    if message:
        # msg = await ep.recv_future()
        # Sized receive matching what the client sends.
        size = len(client_msg)
        msg = await ep.recv_obj(size)
        msg = ucp.get_obj_from_msg(msg)
        print("6. Server got message", bytes(msg).decode())
        # response = "Got: {}".format(server_msg.decode()).encode()
        await ep.send_obj(box(server_msg), name='serve-send')
    # Cleanup runs whether or not a message was exchanged.
    print('7. Stopping server')
    ucp.destroy_ep(ep)
    ucp.stop_listener(lf)
async def test_send_recv_cupy():
    """Echo a tiny 2-byte cupy array and verify the payload."""
    cupy = pytest.importorskip('cupy')

    cuda_info = {'shape': [2], 'typestr': '|u1'}
    async with echo_pair(cuda_info) as (_, client):
        msg = cupy.array(memoryview(b"hi"), dtype='u1')
        # BUG FIX: the size header was sent without `await`, so the
        # coroutine was created but never executed (sibling tests await it).
        await client.send_obj(b'2')
        await client.send_obj(msg)
        resp = await client.recv_obj(len(msg), cuda=True)
        result = ucp.get_obj_from_msg(resp)

    assert hasattr(result, '__cuda_array_interface__')
    # Restore the typestr so the raw buffer can be viewed as the sent array.
    result.typestr = msg.__cuda_array_interface__['typestr']
    result = cupy.asarray(result)
    cupy.testing.assert_array_equal(msg, result)
async def talk_to_client(ep, listener):
    """Server side of the two-endpoint exchange: send, receive, validate.

    Stops the listener once both expected clients (`count` == 2) have been
    served.
    """
    global args
    global max_msg_log
    global count

    banner = "in talk_to_client using " + args.object_type
    if args.blind_recv:
        banner += " + blind recv"
    print(banner)

    print("about to send")
    send_string = "hello from ucx server @" + socket.gethostname()
    if args.validate:
        send_string = 'a' * (2 ** max_msg_log)
    send_msg = get_msg(send_string, args.object_type)
    send_req = await ep.send_obj(send_msg, sys.getsizeof(send_msg))

    recv_msg = None
    print("about to recv")
    if args.blind_recv:
        recv_req = await ep.recv_future()
        recv_msg = ucp.get_obj_from_msg(recv_req)
    else:
        recv_string = "hello from ucx server @" + socket.gethostname()
        if args.validate:
            recv_string = 'b' * (2 ** max_msg_log)
        recv_msg = get_msg(recv_string, args.object_type)
        recv_req = await ep.recv_obj(recv_msg, sys.getsizeof(recv_msg))

    if args.validate:
        # Client sends 'd' * 2**max_msg_log when validating.
        assert(recv_msg == get_msg('d' * (2 ** max_msg_log),
                                   args.object_type))
    else:
        print_msg("server sent: ", send_msg, args.object_type)
        print_msg("server received: ", recv_msg, args.object_type)

    ucp.destroy_ep(ep)
    print('talk_to_client done')

    count += 1
    if count == 2:
        ucp.stop_listener(listener)
    print('past attempt to stop listener')
async def connect(host, port=13337, message=True, type_='bytes'):
    """Connect to the server, optionally exchange one message, disconnect.

    `type_` selects how the outgoing payload is wrapped: 'memoryview'
    wraps with memoryview, anything else with bytes.
    """
    box = memoryview if type_ == 'memoryview' else bytes

    print("3. Starting connect")
    ep = ucp.get_endpoint(host, port)

    if message:
        print("4. Client send")
        await ep.send_obj(box(client_msg), name='connect-send')

        # resp = await ep.recv_future()
        # Sized receive matching the server's reply length.
        resp = await ep.recv_obj(len(client_msg))
        r_msg = ucp.get_obj_from_msg(resp)
        print("8. Client got message: {}".format(bytes(r_msg).decode()))

    print("9. Stopping client")
    ucp.destroy_ep(ep)
async def test_send_recv_cupy(size, dtype):
    """Round-trip a cupy array of `size` elements through echo_pair."""
    cupy = pytest.importorskip("cupy")

    cuda_info = {"shape": [size], "typestr": dtype}

    # Host payload: `size` copies of "a", moved to the GPU.
    host_view = memoryview(bytes("a" * size, encoding="utf-8"))
    device_msg = cupy.array(host_view, dtype=dtype)
    gpu_alloc_size = device_msg.dtype.itemsize * device_msg.size

    async with echo_pair(cuda_info) as (_, client):
        # Announce the byte count, then ship the device buffer.
        await client.send_obj(bytes(str(gpu_alloc_size), encoding="utf-8"))
        await client.send_obj(device_msg)
        resp = await client.recv_obj(gpu_alloc_size, cuda=True)
        echoed = ucp.get_obj_from_msg(resp)

    assert hasattr(echoed, "__cuda_array_interface__")
    # Patch metadata so the raw buffer can be reinterpreted as the sent array.
    echoed.typestr = device_msg.__cuda_array_interface__["typestr"]
    echoed.shape = device_msg.shape

    cupy.testing.assert_array_equal(device_msg, cupy.asarray(echoed))
async def talk_to_client(ep, listener):
    """Send `args.n_bytes` zeros, receive the reply, then shut down."""
    print("about to send")
    outgoing = get_msg(b"0" * args.n_bytes, args.object_type)
    await ep.send_obj(outgoing)

    print("about to recv")
    if args.blind_recv:
        req = await ep.recv_future()
        incoming = ucp.get_obj_from_msg(req)
    else:
        req = await ep.recv_obj(args.n_bytes)
        incoming = get_msg(req.get_obj(), args.object_type)

    if args.validate:
        check(outgoing, incoming, args.object_type)
    else:
        print("server sent: ", reprlib.repr(outgoing), type(outgoing))
        print("server recv: ", reprlib.repr(incoming), type(incoming))

    ucp.destroy_ep(ep)
    print("talk_to_client done")
    ucp.stop_listener(listener)
async def test_send_recv_large_data(size):
    """Round-trip a large int64 cupy array through echo_pair.

    Rough sizes at dtype i8:
      2**26 elements ~ 0.5 GB, 2**27 ~ 1 GB, 2**28 ~ 2 GB.
    """
    pytest.importorskip("numba.cuda")
    cupy = pytest.importorskip("cupy")

    dtype = "i8"
    cuda_info = {"shape": [size], "typestr": dtype}
    device_msg = cupy.arange(size, dtype=dtype)
    gpu_alloc_size = device_msg.dtype.itemsize * device_msg.size

    async with echo_pair(cuda_info) as (_, client):
        # Announce the byte count, then ship the device buffer.
        await client.send_obj(bytes(str(gpu_alloc_size), encoding="utf-8"))
        await client.send_obj(device_msg)
        resp = await client.recv_obj(gpu_alloc_size, cuda=True)
        echoed = ucp.get_obj_from_msg(resp)

    assert hasattr(echoed, "__cuda_array_interface__")
    # Patch metadata so the raw buffer can be reinterpreted as the sent array.
    echoed.typestr = device_msg.__cuda_array_interface__["typestr"]
    echoed.shape = device_msg.shape

    cupy.testing.assert_array_equal(device_msg, cupy.asarray(echoed))
async def talk_to_client(ep, listener):
    """Accept one blind message, then tear down the endpoint and listener."""
    req = await ep.recv_future()
    msg = ucp.get_obj_from_msg(req)  # payload is received but not inspected
    ucp.destroy_ep(ep)
    ucp.stop_listener(listener)