Example #1
    def testEnsureTimeout(self, *_):
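        # Verify that data preparation times out with PromiseTimeout: lower
        # options.worker.prepare_data_timeout, run CacheTestActor.run_test_ensure_timeout
        # and poll until the test actor records the expected exception.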
        from mars.errors import PromiseTimeout

        pool_address = '127.0.0.1:%d' % get_next_port()
        with create_actor_pool(n_process=1,
                               backend='gevent',
                               address=pool_address) as pool:
            pool.create_actor(PlasmaKeyMapActor,
                              uid=PlasmaKeyMapActor.default_name())
            pool.create_actor(WorkerClusterInfoActor,
                              schedulers=[pool_address],
                              uid=WorkerClusterInfoActor.default_name())
            pool.create_actor(KVStoreActor, uid=KVStoreActor.default_name())
            pool.create_actor(DispatchActor, uid=DispatchActor.default_name())
            pool.create_actor(QuotaActor,
                              1024 * 1024 * 10,
                              uid=MemQuotaActor.default_name())
            pool.create_actor(SpillActor, uid=SpillActor.default_name())
            cache_ref = pool.create_actor(ChunkHolderActor,
                                          self.plasma_storage_size,
                                          uid=ChunkHolderActor.default_name())

            try:
                options.worker.prepare_data_timeout = 2
                test_ref = pool.create_actor(CacheTestActor)
                test_ref.run_test_ensure_timeout()
                while not test_ref.get_exc_info()[0]:
                    pool.sleep(0.1)
                exc_info = test_ref.get_exc_info()[1]
                self.assertIsNotNone(exc_info)
                self.assertIsInstance(exc_info[1], PromiseTimeout)
            finally:
                options.worker.prepare_data_timeout = 600
                pool.destroy_actor(cache_ref)
Example #2
def run_transfer_worker(pool_address, session_id, chunk_keys, spill_dir, msg_queue):
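    # Helper executed in a child process: start a plasma store and a transfer test
    # pool, create sender/receiver actors, seed chunks into spill files and plasma,
    # then report the plasma socket to the parent and wait for its finish signal.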
    options.worker.spill_directory = spill_dir
    plasma_size = 1024 * 1024 * 10

    # don't combine these into a single with-statement, as we need the options to be forked
    with plasma.start_plasma_store(plasma_size) as store_args:
        options.worker.plasma_socket = plasma_socket = store_args[0]
        plasma_client = plasma.connect(plasma_socket, '', 0)

        with start_transfer_test_pool(address=pool_address, plasma_size=plasma_size) as pool:
            chunk_holder_ref = pool.actor_ref(ChunkHolderActor.default_name())
            mapper_ref = pool.actor_ref(PlasmaKeyMapActor.default_name())
            plasma_store = PlasmaChunkStore(plasma_client, mapper_ref)

            for _ in range(2):
                pool.create_actor(SenderActor, uid=str(uuid.uuid4()))
                pool.create_actor(ReceiverActor, uid=str(uuid.uuid4()))

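            # the first len(chunk_keys) - 7 chunks are written to spill files; the
            # last 7 are put into plasma and registered with the chunk holder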
            for idx in range(0, len(chunk_keys) - 7):
                data = np.ones((640 * 1024,), dtype=np.int16) * idx
                write_spill_file(chunk_keys[idx], data)
            for idx in range(len(chunk_keys) - 7, len(chunk_keys)):
                data = np.ones((640 * 1024,), dtype=np.int16) * idx
                plasma_store.put(session_id, chunk_keys[idx], data)
                chunk_holder_ref.register_chunk(session_id, chunk_keys[idx])

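            # hand the plasma socket back to the parent process, then poll the queue
            # for the parent's signal that the transfer checks have finished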
            msg_queue.put(plasma_socket)
            t = time.time()
            while True:
                try:
                    msg_queue.get_nowait()
                except Empty:
                    if time.time() > t + 60:
                        raise SystemError('Transfer finish timed out.')
                    pool.sleep(0.1)
                else:
                    # parent signalled that the transfer checks are done
                    break
Example #3
    def testHolder(self):
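        # Build a worker actor pool, run CacheTestActor.run_test_cache against the
        # ChunkHolderActor, and re-raise any exception the test actor recorded.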
        pool_address = '127.0.0.1:%d' % get_next_port()
        with create_actor_pool(n_process=1,
                               backend='gevent',
                               address=pool_address) as pool:
            pool.create_actor(PlasmaKeyMapActor,
                              uid=PlasmaKeyMapActor.default_name())
            pool.create_actor(WorkerClusterInfoActor,
                              schedulers=[pool_address],
                              uid=WorkerClusterInfoActor.default_name())
            pool.create_actor(KVStoreActor, uid=KVStoreActor.default_name())
            pool.create_actor(DispatchActor, uid=DispatchActor.default_name())
            pool.create_actor(QuotaActor,
                              1024 * 1024 * 10,
                              uid=MemQuotaActor.default_name())
            cache_ref = pool.create_actor(ChunkHolderActor,
                                          self.plasma_storage_size,
                                          uid=ChunkHolderActor.default_name())
            pool.create_actor(SpillActor)

            try:
                test_ref = pool.create_actor(CacheTestActor)
                test_ref.run_test_cache()
                while not test_ref.get_exc_info()[0]:
                    pool.sleep(0.1)
                exc_info = test_ref.get_exc_info()[1]
                if exc_info:
                    six.reraise(*exc_info)
            finally:
                pool.destroy_actor(cache_ref)
Example #4
@contextlib.contextmanager
def start_transfer_test_pool(**kwargs):
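    # Generator-based context manager that sets up an actor pool with the
    # cluster-info, key-map, meta, dispatch, quota, chunk-holder, spill and status
    # actors used by the transfer tests, then destroys the chunk holder on exit.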
    address = kwargs.pop('address')
    plasma_size = kwargs.pop('plasma_size')
    with create_actor_pool(n_process=1,
                           backend='gevent',
                           address=address,
                           **kwargs) as pool:
        pool.create_actor(SchedulerClusterInfoActor,
                          schedulers=[address],
                          uid=SchedulerClusterInfoActor.default_uid())
        pool.create_actor(WorkerClusterInfoActor,
                          schedulers=[address],
                          uid=WorkerClusterInfoActor.default_uid())

        pool.create_actor(PlasmaKeyMapActor,
                          uid=PlasmaKeyMapActor.default_uid())
        pool.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_uid())
        pool.create_actor(DispatchActor, uid=DispatchActor.default_uid())
        pool.create_actor(QuotaActor,
                          1024 * 1024 * 20,
                          uid=MemQuotaActor.default_uid())
        chunk_holder_ref = pool.create_actor(
            ChunkHolderActor, plasma_size, uid=ChunkHolderActor.default_uid())
        pool.create_actor(SpillActor)
        pool.create_actor(StatusActor, address, uid=StatusActor.default_uid())

        yield pool

        chunk_holder_ref.destroy()
Example #5
    def create_standard_actors(cls,
                               pool,
                               address,
                               quota_size=None,
                               with_daemon=True,
                               with_status=True,
                               with_resource=False):
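        # Create the standard set of worker actors (plasma key map, cluster info,
        # optional resource / daemon / status actors, chunk holder, chunk meta,
        # task queue, dispatch, quota and execution) shared by the worker test cases.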
        quota_size = quota_size or (1024 * 1024)
        pool.create_actor(PlasmaKeyMapActor,
                          uid=PlasmaKeyMapActor.default_name())
        pool.create_actor(ClusterInfoActor,
                          schedulers=[address],
                          uid=ClusterInfoActor.default_name())

        if with_resource:
            pool.create_actor(ResourceActor, uid=ResourceActor.default_name())
        if with_daemon:
            pool.create_actor(WorkerDaemonActor,
                              uid=WorkerDaemonActor.default_name())
        if with_status:
            pool.create_actor(StatusActor,
                              address,
                              uid=StatusActor.default_name())

        pool.create_actor(ChunkHolderActor,
                          cls.plasma_storage_size,
                          uid=ChunkHolderActor.default_name())
        pool.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_name())
        pool.create_actor(TaskQueueActor, uid=TaskQueueActor.default_name())
        pool.create_actor(DispatchActor, uid=DispatchActor.default_name())
        pool.create_actor(QuotaActor,
                          quota_size,
                          uid=MemQuotaActor.default_name())
        pool.create_actor(ExecutionActor, uid=ExecutionActor.default_name())
Example #6
    def testSimpleTransfer(self):
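        # End-to-end transfer: a child process (run_transfer_worker) hosts remote
        # chunks in plasma and spill files; this test creates local sender/receiver
        # actors, pulls two remote chunks and checks that both copies match.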
        session_id = str(uuid.uuid4())

        local_pool_addr = 'localhost:%d' % get_next_port()
        remote_pool_addr = 'localhost:%d' % get_next_port()
        remote_chunk_keys = [str(uuid.uuid4()) for _ in range(9)]
        msg_queue = multiprocessing.Queue()

        remote_spill_dir = os.path.join(tempfile.gettempdir(),
                                        'mars_spill_%d_%d' % (os.getpid(), id(run_transfer_worker)))

        proc = multiprocessing.Process(
            target=run_transfer_worker,
            args=(remote_pool_addr, session_id, remote_chunk_keys, remote_spill_dir, msg_queue)
        )
        proc.start()
        try:
            remote_plasma_socket = msg_queue.get(timeout=30)
        except Empty:
            if proc.is_alive():
                proc.terminate()
            raise

        with start_transfer_test_pool(address=local_pool_addr, plasma_size=self.plasma_storage_size) as pool:
            sender_refs, receiver_refs = [], []
            for _ in range(2):
                sender_refs.append(pool.create_actor(SenderActor, uid=str(uuid.uuid4())))
                receiver_refs.append(pool.create_actor(ReceiverActor, uid=str(uuid.uuid4())))

            try:
                for data_id in (-1, 0):
                    chunk_key = remote_chunk_keys[data_id]

                    with self.run_actor_test(pool) as test_actor:
                        remote_dispatch_ref = test_actor.promise_ref(
                            DispatchActor.default_name(), address=remote_pool_addr)
                        remote_mapper_ref = pool.actor_ref(
                            PlasmaKeyMapActor.default_name(), address=remote_pool_addr)
                        remote_plasma_client = plasma.connect(remote_plasma_socket, '', 0)
                        remote_store = PlasmaChunkStore(remote_plasma_client, remote_mapper_ref)

                        def _call_send_data(sender_uid):
                            sender_ref = test_actor.promise_ref(sender_uid, address=remote_pool_addr)
                            return sender_ref.send_data(session_id, chunk_key, local_pool_addr, _promise=True)

                        def _test_data_exist(*_):
                            try:
                                local_data = test_actor._chunk_store.get(session_id, chunk_key)
                            except KeyError:
                                with open(build_spill_file_name(chunk_key), 'rb') as spill_file:
                                    local_data = dataserializer.load(spill_file)

                            try:
                                remote_data = remote_store.get(session_id, chunk_key)
                            except KeyError:
                                with open(build_spill_file_name(chunk_key, remote_spill_dir), 'rb') as spill_file:
                                    remote_data = dataserializer.load(spill_file)
                            assert_array_equal(local_data, remote_data)

                            del local_data, remote_data

                        remote_dispatch_ref.get_free_slot('sender', _promise=True) \
                            .then(_call_send_data) \
                            .then(_test_data_exist) \
                            .then(lambda *_: test_actor.set_result(chunk_key),
                                  lambda *exc: test_actor.set_result(exc, False))
                    self.assertEqual(self.get_result(60), chunk_key)

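                # notify the worker process that all transfer checks are done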
                msg_queue.put(1)
            finally:
                for ref in sender_refs + receiver_refs:
                    pool.destroy_actor(ref)

                os.unlink(remote_plasma_socket)
                os.kill(proc.pid, signal.SIGINT)

                t = time.time()
                while proc.is_alive() and time.time() < t + 2:
                    time.sleep(1)
                if proc.is_alive():
                    proc.terminate()
Example #7
    def testReceiver(self):
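        # Walk ReceiverActor through its states: status checks for spilled and
        # in-plasma data, duplicated and cancelled writers, checksum mismatches,
        # in-memory and spill-file receives, store errors and receive timeouts.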
        pool_addr = 'localhost:%d' % get_next_port()
        options.worker.spill_directory = os.path.join(
            tempfile.gettempdir(), 'mars_spill_%d_%d' % (os.getpid(), id(run_transfer_worker)))
        session_id = str(uuid.uuid4())

        mock_data = np.array([1, 2, 3, 4])
        serialized_mock_data = dataserializer.dumps(mock_data)
        serialized_crc32 = zlib.crc32(serialized_mock_data)

        chunk_key1 = str(uuid.uuid4())
        chunk_key2 = str(uuid.uuid4())
        chunk_key3 = str(uuid.uuid4())
        chunk_key4 = str(uuid.uuid4())
        chunk_key5 = str(uuid.uuid4())
        chunk_key6 = str(uuid.uuid4())

        with start_transfer_test_pool(address=pool_addr, plasma_size=self.plasma_storage_size) as pool:
            chunk_holder_ref = pool.actor_ref(ChunkHolderActor.default_name())
            mapper_ref = pool.actor_ref(PlasmaKeyMapActor.default_name())
            receiver_ref = pool.create_actor(ReceiverActor, uid=str(uuid.uuid4()))

            store = PlasmaChunkStore(self._plasma_client, mapper_ref)

            # check_status on receiving and received
            self.assertEqual(receiver_ref.check_status(session_id, chunk_key1),
                             ReceiveStatus.NOT_STARTED)

            write_spill_file(chunk_key1, mock_data)
            self.assertEqual(receiver_ref.check_status(session_id, chunk_key1),
                             ReceiveStatus.RECEIVED)
            os.unlink(build_spill_file_name(chunk_key1))

            ref = store.put(session_id, chunk_key1, mock_data)
            data_size = store.get_actual_size(session_id, chunk_key1)
            chunk_holder_ref.register_chunk(session_id, chunk_key1)
            del ref
            self.assertEqual(receiver_ref.check_status(session_id, chunk_key1),
                             ReceiveStatus.RECEIVED)

            with self.run_actor_test(pool) as test_actor:
                receiver_ref_p = test_actor.promise_ref(receiver_ref)

                # cancelling an un-run / missing transfer does nothing
                receiver_ref_p.cancel_receive(session_id, chunk_key2)

                # start creating writer
                receiver_ref_p.create_data_writer(session_id, chunk_key1, data_size, test_actor, _promise=True) \
                    .then(lambda *s: test_actor.set_result(s, destroy=False))
                self.assertTupleEqual(self.get_result(5), (receiver_ref.address, ReceiveStatus.RECEIVED))

                receiver_ref_p.create_data_writer(session_id, chunk_key2, data_size, test_actor, _promise=True) \
                    .then(lambda *s: test_actor.set_result(s, destroy=False))
                self.assertTupleEqual(self.get_result(5), (receiver_ref.address, None))

                receiver_ref_p.create_data_writer(session_id, chunk_key2, data_size, test_actor, _promise=True) \
                    .then(lambda *s: test_actor.set_result(s, destroy=False))
                self.assertTupleEqual(self.get_result(5), (receiver_ref.address, ReceiveStatus.RECEIVING))

                receiver_ref_p.cancel_receive(session_id, chunk_key2)
                self.assertEqual(receiver_ref.check_status(session_id, chunk_key2),
                                 ReceiveStatus.NOT_STARTED)

                # test checksum error on receive_data_part
                receiver_ref_p.create_data_writer(session_id, chunk_key2, data_size, test_actor, _promise=True) \
                    .then(lambda *s: test_actor.set_result(s, destroy=False))

                receiver_ref_p.register_finish_callback(session_id, chunk_key2, _promise=True) \
                    .then(lambda *s: test_actor.set_result(s, destroy=False)) \
                    .catch(lambda *exc: test_actor.set_result(exc, accept=False, destroy=False))

                receiver_ref_p.receive_data_part(session_id, chunk_key2, serialized_mock_data, 0)

                with self.assertRaises(ChecksumMismatch):
                    self.get_result(5)

                # test checksum error on finish_receive
                receiver_ref_p.create_data_writer(session_id, chunk_key2, data_size, test_actor, _promise=True) \
                    .then(lambda *s: test_actor.set_result(s, destroy=False))
                self.assertTupleEqual(self.get_result(5), (receiver_ref.address, None))

                receiver_ref_p.receive_data_part(session_id, chunk_key2, serialized_mock_data, serialized_crc32)
                receiver_ref_p.finish_receive(session_id, chunk_key2, 0)

                receiver_ref_p.register_finish_callback(session_id, chunk_key2, _promise=True) \
                    .then(lambda *s: test_actor.set_result(s, destroy=False)) \
                    .catch(lambda *exc: test_actor.set_result(exc, accept=False, destroy=False))

                with self.assertRaises(ChecksumMismatch):
                    self.get_result(5)

                receiver_ref_p.cancel_receive(session_id, chunk_key2)

                # test intermediate cancellation
                receiver_ref_p.create_data_writer(session_id, chunk_key2, data_size, test_actor, _promise=True) \
                    .then(lambda *s: test_actor.set_result(s, destroy=False))
                self.assertTupleEqual(self.get_result(5), (receiver_ref.address, None))

                receiver_ref_p.register_finish_callback(session_id, chunk_key2, _promise=True) \
                    .then(lambda *s: test_actor.set_result(s, destroy=False)) \
                    .catch(lambda *exc: test_actor.set_result(exc, accept=False, destroy=False))

                receiver_ref_p.receive_data_part(session_id, chunk_key2, serialized_mock_data[:64],
                                                 zlib.crc32(serialized_mock_data[:64]))
                receiver_ref_p.cancel_receive(session_id, chunk_key2)
                receiver_ref_p.receive_data_part(session_id, chunk_key2, serialized_mock_data[64:],
                                                 serialized_crc32)
                with self.assertRaises(ExecutionInterrupted):
                    self.get_result(5)

                # test transfer in memory
                receiver_ref_p.register_finish_callback(session_id, chunk_key3, _promise=True) \
                    .then(lambda *s: test_actor.set_result(s, destroy=False)) \
                    .catch(lambda *exc: test_actor.set_result(exc, accept=False, destroy=False))

                receiver_ref_p.create_data_writer(session_id, chunk_key3, data_size, test_actor, _promise=True) \
                    .then(lambda *s: test_actor.set_result(s, destroy=False))
                self.assertTupleEqual(self.get_result(5), (receiver_ref.address, None))

                receiver_ref_p.receive_data_part(session_id, chunk_key3, serialized_mock_data[:64],
                                                 zlib.crc32(serialized_mock_data[:64]))
                receiver_ref_p.receive_data_part(session_id, chunk_key3, serialized_mock_data[64:], serialized_crc32)
                receiver_ref_p.finish_receive(session_id, chunk_key3, serialized_crc32)

                self.assertTupleEqual((), self.get_result(5))

                receiver_ref_p.create_data_writer(session_id, chunk_key3, data_size, test_actor, _promise=True) \
                    .then(lambda *s: test_actor.set_result(s, destroy=False))
                self.assertTupleEqual(self.get_result(5), (receiver_ref.address, ReceiveStatus.RECEIVED))

                # test transfer in spill file
                def mocked_store_create(*_):
                    raise StoreFull

                with patch_method(PlasmaChunkStore.create, new=mocked_store_create):
                    # test receive aborted
                    receiver_ref_p.create_data_writer(
                        session_id, chunk_key4, data_size, test_actor, ensure_cached=False, _promise=True) \
                        .then(lambda *s: test_actor.set_result(s, destroy=False))
                    self.assertTupleEqual(self.get_result(5), (receiver_ref.address, None))

                    receiver_ref_p.register_finish_callback(session_id, chunk_key4, _promise=True) \
                        .then(lambda *s: test_actor.set_result(s, destroy=False)) \
                        .catch(lambda *exc: test_actor.set_result(exc, accept=False, destroy=False))

                    receiver_ref_p.receive_data_part(session_id, chunk_key4, serialized_mock_data[:64],
                                                     zlib.crc32(serialized_mock_data[:64]))
                    receiver_ref_p.cancel_receive(session_id, chunk_key4)
                    with self.assertRaises(ExecutionInterrupted):
                        self.get_result(5)

                    # test receive into spill
                    receiver_ref_p.create_data_writer(
                        session_id, chunk_key4, data_size, test_actor, ensure_cached=False, _promise=True) \
                        .then(lambda *s: test_actor.set_result(s, destroy=False))
                    self.assertTupleEqual(self.get_result(5), (receiver_ref.address, None))

                    receiver_ref_p.register_finish_callback(session_id, chunk_key4, _promise=True) \
                        .then(lambda *s: test_actor.set_result(s, destroy=False)) \
                        .catch(lambda *exc: test_actor.set_result(exc, accept=False, destroy=False))

                    receiver_ref_p.receive_data_part(session_id, chunk_key4, serialized_mock_data, serialized_crc32)
                    receiver_ref_p.finish_receive(session_id, chunk_key4, serialized_crc32)

                    self.assertTupleEqual((), self.get_result(5))

                # test intermediate error
                def mocked_store_create(*_):
                    raise SpillNotConfigured

                with patch_method(PlasmaChunkStore.create, new=mocked_store_create):
                    receiver_ref_p.create_data_writer(
                        session_id, chunk_key5, data_size, test_actor, ensure_cached=False, _promise=True) \
                        .then(lambda *s: test_actor.set_result(s, destroy=False),
                              lambda *s: test_actor.set_result(s, accept=False, destroy=False))

                    with self.assertRaises(SpillNotConfigured):
                        self.get_result(5)

                # test receive timeout
                receiver_ref_p.register_finish_callback(session_id, chunk_key6, _promise=True) \
                    .then(lambda *s: test_actor.set_result(s, destroy=False)) \
                    .catch(lambda *exc: test_actor.set_result(exc, accept=False, destroy=False))

                receiver_ref_p.create_data_writer(session_id, chunk_key6, data_size, test_actor,
                                                  timeout=2, _promise=True) \
                    .then(lambda *s: test_actor.set_result(s, destroy=False))
                self.assertTupleEqual(self.get_result(5), (receiver_ref.address, None))
                receiver_ref_p.receive_data_part(session_id, chunk_key6, serialized_mock_data[:64],
                                                 zlib.crc32(serialized_mock_data[:64]))

                with self.assertRaises(TimeoutError):
                    self.get_result(5)
Example #8
    def testSender(self):
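        # Exercise SenderActor against a mock receiver: missing source data, data in
        # spill files, data in the plasma store, multi-target sends, a vanished
        # endpoint and a checksum error raised on the receiving side.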
        send_pool_addr = 'localhost:%d' % get_next_port()
        recv_pool_addr = 'localhost:%d' % get_next_port()
        recv_pool_addr2 = 'localhost:%d' % get_next_port()

        options.worker.spill_directory = os.path.join(
            tempfile.gettempdir(), 'mars_spill_%d_%d' % (os.getpid(), id(run_transfer_worker)))
        session_id = str(uuid.uuid4())

        mock_data = np.array([1, 2, 3, 4])
        chunk_key1 = str(uuid.uuid4())
        chunk_key2 = str(uuid.uuid4())

        @contextlib.contextmanager
        def start_send_recv_pool():
            with start_transfer_test_pool(
                    address=send_pool_addr, plasma_size=self.plasma_storage_size) as sp:
                sp.create_actor(SenderActor, uid=SenderActor.default_name())
                with start_transfer_test_pool(
                        address=recv_pool_addr, plasma_size=self.plasma_storage_size) as rp:
                    rp.create_actor(MockReceiverActor, uid=ReceiverActor.default_name())
                    yield sp, rp

        with start_send_recv_pool() as (send_pool, recv_pool):
            chunk_holder_ref = send_pool.actor_ref(ChunkHolderActor.default_name())
            sender_ref = send_pool.actor_ref(SenderActor.default_name())
            receiver_ref = recv_pool.actor_ref(ReceiverActor.default_name())

            sender_mapper_ref = send_pool.actor_ref(PlasmaKeyMapActor.default_name())
            store = PlasmaChunkStore(self._plasma_client, sender_mapper_ref)

            with self.run_actor_test(send_pool) as test_actor:
                # send when data missing
                sender_ref_p = test_actor.promise_ref(sender_ref)
                sender_ref_p.send_data(session_id, str(uuid.uuid4()), recv_pool_addr, _promise=True) \
                    .then(lambda *s: test_actor.set_result(s, destroy=False)) \
                    .catch(lambda *exc: test_actor.set_result(exc, accept=False, destroy=False))
                with self.assertRaises(DependencyMissing):
                    self.get_result(5)

                # send data in spill
                write_spill_file(chunk_key1, mock_data)

                sender_ref_p.send_data(session_id, chunk_key1, recv_pool_addr, _promise=True) \
                    .then(lambda *s: test_actor.set_result(s, destroy=False)) \
                    .catch(lambda *exc: test_actor.set_result(exc, accept=False, destroy=False))
                self.get_result(5)
                assert_array_equal(mock_data, receiver_ref.get_result_data(session_id, chunk_key1))
                os.unlink(build_spill_file_name(chunk_key1))

                # send data in plasma store
                store.put(session_id, chunk_key1, mock_data)
                chunk_holder_ref.register_chunk(session_id, chunk_key1)

                sender_ref_p.send_data(session_id, chunk_key1, recv_pool_addr, _promise=True) \
                    .then(lambda *s: test_actor.set_result(s, destroy=False)) \
                    .catch(lambda *exc: test_actor.set_result(exc, accept=False, destroy=False))
                self.get_result(5)
                assert_array_equal(mock_data, receiver_ref.get_result_data(session_id, chunk_key1))

                # send data to multiple targets
                with start_transfer_test_pool(
                        address=recv_pool_addr2, plasma_size=self.plasma_storage_size) as rp2:
                    recv_ref2 = rp2.create_actor(MockReceiverActor, uid=ReceiverActor.default_name())

                    sender_ref_p.send_data(session_id, chunk_key1,
                                           [recv_pool_addr, recv_pool_addr2], _promise=True)
                    # send data that is already transferred / being transferred
                    sender_ref_p.send_data(session_id, chunk_key1,
                                           [recv_pool_addr, recv_pool_addr2], _promise=True) \
                        .then(lambda *s: test_actor.set_result(s, destroy=False)) \
                        .catch(lambda *exc: test_actor.set_result(exc, accept=False, destroy=False))
                    self.get_result(5)
                    assert_array_equal(mock_data, recv_ref2.get_result_data(session_id, chunk_key1))

                # send data to a non-existent endpoint, which causes an error
                store.put(session_id, chunk_key2, mock_data)
                chunk_holder_ref.register_chunk(session_id, chunk_key2)

                sender_ref_p.send_data(session_id, chunk_key2, recv_pool_addr2, _promise=True) \
                    .then(lambda *s: test_actor.set_result(s, destroy=False)) \
                    .catch(lambda *exc: test_actor.set_result(exc, accept=False, destroy=False))
                with self.assertRaises(BrokenPipeError):
                    self.get_result(5)

                def mocked_receive_data_part(*_):
                    raise ChecksumMismatch

                with patch_method(MockReceiverActor.receive_data_part, new=mocked_receive_data_part):
                    sender_ref_p.send_data(session_id, chunk_key2, recv_pool_addr, _promise=True) \
                        .then(lambda *s: test_actor.set_result(s, destroy=False)) \
                        .catch(lambda *exc: test_actor.set_result(exc, accept=False, destroy=False))
                    with self.assertRaises(ChecksumMismatch):
                        self.get_result(5)