Example #1
    async def stop_stream(self, stream: ManagedStream):
        stream.stop_download()
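        # an unfinished download leaves a partial file behind; remove it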
        if not stream.finished and os.path.isfile(stream.full_path):
            try:
                os.remove(stream.full_path)
            except OSError as err:
                log.warning("Failed to delete partial download %s from downloads directory: %s",
                            stream.full_path, str(err))
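        # mark the stream as stopped in memory and persist the new status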
        if stream.running:
            stream.update_status(ManagedStream.STATUS_STOPPED)
            await self.storage.change_file_status(stream.stream_hash, ManagedStream.STATUS_STOPPED)
Example #2
    async def delete_stream(self, stream: ManagedStream, delete_file: typing.Optional[bool] = False):
        # a download that never finished leaves a partial file; always delete it
        stream_finished = stream.finished or (
            stream.downloader is not None and stream.downloader.stream_finished_event.is_set()
        )
        if not stream_finished:
            delete_file = True
        stream.stop_download()
        self.streams.remove(stream)
        await self.storage.delete_stream(stream.descriptor)

        # delete the sd blob and every content blob; the last entry in
        # descriptor.blobs is the zero-length stream terminator and has no hash
        blob_hashes = [stream.sd_hash] + [info.blob_hash for info in stream.descriptor.blobs[:-1]]
        await self.blob_manager.delete_blobs(blob_hashes)
        if delete_file:
            path = os.path.join(stream.download_directory, stream.file_name)
            if os.path.isfile(path):
                os.remove(path)
Example #3
    async def start_stream(self, stream: ManagedStream) -> bool:
        """
        Resume or rebuild a partial or completed stream
        """

        path = os.path.join(stream.download_directory, stream.file_name)

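        # only rebuild the downloader when the stream is idle and its file is missing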
        if not stream.running and not os.path.isfile(path):
            if stream.downloader:
                stream.downloader.stop()
                stream.downloader = None

            # the directory is gone, can happen when the folder that contains a published file is deleted
            # reset the download directory to the default and update the file name
            if not os.path.isdir(stream.download_directory):
                stream.download_directory = self.config.download_dir

            stream.downloader = self.make_downloader(
                stream.sd_hash, stream.download_directory, stream.descriptor.suggested_file_name
            )
            if stream.status != ManagedStream.STATUS_FINISHED:
                await self.storage.change_file_status(stream.stream_hash, ManagedStream.STATUS_RUNNING)
                stream.update_status(ManagedStream.STATUS_RUNNING)
            stream.start_download(self.node)
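            # the download hasn't really begun until the sd blob (the stream
            # descriptor) has been fetched; give up after the configured timeout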
            try:
                await asyncio.wait_for(self.loop.create_task(stream.downloader.got_descriptor.wait()),
                                       self.config.download_timeout)
            except asyncio.TimeoutError:
                stream.stop_download()
                stream.downloader = None
                return False
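            # the downloader may have altered the file name to avoid a clash;
            # persist the name it actually used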
            file_name = os.path.basename(stream.downloader.output_path)
            await self.storage.change_file_download_dir_and_file_name(
                stream.stream_hash, self.config.download_dir, file_name
            )
            self.wait_for_stream_finished(stream)
        return True
Example #4
class TestManagedStream(BlobExchangeTestBase):
    async def create_stream(self, blob_count: int = 10):
        self.stream_bytes = b''
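        # MAX_BLOB_SIZE - 1 bytes of plaintext fill one encrypted blob
        # once encryption padding is added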
        for _ in range(blob_count):
            self.stream_bytes += os.urandom(MAX_BLOB_SIZE - 1)
        # create the stream
        file_path = os.path.join(self.server_dir, "test_file")
        with open(file_path, 'wb') as f:
            f.write(self.stream_bytes)
        descriptor = await StreamDescriptor.create_stream(
            self.loop, self.server_blob_manager.blob_dir, file_path)
        self.sd_hash = descriptor.calculate_sd_hash()
        return descriptor

    async def setup_stream(self, blob_count: int = 10):
        await self.create_stream(blob_count)
        self.stream = ManagedStream(self.loop, self.client_config,
                                    self.client_blob_manager, self.sd_hash,
                                    self.client_dir)

    async def _test_transfer_stream(self,
                                    blob_count: int,
                                    mock_accumulate_peers=None):
        await self.setup_stream(blob_count)
        mock_node = mock.Mock(spec=Node)

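        # accumulate_peers normally queries the DHT; this stub hands the
        # downloader the test server as the only peer straight away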
        def _mock_accumulate_peers(q1, q2):
            async def _task():
                pass

            q2.put_nowait([self.server_from_client])
            return q2, self.loop.create_task(_task())

        mock_node.accumulate_peers = mock_accumulate_peers or _mock_accumulate_peers
        await self.stream.setup(mock_node, save_file=True)
        await self.stream.finished_writing.wait()
        self.assertTrue(os.path.isfile(self.stream.full_path))
        self.stream.stop_download()
        self.assertTrue(os.path.isfile(self.stream.full_path))
        with open(self.stream.full_path, 'rb') as f:
            self.assertEqual(f.read(), self.stream_bytes)
        await asyncio.sleep(0.01)

    async def test_transfer_stream(self):
        await self._test_transfer_stream(10)

    @unittest.skip("slow: transfers one hundred blobs")
    async def test_transfer_hundred_blob_stream(self):
        await self._test_transfer_stream(100)

    async def test_transfer_stream_bad_first_peer_good_second(self):
        await self.setup_stream(2)

        mock_node = mock.Mock(spec=Node)

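        # nothing is listening on this port, so connections to the bad peer fail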
        bad_peer = KademliaPeer(self.loop,
                                "127.0.0.1",
                                b'2' * 48,
                                tcp_port=3334)

        def _mock_accumulate_peers(q1, q2):
            async def _task():
                pass

            q2.put_nowait([bad_peer])
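            # hand over the working peer a second later, after the bad peer
            # has had time to fail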
            self.loop.call_later(1, q2.put_nowait, [self.server_from_client])
            return q2, self.loop.create_task(_task())

        mock_node.accumulate_peers = _mock_accumulate_peers

        await self.stream.setup(mock_node, save_file=True)
        await self.stream.finished_writing.wait()
        self.assertTrue(os.path.isfile(self.stream.full_path))
        with open(self.stream.full_path, 'rb') as f:
            self.assertEqual(f.read(), self.stream_bytes)

    async def test_client_chunked_response(self):
        self.server.stop_server()

        class ChunkedServerProtocol(BlobServerProtocol):
            def send_response(self, responses):
                to_send = []
                while responses:
                    to_send.append(responses.pop())
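                # dribble the response out one byte at a time so the client
                # must reassemble it from many tiny reads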
                for byte in BlobResponse(to_send).serialize():
                    self.transport.write(bytes([byte]))

        self.server.server_protocol_class = ChunkedServerProtocol
        self.server.start_server(33333, '127.0.0.1')
        self.assertEqual(0,
                         len(self.client_blob_manager.completed_blob_hashes))
        await asyncio.wait_for(self._test_transfer_stream(10), timeout=2)
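        # ten content blobs plus the sd blob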
        self.assertEqual(11,
                         len(self.client_blob_manager.completed_blob_hashes))

    async def test_create_and_decrypt_one_blob_stream(self,
                                                      blobs=1,
                                                      corrupt=False):
        descriptor = await self.create_stream(blobs)

        # copy blob files
        shutil.copy(
            os.path.join(self.server_blob_manager.blob_dir, self.sd_hash),
            os.path.join(self.client_blob_manager.blob_dir, self.sd_hash))
        self.stream = ManagedStream(self.loop, self.client_config,
                                    self.client_blob_manager, self.sd_hash,
                                    self.client_dir)

        for blob_info in descriptor.blobs[:-1]:
            shutil.copy(
                os.path.join(self.server_blob_manager.blob_dir,
                             blob_info.blob_hash),
                os.path.join(self.client_blob_manager.blob_dir,
                             blob_info.blob_hash))
            if corrupt and blob_info.length == MAX_BLOB_SIZE:
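                # simulate corruption: truncating at position zero empties the copied blob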
                with open(
                        os.path.join(self.client_blob_manager.blob_dir,
                                     blob_info.blob_hash), "rb+") as handle:
                    handle.truncate()
                    handle.flush()
        await self.stream.setup()
        await self.stream.finished_writing.wait()
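        # with a corrupt blob, decryption should fail before any output file appears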
        if corrupt:
            return self.assertFalse(
                os.path.isfile(os.path.join(self.client_dir, "test_file")))

        with open(os.path.join(self.client_dir, "test_file"), "rb") as f:
            decrypted = f.read()
        self.assertEqual(decrypted, self.stream_bytes)

        self.assertTrue(
            self.client_blob_manager.get_blob(self.sd_hash).get_is_verified())
        self.assertTrue(
            self.client_blob_manager.get_blob(
                self.stream.descriptor.blobs[0].blob_hash).get_is_verified())

    async def test_create_and_decrypt_multi_blob_stream(self):
        await self.test_create_and_decrypt_one_blob_stream(10)