Example #1
class GftpProvider(StorageProvider, AsyncContextManager[StorageProvider]):
    """A StorageProvider that communicates with `gftp server` through JSON-RPC.

    The provider keeps track of the files published by `gftp` and their URLs.
    If a URL no longer needs to be published then the provider _should_ issue the
    `gftp close URL` command, so the file published with this URL is closed by `gftp`.

    However, `gftp close URL` may cause errors, due to a bug in `gftp` prior to version 0.7.3
    (see https://github.com/golemfactory/yagna/pull/1501). Therefore the provider uses
    the following logic to determine if it should use the `gftp close URL` command:
    1. If the environment variable `YAPAPI_USE_GFTP_CLOSE` is set to a truthy value,
       then `gftp close URL` will be used.
    2. If the environment variable `YAPAPI_USE_GFTP_CLOSE` is set to a falsy value,
       then `gftp close URL` will not be used.
    3. If neither 1 nor 2 holds and the version reported by `gftp` is 0.7.3 or larger
       (according to Semantic Versioning 2.0.0) then `gftp close URL` will be used.
    4. Otherwise `gftp close URL` will not be used.

    Note: Reading the `YAPAPI_USE_GFTP_CLOSE` variable is done once, when the provider
    is instantiated, and the version check is made in the provider's `__aenter__()` method.
    """

    @dataclass
    class URLInfo:
        """Information about an URL published through `gftp`."""

        publish_count: int
        """Number of `gftp publish` operations for this URL.

        Serves as a reference counter. When it drops to 0, `gftp close {URL}` is invoked
        in order to release any file published with this URL that is kept open by `gftp`.
        Note that the value of this field may be larger than the number of files published,
        since a single file may be published more than once."""

        temporary_files: Set[Path]
        """Set of temporary files published with this URL.

        When the URL is unpublished by calling `gftp close {URL}`, all temporary files with this
        URL can be safely deleted.
        """

    def __init__(self, *, tmpdir: Optional[str] = None):
        self.__exit_stack = AsyncExitStack()

        # Directory for temporary files created by this provider
        self._temp_dir: Optional[Path] = Path(tmpdir) if tmpdir else None

        # Mapping of URLs to info on files published with this URL
        self._published_sources: Dict[str, GftpProvider.URLInfo] = dict()

        # Lock used to synchronize access to self._published_sources
        self._lock: asyncio.Lock = asyncio.Lock()

        # Flag indicating whether this `GftpProvider` should issue `gftp close`
        # for URLs that are no longer published. See the class docstring for details.
        self._close_urls: Optional[bool] = read_use_gftp_close_env_var()

        # Reference to an external process running the `gftp server` command
        self._process: Optional["__Process"] = None

    async def __aenter__(self) -> StorageProvider:
        if not self._temp_dir:
            self._temp_dir = Path(
                self.__exit_stack.enter_context(tempfile.TemporaryDirectory(prefix="yapapi-gftp-"))
            )
            _logger.debug("Creating a temporary directory %s", self._temp_dir)
        process = await self.__get_process()
        gftp_version = await process.version()
        assert gftp_version

        if self._close_urls is None:
            try:
                # gftp_version may look like `7.2.3 (10116c7d 2021-07-28 build #164)`,
                # so discard everything after the first space before parsing.
                semver = semantic_version.Version(gftp_version.split()[0])
                self._close_urls = semver >= MIN_GFTP_VERSION_THAT_CAN_GFTP_CLOSE
                _logger.debug(
                    "Setting _close_urls to %s, gftp version: %s", self._close_urls, gftp_version
                )
            except ValueError:
                _logger.warning("Cannot parse gftp version info '%s'", gftp_version)
                self._close_urls = False
        assert self._close_urls is not None

        return self

    async def __aexit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_value: Optional[BaseException],
        traceback: Optional[TracebackType],
    ) -> Optional[bool]:
        await self.__exit_stack.aclose()
        # Remove temporary files created by this provider
        if not self._temp_dir:
            raise RuntimeError("GftpProvider.__aenter__() not called")
        if self._temp_dir.exists():
            for info in self._published_sources.values():
                for path in info.temporary_files:
                    _delete_if_exists(path)

        return None

    def __new_file(self) -> Path:
        if not self._temp_dir:
            raise RuntimeError("GftpProvider.__aenter__() not called")
        return self.__exit_stack.enter_context(_temp_file(self._temp_dir))

    async def __get_process(self) -> GftpDriver:
        _debug = bool(os.getenv("DEBUG_GFTP"))
        process = self._process or (await self.__exit_stack.enter_async_context(service(_debug)))
        if not self._process:
            self._process = process
        return process

    async def upload_stream(self, length: int, stream: AsyncIterator[bytes]) -> Source:
        file_name = self.__new_file()
        with open(file_name, "wb") as f:
            async for chunk in stream:
                f.write(chunk)
        return await self.upload_file(file_name, _temporary=True)

    async def upload_file(self, path: os.PathLike, _temporary: bool = False) -> Source:

        path = Path(path)
        _logger.debug("Publishing file %s...", path)
        process = await self.__get_process()

        async with self._lock:

            links = await process.publish(files=[str(path)])
            assert len(links) == 1, "Invalid gftp publish response"

            length = path.stat().st_size

            url = links[0]["url"]

            if url not in self._published_sources:
                info = GftpProvider.URLInfo(
                    publish_count=1,
                    temporary_files=({path} if _temporary else set()),
                )
                self._published_sources[url] = info
            else:
                info = self._published_sources[url]

                if path in info.temporary_files:
                    raise ValueError(f"File {path} already published as temporary")

                if _temporary:
                    info.temporary_files.add(path)
                info.publish_count += 1

            _logger.debug(
                "File %s published with URL = %s, count = %d", path, url, info.publish_count
            )

        source = GftpSource(length, links[0])
        return source

    async def release_source(self, source: Source) -> None:

        if not isinstance(source, GftpSource):
            raise ValueError(f"Expected an instance of GftpSource, got {type(source)} instead")

        url = source.download_url
        _logger.debug("Releasing file %s with URL = %s ...", source.path, url)

        async with self._lock:

            if url not in self._published_sources:
                raise ValueError(
                    f"Trying to release an unpublished URL {url}, path = {source.path}"
                )
            info = self._published_sources[url]
            info.publish_count -= 1

            _logger.debug(
                "File %s released, URL = %s, count = %d", source.path, url, info.publish_count
            )

            if info.publish_count == 0:

                _logger.debug("Unpublishing URL %s...", url)
                if self._close_urls:
                    process = await self.__get_process()
                    await process.close(urls=[url])

                for path in info.temporary_files:
                    _delete_if_exists(path)

                del self._published_sources[url]

    async def new_destination(self, destination_file: Optional[PathLike] = None) -> Destination:
        if destination_file:
            if Path(destination_file).exists():
                destination_file = None
        output_file = str(destination_file) if destination_file else str(self.__new_file())
        process = await self.__get_process()
        link = await process.receive(output_file=output_file)
        return GftpDestination(process, link)
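The close-gating rules from the class docstring reduce to two small pieces of logic. `read_use_gftp_close_env_var()` and `MIN_GFTP_VERSION_THAT_CAN_GFTP_CLOSE` are referenced above but not shown; the sketch below is a hedged reconstruction of how they might look, assuming a conventional truthy/falsy reading of the environment variable:

import os
from typing import Optional

import semantic_version

# Rule 3 threshold from the docstring: gftp >= 0.7.3 supports `gftp close`.
MIN_GFTP_VERSION_THAT_CAN_GFTP_CLOSE = semantic_version.Version("0.7.3")


def read_use_gftp_close_env_var() -> Optional[bool]:
    # Hypothetical reconstruction: True/False for an explicit setting, None if unset.
    value = os.getenv("YAPAPI_USE_GFTP_CLOSE")
    if value is None:
        return None
    return value.strip().lower() not in ("", "0", "false", "no", "off")


def should_close_urls(env_setting: Optional[bool], gftp_version: str) -> bool:
    # Rules 1 and 2: an explicit environment setting always wins.
    if env_setting is not None:
        return env_setting
    try:
        # Rule 3: the reported version may carry build metadata, e.g.
        # `7.2.3 (10116c7d 2021-07-28 build #164)`; compare only the leading semver.
        semver = semantic_version.Version(gftp_version.split()[0])
        return semver >= MIN_GFTP_VERSION_THAT_CAN_GFTP_CLOSE
    except ValueError:
        # Rule 4: an unparsable version disables `gftp close`.
        return False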
Example #2
class ManagerTestCase(unittest.TestCase):
    maxDiff = None  # unittest: show full diff on assertion failure

    def setUp(self):
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)

        self.server = MockupDB(auto_ismaster={"maxWireVersion": 6})
        self.server.run()
        self.server.autoresponds(
            Command("find", "switch_collection",
                    namespace="topology_database"),
            {
                "cursor": {
                    "id":
                    0,
                    "firstBatch": [{
                        **d, "_id": i
                    } for i, d in enumerate(TOPOLOGY_DATABASE_DATA)],
                }
            },
        )

        self._stack = AsyncExitStack()

        td = self._stack.enter_context(tempfile.TemporaryDirectory())
        self.rpc_unix_sock = os.path.join(td, "l.sock")

        self._stack.enter_context(
            patch.object(settings, "REMOTE_DATABASE_MONGO_URI",
                         self.server.uri))
        self._stack.enter_context(
            patch.object(settings, "NEGOTIATOR_RPC_UNIX_SOCK_PATH",
                         self.rpc_unix_sock))
        self._stack.enter_context(
            patch("agile_mesh_network.ryu.amn_manager.OVSManager",
                  DummyOVSManager))
        self._stack.enter_context(
            # To avoid automatic connection to a relay.
            patch.object(settings, "IS_RELAY", True))

        self._stack.enter_context(
            patch.object(events_scheduler, "RyuAppEventLoopScheduler"))
        self.ryu_ev_loop_scheduler = events_scheduler.RyuAppEventLoopScheduler()
        self._stack.enter_context(self.ryu_ev_loop_scheduler)

        async def command_cb(session, msg):
            assert isinstance(msg, RPCCommand)
            await self._rpc_command_cb(msg)

        self.rpc_server = self.loop.run_until_complete(
            self._stack.enter_async_context(
                RPCUnixServer(self.rpc_unix_sock, command_cb)))

    async def _rpc_command_cb(self, msg: RPCCommand):
        self.assertEqual(msg.name, "dump_tunnels_state")
        await msg.respond({"tunnels": []})

    def tearDown(self):
        self.loop.run_until_complete(self._stack.aclose())

        self.loop.run_until_complete(self.loop.shutdown_asyncgens())
        self.loop.close()

        self.server.stop()

    def test_topology_database_sync(self):
        async def f():
            async with AgileMeshNetworkManager(
                    ryu_ev_loop_scheduler=self.ryu_ev_loop_scheduler
            ) as manager:
                manager.start_initialization()

                topology_database = manager.topology_database
                local_database = topology_database.local
                await local_database.is_filled_event.wait()
                self.assertTrue(local_database.is_filled)

                self.assertListEqual(
                    topology_database.find_random_relay_switches(),
                    [SwitchEntity.from_dict(SWITCH_ENTITY_RELAY_DATA)],
                )

                with self.assertRaises(KeyError):
                    topology_database.find_switch_by_mac(UNK_MAC)

                self.assertEqual(
                    topology_database.find_switch_by_mac(
                        SWITCH_ENTITY_BOARD_DATA["mac"]),
                    SwitchEntity.from_dict(SWITCH_ENTITY_BOARD_DATA),
                )

                self.assertListEqual(
                    topology_database.find_switches_by_mac_list([]), [])
                self.assertListEqual(
                    topology_database.find_switches_by_mac_list([UNK_MAC]), [])
                self.assertListEqual(
                    topology_database.find_switches_by_mac_list(
                        [UNK_MAC, SWITCH_ENTITY_BOARD_DATA["mac"]]),
                    [SwitchEntity.from_dict(SWITCH_ENTITY_BOARD_DATA)],
                )

                # TODO after resync extra tunnels/flows are destroyed

        self.loop.run_until_complete(asyncio.wait_for(f(), timeout=3))

    def test_rpc(self):
        async def f():
            rpc_responses = iter([
                ("dump_tunnels_state", {"tunnels": [TUNNEL_MODEL_BOARD_DATA]}),
                (
                    "create_tunnel",
                    {
                        "tunnel":
                        TUNNEL_MODEL_RELAY_DATA,
                        "tunnels": [
                            TUNNEL_MODEL_BOARD_DATA,
                            TUNNEL_MODEL_RELAY_DATA,
                        ],
                    },
                ),
                (
                    "create_tunnel",
                    {
                        "tunnel": TUNNEL_MODEL_BOARD_DATA,
                        "tunnels": [TUNNEL_MODEL_BOARD_DATA],
                    },
                ),
            ])

            async def _rpc_command_cb(msg: RPCCommand):
                name, resp = next(rpc_responses)
                self.assertEqual(msg.name, name)
                await msg.respond(resp)

            with ExitStack() as stack:
                stack.enter_context(
                    patch.object(self, "_rpc_command_cb", _rpc_command_cb))
                stack.enter_context(patch.object(settings, "IS_RELAY", False))

                async with AgileMeshNetworkManager(
                        ryu_ev_loop_scheduler=self.ryu_ev_loop_scheduler
                ) as manager:
                    manager.start_initialization()
                    await manager._initialization_task

                    self.assertDictEqual({}, manager._tunnel_creation_tasks)

                    # Don't attempt to connect to unknown macs.
                    manager.ask_for_tunnel(UNK_MAC)
                    self.assertDictEqual({}, manager._tunnel_creation_tasks)

                    # Connect to a switch, ensure that the task is cleaned up.
                    manager.ask_for_tunnel(SECOND_MAC)
                    await next(iter(manager._tunnel_creation_tasks.values()))
                    self.assertDictEqual({}, manager._tunnel_creation_tasks)

                    # Send a broadcast
                    await next(iter(self.rpc_server.sessions)).issue_broadcast(
                        "tunnel_created",
                        {
                            "tunnel": TUNNEL_MODEL_RELAY_DATA,
                            "tunnels": [TUNNEL_MODEL_RELAY_DATA],
                        },
                    )
                    await asyncio.sleep(0.001)

                    # TODO unknown tunnels after resync are dropped via RPC

            expected_event_calls = [
                # Initialization list:
                [TunnelModel.from_dict(TUNNEL_MODEL_BOARD_DATA)],
                # Initialization relay tunnel:
                [
                    TunnelModel.from_dict(TUNNEL_MODEL_BOARD_DATA),
                    TunnelModel.from_dict(TUNNEL_MODEL_RELAY_DATA),
                ],
                # ask_for_tunnel:
                [TunnelModel.from_dict(TUNNEL_MODEL_BOARD_DATA)],
                # Broadcast:
                [TunnelModel.from_dict(TUNNEL_MODEL_RELAY_DATA)],
            ]
            for (args, kwargs), ev_expected in zip_equal(
                self.ryu_ev_loop_scheduler.send_event_to_observers.call_args_list,
                expected_event_calls,
            ):
                ev = args[0]
                self.assertListEqual(
                    sorted(t for t, _ in ev.mac_to_tunswitch.values()),
                    sorted(ev_expected),
                )

        self.loop.run_until_complete(asyncio.wait_for(f(), timeout=3))

    def test_flows(self):
        async def f():
            async with AgileMeshNetworkManager(
                    ryu_ev_loop_scheduler=self.ryu_ev_loop_scheduler
            ) as manager:
                manager.start_initialization()
                # TODO missing flows from RPC sync are added
                # TODO after packet in a tunnel creation request is sent
                # TODO after tunnel creation a flow is set up
                pass

        self.loop.run_until_complete(asyncio.wait_for(f(), timeout=3))
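The `setUp`/`tearDown` pair above is the load-bearing trick of this test case: one `AsyncExitStack` collects synchronous fixtures via `enter_context` and asynchronous ones via `enter_async_context`, and a single `aclose()` in `tearDown` unwinds them all in LIFO order. A minimal self-contained sketch of the same pattern, with a stand-in context manager in place of `RPCUnixServer`:

import asyncio
import unittest
from contextlib import AsyncExitStack, asynccontextmanager


@asynccontextmanager
async def fake_server():
    # Stand-in for an async resource such as RPCUnixServer.
    yield "server"


class ExitStackPatternTestCase(unittest.TestCase):
    def setUp(self):
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)
        self._stack = AsyncExitStack()
        # Sync and async fixtures share one stack.
        self.server = self.loop.run_until_complete(
            self._stack.enter_async_context(fake_server())
        )

    def tearDown(self):
        # One aclose() releases everything registered in setUp, in LIFO order.
        self.loop.run_until_complete(self._stack.aclose())
        self.loop.close()

    def test_fixture_available(self):
        self.assertEqual(self.server, "server")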
Example #3
class AgileMeshNetworkManager:
    # Execution context: run entirely in the asyncio event loop,
    # no thread safety is required.

    def __init__(self, *, ryu_ev_loop_scheduler: RyuAppEventLoopScheduler) -> None:
        self.ryu_ev_loop_scheduler = ryu_ev_loop_scheduler
        self.topology_database = TopologyDatabase()
        self.negotiator_rpc = NegotiatorRPC(settings.NEGOTIATOR_RPC_UNIX_SOCK_PATH)
        self.ovs_manager = OVSManager(
            datapath_id=settings.OVS_DATAPATH_ID,
            # TODO ryu_app.CONF?
        )

        self._stack = AsyncExitStack()
        self._initialization_task = None
        self._tunnel_creation_tasks: Dict[asyncio.Future, asyncio.Future] = {}

        self.topology_database.add_local_db_synced_callback(self._event_db_synced)

        self.negotiator_rpc.add_tunnels_changed_callback(
            self._event_negotiator_tunnels_update
        )

    async def __aenter__(self):
        try:
            self._stack.enter_context(self.ovs_manager)
            await self._stack.enter_async_context(self.topology_database)
            await self._stack.enter_async_context(self.negotiator_rpc)
        except BaseException:
            await self._stack.aclose()
            raise
        return self

    def start_initialization(self):
        assert self._initialization_task is None
        self._initialization_task = asyncio.ensure_future(self._initialization())

    async def __aexit__(self, exc_type, exc_value, exc_tb):
        if self._initialization_task:
            self._initialization_task.cancel()
        for task in self._tunnel_creation_tasks:
            task.cancel()
        self._tunnel_creation_tasks.clear()
        await self._stack.aclose()

    async def _initialization(self):
        logger.info("Initial sync: waiting for Local DB to initialize...")
        await self.topology_database.local.is_filled_event.wait()
        logger.info("Initial sync: waiting for Local DB to initialize: done.")
        logger.info("Initial sync: retrieving tunnels from negotiator...")
        tunnels = None
        while True:
            try:
                tunnels = await self.negotiator_rpc.list_tunnels()
                logger.info("Initial sync: retrieving tunnels from negotiator: done.")
                break
            except CancelledError:
                return
            except Exception:
                logger.error(
                    "Initial sync: failed to retrieve tunnels from Negotiator.",
                    exc_info=True,
                )
                await asyncio.sleep(5)
        while True:
            relay_switch = None
            try:
                if self._is_relay_connected(tunnels):
                    logger.info("Initial sync: no need to connect to a relay.")
                else:
                    # TODO support multiple switches?
                    relay_switch, = self.topology_database.find_random_relay_switches(1)
                    logger.info("Initial sync: connecting to %s...", relay_switch)
                    await self.connect_switch(relay_switch)
                    logger.info("Initial sync: connecting to %s: done.", relay_switch)
                break
            except CancelledError:
                return
            except Exception:
                logger.error(
                    "Initial sync: failed to connect to relay switch %s.",
                    relay_switch,
                    exc_info=True,
                )
                await asyncio.sleep(5)
        logger.info("Initial sync: complete!")

    # These methods must be fast and not throw any exceptions.
    def _event_db_synced(self):
        pass  # TODO

    def _event_negotiator_tunnels_update(
        self, topic: str, tunnel: TunnelModel, tunnels: Sequence[TunnelModel]
    ) -> None:
        # Note that for the tunnel_created response this would be
        # called twice in a row.
        if not self.topology_database.local.is_filled:
            logger.error(
                "Skipping negotiator event, because Local DB is not initialized yet"
            )
            return

        logger.debug("Processing a list of tunnels from Negotiator: %s", tunnels)

        valid, invalid, mac_to_tunnel, mac_to_switch = self._filter_tunnels(tunnels)

        valid_tunnels = [mac_to_tunnel[mac] for mac in valid]
        invalid_tunnels = [mac_to_tunnel[mac] for mac in invalid]
        logger.debug(
            "Negotiator tunnels processed. Valid: %s. Invalid: %s",
            valid_tunnels,
            invalid_tunnels,
        )

        # TODO for invalid_tunnels - send tunnel_stop command to negotiator

        # TODO ?? maybe don't always make a full sync, but use more granular
        # events (like a tunnel has been added/removed)?
        self.ryu_ev_loop_scheduler.send_event_to_observers(
            events.EventActiveTunnelsList(
                {mac: (mac_to_tunnel[mac], mac_to_switch[mac]) for mac in valid}
            )
        )

    def _filter_tunnels(self, tunnels):
        for t in tunnels:
            assert t.src_mac == self.ovs_manager.bridge_mac, (
                f"Negotiator accepted a tunnel whose src MAC {t.src_mac} doesn't "
                f"match the OVS bridge's MAC {self.ovs_manager.bridge_mac}."
            )

        existing_switches = self.topology_database.find_switches_by_mac_list(
            [t.dst_mac for t in tunnels]
        )

        mac_to_tunnel = {t.dst_mac: t for t in tunnels}
        mac_to_switch = {s.mac: s for s in existing_switches}

        valid_macs = list(mac_to_tunnel.keys() & mac_to_switch.keys())
        invalid_macs = list(mac_to_tunnel.keys() - mac_to_switch.keys())
        return valid_macs, invalid_macs, mac_to_tunnel, mac_to_switch

    def _is_relay_connected(self, tunnels):
        if settings.IS_RELAY:
            return True
        valid, _, _, mac_to_switch = self._filter_tunnels(tunnels)
        relays = [mac_to_switch[m] for m in valid if mac_to_switch[m].is_relay]
        assert len(relays) <= 1  # TODO drop extra
        if relays:
            logger.info("Relay is connected: %s", relays[0])
        return bool(relays)

    async def connect_switch(self, switch: SwitchEntity):
        # TODO layers? udp? negotiation?
        tcp = NegotiatorProtocolValue("tcp")
        dest_tcp = switch.layers_config.negotiator[tcp]
        layers = LayersDescriptionRPCModel.from_dict(
            {"dest": dest_tcp, "protocol": tcp, "layers": switch.layers_config.layers}
        )
        await self.negotiator_rpc.start_tunnel(
            src_mac=self.ovs_manager.bridge_mac,
            dst_mac=switch.mac,
            timeout=20,
            layers=layers,
        )  # TODO track result? timeout?

    def ask_for_tunnel(self, dst_mac):
        try:
            switch = self.topology_database.find_switch_by_mac(dst_mac)
        except KeyError:
            logger.warning(
                f"Unable to connect to {dst_mac}: no such Switch in the database"
            )
            return

        t = None

        async def task():
            nonlocal t
            try:
                await self.connect_switch(switch)
            except CancelledError:
                return
            except Exception:
                logger.warning(f"Failed to connect to {dst_mac}", exc_info=True)
            finally:
                self._tunnel_creation_tasks.pop(t, None)

        t = asyncio.ensure_future(task())
        self._tunnel_creation_tasks[t] = t
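`ask_for_tunnel` is a fire-and-forget pattern worth isolating: each spawned task registers itself in `_tunnel_creation_tasks` and removes itself in a `finally` block, so `__aexit__` only cancels tasks that are still genuinely in flight. A stripped-down sketch of the same idea (names are illustrative, not from the project):

import asyncio
from typing import Awaitable, Dict


class TaskRegistry:
    # Minimal sketch of the self-removing fire-and-forget pattern above.

    def __init__(self) -> None:
        self._tasks: Dict[asyncio.Future, asyncio.Future] = {}

    def spawn(self, coro: Awaitable) -> None:
        t = None

        async def wrapper():
            try:
                await coro
            finally:
                # The task removes itself on completion, so the registry
                # only ever holds tasks that are still running.
                self._tasks.pop(t, None)

        t = asyncio.ensure_future(wrapper())
        self._tasks[t] = t

    def cancel_all(self) -> None:
        # Mirrors __aexit__ above: cancel whatever is still in flight.
        for task in self._tasks:
            task.cancel()
        self._tasks.clear()


async def demo():
    registry = TaskRegistry()
    registry.spawn(asyncio.sleep(0.01))
    await asyncio.sleep(0.05)
    assert not registry._tasks  # the task cleaned itself up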
Example #4
class GftpProvider(StorageProvider, AsyncContextManager[StorageProvider]):
    _temp_dir: Optional[Path]
    _registered_sources: Dict[str, GftpSource]

    def __init__(self, *, tmpdir: Optional[str] = None):
        self.__exit_stack = AsyncExitStack()
        self._temp_dir = Path(tmpdir) if tmpdir else None
        self._registered_sources = dict()
        self._process = None

    async def __aenter__(self) -> StorageProvider:
        if not self._temp_dir:
            self._temp_dir = Path(
                self.__exit_stack.enter_context(tempfile.TemporaryDirectory())
            )
        process = await self.__get_process()
        _ver = await process.version()
        # TODO check version
        assert _ver
        return self

    async def __aexit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_value: Optional[BaseException],
        traceback: Optional[TracebackType],
    ) -> Optional[bool]:
        await self.__exit_stack.aclose()
        return None

    def __new_file(self) -> Path:
        temp_dir: Path = self._temp_dir or Path(
            self.__exit_stack.enter_context(tempfile.TemporaryDirectory())
        )
        if not self._temp_dir:
            self._temp_dir = temp_dir
        return self.__exit_stack.enter_context(_temp_file(temp_dir))

    async def __get_process(self) -> GftpDriver:
        _debug = bool(os.getenv("DEBUG_GFTP"))
        process = self._process or (await self.__exit_stack.enter_async_context(service(_debug)))
        if not self._process:
            self._process = process
        return process

    async def upload_stream(self, length: int, stream: AsyncIterator[bytes]) -> Source:
        file_name = self.__new_file()
        with open(file_name, "wb") as f:
            async for chunk in stream:
                f.write(chunk)
        return await self.upload_file(file_name)

    async def upload_file(self, path: os.PathLike) -> Source:
        hasher = hashlib.sha3_256()
        with open(path, "rb") as f:
            while True:
                chunk = f.read(4096)
                if not chunk:
                    break
                hasher.update(chunk)
        digest = hasher.hexdigest()
        if digest in self._registered_sources:
            _logger.debug("File %s already published, digest: %s", path, digest)
            return self._registered_sources[digest]
        _logger.debug("Publishing file %s, digest: %s", path, digest)

        process = await self.__get_process()
        links = await process.publish(files=[str(path)])
        length = Path(path).stat().st_size
        assert len(links) == 1, "invalid gftp publish response"
        source = GftpSource(length, links[0])
        self._registered_sources[digest] = source
        return source

    async def new_destination(self, destination_file: Optional[PathLike] = None) -> Destination:
        if destination_file:
            if Path(destination_file).exists():
                destination_file = None
        output_file = str(destination_file) if destination_file else str(self.__new_file())
        process = await self.__get_process()
        link = await process.receive(output_file=output_file)
        return GftpDestination(process, link)
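Unlike Example #1, which reference-counts publish operations and can eventually `gftp close` a URL, this variant deduplicates by content: two uploads of byte-identical files hash to the same `_registered_sources` key, so the file is published only once (and never unpublished). The chunked digest loop can be factored into a small standalone helper; a sketch using the same SHA3-256 scheme:

import hashlib
from pathlib import Path


def file_digest(path: Path, chunk_size: int = 4096) -> str:
    # Chunked hashing as in upload_file() above: memory use stays constant
    # regardless of file size.
    hasher = hashlib.sha3_256()
    with open(path, "rb") as f:
        while True:
            chunk = f.read(chunk_size)
            if not chunk:
                break
            hasher.update(chunk)
    return hasher.hexdigest()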
Example #5
File: gftp.py Project: prekucki/yapapi
class GftpProvider(StorageProvider, AsyncContextManager[StorageProvider]):
    _temp_dir: Optional[Path]

    def __init__(self, *, tmpdir: Optional[str] = None):
        self.__exit_stack = AsyncExitStack()
        self._temp_dir = Path(tmpdir) if tmpdir else None
        self._process = None

    async def __aenter__(self) -> StorageProvider:
        if not self._temp_dir:
            self._temp_dir = Path(
                self.__exit_stack.enter_context(tempfile.TemporaryDirectory()))
        process = await self.__get_process()
        _ver = await process.version()
        # TODO check version
        assert _ver
        return self

    async def __aexit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_value: Optional[BaseException],
        traceback: Optional[TracebackType],
    ) -> Optional[bool]:
        await self.__exit_stack.aclose()
        return None

    def __new_file(self) -> Path:
        temp_dir: Path = self._temp_dir or Path(
            self.__exit_stack.enter_context(tempfile.TemporaryDirectory()))
        if not self._temp_dir:
            self._temp_dir = temp_dir
        return self.__exit_stack.enter_context(_temp_file(temp_dir))

    async def __get_process(self) -> GftpDriver:
        _debug = bool(os.getenv("DEBUG_GFTP"))
        process = self._process or (
            await self.__exit_stack.enter_async_context(service(_debug))
        )
        if not self._process:
            self._process = process
        return process

    async def upload_stream(self, length: int, stream: AsyncIterator[bytes]) -> Source:
        file_name = self.__new_file()
        with open(file_name, "wb") as f:
            async for chunk in stream:
                f.write(chunk)
        process = await self.__get_process()
        links = await process.publish(files=[str(file_name)])
        assert len(links) == 1, "invalid gftp publish response"
        link = links[0]
        return GftpSource(length, link)

    async def upload_file(self, path: os.PathLike) -> Source:
        process = await self.__get_process()
        links = await process.publish(files=[str(path)])
        length = Path(path).stat().st_size
        assert len(links) == 1, "invalid gftp publish response"
        return GftpSource(length, links[0])

    async def new_destination(self, destination_file: Optional[PathLike] = None) -> Destination:
        if destination_file:
            if Path(destination_file).exists():
                destination_file = None
        output_file = str(destination_file) if destination_file else str(self.__new_file())
        process = await self.__get_process()
        link = await process.receive(output_file=output_file)
        return GftpDestination(process, link)
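All three GftpProvider variants (Examples #1, #4, and #5) expose the provider as an async context manager, so typical usage wraps it in `async with` and lets the internal `AsyncExitStack` release the `gftp server` process, temporary files, and published URLs on exit. A hedged usage sketch (assumes the yapapi imports are available and a `gftp` binary is on PATH; the file path is illustrative):

import asyncio


async def main():
    async with GftpProvider() as storage:
        source = await storage.upload_file("payload.bin")  # hypothetical file
        print(source.download_url)
    # Leaving the block closes the gftp process and cleans up temp files.


asyncio.run(main())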