Example #1
 async def format_and_send(ctx: commands.Context,
                           obj: Any,
                           *,
                           is_owner: bool = False) -> None:
     source = obj
     if isinstance(obj, commands.Cog):
         source = type(obj)
     elif isinstance(obj, commands.Command):
         source = obj.callback
         if not source.__module__:
             # probably some kind of custom-coded command
             if is_owner:
                 return await ctx.invoke(
                     ctx.bot.get_command("instantcmd source"),
                     command=obj.qualified_name)
             else:
                 raise OSError
     elif isinstance(obj, property):
         source = obj.fget
     elif isinstance(
             obj,
         (discord.utils.cached_property, discord.utils.CachedSlotProperty)):
         source = obj.function
     try:
         lines, line = inspect.getsourcelines(source)
         source_file = inspect.getsourcefile(source)
     except TypeError:
         if isinstance(source, type):
             raise
         source = type(source)
         lines, line = inspect.getsourcelines(source)
         source_file = inspect.getsourcefile(source)
     module = getattr(inspect.getmodule(source), "__name__", None)
     if source_file and module and source_file.endswith("__init__.py"):
         full_module = f"{module}.__init__"
     else:
         full_module = module
     is_installed = False
     header: str = ""
     if full_module:
         if full_module.startswith("discord."):
             is_installed = True
             if discord.__version__[-1].isdigit():
                 dpy_commit = "v" + discord.__version__
             else:
                 try:
                     dpy_version = version("discord.py").split("+g")
                 except PackageNotFoundError:
                     dpy_commit = "master"
                 else:
                     dpy_commit = dpy_version[1] if len(
                         dpy_version) == 2 else "master"
             header = f"<https://github.com/Rapptz/discord.py/blob/{dpy_commit}/{full_module.replace('.', '/')}.py#L{line}-L{line + len(lines) - 1}>"
         elif full_module.startswith("redbot."):
             is_installed = True
             if "dev" in redbot.__version__:
                 red_commit = "V3/develop"
             else:
                 red_commit = redbot.__version__
             header = f"<https://github.com/Cog-Creators/Red-DiscordBot/blob/{red_commit}/{full_module.replace('.', '/')}.py#L{line}-L{line + len(lines) - 1}>"
         elif dl := ctx.bot.get_cog("Downloader"):
             is_installed, installable = await dl.is_installed(
                 full_module.split(".")[0])
             if is_installed:
                 if installable.repo is None:
                     is_installed = False
                 else:
                     if ("mikeshardmind" in installable.repo.url.lower()
                             or "sinbad" in installable.repo.url.lower()):
                         # Sinbad's license specifically disallows redistribution of code, as per Section 3.
                         #   Ref: https://github.com/mikeshardmind/SinbadCogs/blob/9cdcd042d57cc39c7330fcda50ecf580c055c313/LICENSE#L73-L76
                         # Raising OSError here will prevent even bot owners from viewing the code.
                         raise Unlicensed()
                     else:
                         url = yarl.URL(installable.repo.url)
                         if url.user or url.password:
                             is_installed = False
                         header = f"<{installable.repo.clean_url.rstrip('/')}/blob/{installable.commit}/{full_module.replace('.', '/')}.py#L{line}-L{line + len(lines) - 1}>"
Example #2
 def test_make_filename_relative(self):
     self.url = yarl.URL("/images")
     self.filename, self.hashname = self.handler._make_filename(self.url)
     self.assertEqual(self.filename, "/images")
     self.assertEqual(self.hashname, "41389bcf7f7427468d8c8675db2d4f98")
Example #3
    async def fetch(self, url, method='GET', headers=None, body=None):
        """Perform a HTTP request and return decoded JSON data"""
        request_headers = self.prepare_request_headers(headers)
        url = self.proxy + url

        if self.verbose:
            self.print("\nRequest:", method, url, headers, body)
        self.logger.debug("%s %s, Request: %s %s", method, url, headers, body)

        request_body = body
        encoded_body = body.encode() if body else None
        self.open()
        session_method = getattr(self.session, method.lower())

        http_response = None
        http_status_code = None
        http_status_text = None
        json_response = None
        try:
            async with session_method(yarl.URL(url, encoded=True),
                                      data=encoded_body,
                                      headers=request_headers,
                                      timeout=(self.timeout / 1000),
                                      proxy=self.aiohttp_proxy) as response:
                http_response = await response.text()
                http_status_code = response.status
                http_status_text = response.reason
                json_response = self.parse_json(http_response)
                headers = response.headers
                if self.enableLastHttpResponse:
                    self.last_http_response = http_response
                if self.enableLastResponseHeaders:
                    self.last_response_headers = headers
                if self.enableLastJsonResponse:
                    self.last_json_response = json_response
                if self.verbose:
                    self.print("\nResponse:", method, url, http_status_code,
                               headers, http_response)
                self.logger.debug("%s %s, Response: %s %s %s", method, url,
                                  http_status_code, headers, http_response)

        except socket.gaierror as e:
            details = ' '.join([self.id, method, url])
            raise ExchangeNotAvailable(details) from e

        except (concurrent.futures.TimeoutError, asyncio.TimeoutError) as e:
            details = ' '.join([self.id, method, url])
            raise RequestTimeout(details) from e

        except aiohttp.ClientConnectionError as e:
            details = ' '.join([self.id, method, url])
            raise ExchangeNotAvailable(details) from e

        except aiohttp.ClientError as e:  # base exception class
            details = ' '.join([self.id, method, url])
            raise ExchangeError(details) from e

        self.handle_errors(http_status_code, http_status_text, url, method,
                           headers, http_response, json_response,
                           request_headers, request_body)
        self.handle_http_status_code(http_status_code, http_status_text, url,
                                     method, http_response)
        if json_response is not None:
            return json_response
        if self.is_text_response(headers):
            return http_response
        return response.content
Example #4
 def get_url(self):
     return yarl.URL('/') / self.typename / str(self.id)
Example #5
File: util.py Project: rikroe/core
def spotify_uri_from_media_browser_url(media_content_id: str) -> str:
    """Extract spotify URI from media browser URL."""
    if media_content_id and media_content_id.startswith(MEDIA_PLAYER_PREFIX):
        parsed_url = yarl.URL(media_content_id)
        media_content_id = parsed_url.name
    return media_content_id
Example #6
def cli(url: str, name: str) -> None:
    output_dir = Path.cwd() / "output" / name
    if output_dir.is_dir():
        keep_going = click.confirm(f"Directory '{output_dir}' already exists. Do you want to clear it?", default=True)
        if not keep_going:
            return
        shutil.rmtree(output_dir)
    output_dir.mkdir()
    driver = selenium.webdriver.Chrome()
    driver.get(url)

    house_info = driver.find_element_by_class_name("house-info").text
    features = driver.find_element_by_class_name("description").text
    description = driver.find_element_by_class_name("property-description").text
    with open(output_dir / "listing.txt", "w") as f:
        f.write(underline("Info"))
        f.write(house_info)
        f.write("\n")
        f.write(underline("Features"))
        f.write(features)
        f.write("\n")
        f.write(underline("Description"))
        f.write(description)

    e_container = driver.find_element_by_class_name("primary-photo-container")
    a = e_container.find_element_by_tag_name("a")
    a.click()

    photo_urls_script = driver.find_element_by_class_name("thumbnail").find_element_by_tag_name("script").get_attribute("innerHTML").strip()
    arr_string = photo_urls_script.split("=", 1)[1].rstrip(";")
    urls = json.loads(arr_string)
    e_carousel = driver.find_element_by_class_name("carousel")
    with httpx.Client() as client:
        e_images = retry(lambda: e_carousel.find_elements_by_tag_name("img"), "Could not get images")
        num_images = len(urls)
        max_index_length = len(str(num_images))
        ui = -1
        for ei, e_image in enumerate(e_images):
            print()
            title = e_image.get_attribute("title")
            print(ei + 1, title)
            if "virtual" in e_image.get_attribute("class"):
                print("virtual, skipping")
                continue
            ui += 1
            image_number = ui + 1
            src_thumb_raw = urls[ui]
            src_thumb = yarl.URL(src_thumb_raw)
            query_params = {**dict(src_thumb.query), **{"sm": "m", "w": 1260, "h": 1024}}
            src_full = src_thumb.with_query(query_params)
            print(src_thumb, src_full)
            r = client.get(str(src_full))
            r.raise_for_status()
            content_type = r.headers["content-type"]
            ext = mimetypes.guess_extension(content_type)
            clean_title = re.sub(r'[\\/:"*?<>|]+', "", title)
            base_filename = f"{image_number:0{max_index_length}d}-{clean_title}"
            dst = (output_dir / base_filename).with_suffix(ext)
            print("->", dst)
            dst.write_bytes(r.content)

    print(f"Zipping: {output_dir}")
    archive = shutil.make_archive(name, "zip", root_dir=output_dir)
    archive_dst = shutil.move(archive, output_dir)
    print(f"Zipfile is at: {archive_dst}")
        help="The location to store the files that allow resuming a scrape "
        "in the event of a failure. Defaults to the working directory.",
        default=".",
    )

    args = parser.parse_args()

    loop = asyncio.get_event_loop()
    with PersistenceHandler(
        os.path.join(args.persist_location, args.network_address)
    ) as persistence_handler:
        task = loop.create_task(
            scrape_dvwa(
                persistence_handler,
                sql_injection_vulnerability_checker,
                yarl.URL(f"http://{args.network_address}/index.php"),
                args.username,
                args.password,
            )
        )
        loop.run_until_complete(task)
        vulnerability_results = task.result()
        vulnerabilities = [v for v in vulnerability_results if v.is_vulnerable]

        print(
            f"\nScraped {len(vulnerability_results)} pages and found "
            f"{len(vulnerabilities)} sql injection vulnerabilities in the "
            "following urls:"
        )
        for vuln in sorted(vulnerabilities, key=lambda x: x.url):
            print(f"\t{vuln.url}")
Example #8
 def _get_host_always(self, url: str) -> str:
     try:
         return yarl.URL(url).host or ''
     except (UnicodeError, ValueError):
         return ''
Example #9
    async def async_play_media(self, media_type, media_id, **kwargs):
        """Play a piece of media."""
        # Handle media_source
        if media_source.is_media_source_id(media_id):
            sourced_media = await media_source.async_resolve_media(
                self.hass, media_id)
            media_type = sourced_media.mime_type
            media_id = sourced_media.url

        extra = kwargs.get(ATTR_MEDIA_EXTRA, {})
        metadata = extra.get("metadata")

        # Handle media supported by a known cast app
        if media_type == CAST_DOMAIN:
            try:
                app_data = json.loads(media_id)
                if metadata is not None:
                    app_data["metadata"] = extra.get("metadata")
            except json.JSONDecodeError:
                _LOGGER.error("Invalid JSON in media_content_id")
                raise

            # Special handling for passed `app_id` parameter. This will only launch
            # an arbitrary cast app, generally for UX.
            if "app_id" in app_data:
                app_id = app_data.pop("app_id")
                _LOGGER.info("Starting Cast app by ID %s", app_id)
                await self.hass.async_add_executor_job(
                    self._chromecast.start_app, app_id)
                if app_data:
                    _LOGGER.warning(
                        "Extra keys %s were ignored. Please use app_name to cast media",
                        app_data.keys(),
                    )
                return

            app_name = app_data.pop("app_name")
            try:
                await self.hass.async_add_executor_job(quick_play,
                                                       self._chromecast,
                                                       app_name, app_data)
            except NotImplementedError:
                _LOGGER.error("App %s not supported", app_name)
            return

        # Try the cast platforms
        for platform in self.hass.data[CAST_DOMAIN].values():
            result = await platform.async_play_media(self.hass, self.entity_id,
                                                     self._chromecast,
                                                     media_type, media_id)
            if result:
                return

        # If media ID is a relative URL, we serve it from HA.
        media_id = async_process_play_media_url(self.hass, media_id)

        # Configure play command for when playing a HLS stream
        if is_hass_url(self.hass, media_id):
            parsed = yarl.URL(media_id)
            if parsed.path.startswith("/api/hls/"):
                extra = {
                    **extra,
                    "stream_type": "LIVE",
                    "media_info": {
                        "hlsVideoSegmentFormat": "fmp4",
                    },
                }

        # Default to play with the default media receiver
        app_data = {"media_id": media_id, "media_type": media_type, **extra}
        await self.hass.async_add_executor_job(quick_play, self._chromecast,
                                               "default_media_receiver",
                                               app_data)
Example #10
    created_url = benchmark(factory, NON_ASCII_URL)
    if isinstance(created_url, url.URL):
        assert str(created_url.punycode().escape()) == NON_ASCII_URL_ENCODED
    elif isinstance(created_url, furl.furl):
        assert created_url.url == NON_ASCII_URL_ENCODED
    elif isinstance(created_url, hyperlink.URL):
        assert created_url.to_uri().to_text() == NON_ASCII_URL_ENCODED
    else:
        assert str(created_url) == NON_ASCII_URL_ENCODED


@pytest.mark.benchmark(group='Conversion of URL to string')
@pytest.mark.parametrize(
    'created_url', [
        furl.furl(NON_ASCII_URL),
        yarl.URL(NON_ASCII_URL),
        urlobject.URLObject.from_iri(NON_ASCII_URL),
        url.URL.parse(NON_ASCII_URL),
        hyperlink.URL.from_text(NON_ASCII_URL)
    ],
    ids=['furl', 'yarl', 'urlobject', 'url-py', 'hyperlink'])
def test_url_to_string(benchmark, created_url):
    if isinstance(created_url, url.URL):
        result = benchmark(lambda: created_url.punycode().escape())
    elif isinstance(created_url, furl.furl):
        result = benchmark(created_url.tostr)
    elif isinstance(created_url, hyperlink.URL):
        result = benchmark(lambda: created_url.to_uri().to_text())
    else:
        result = benchmark(str, created_url)
Example #11
 def _build_url(self, segment):
     """Return a :class:`~yarl.URL` consisting of
     :attr:`OAuth root <_oauth_root>`, followed by `segment`.
     """
     return yarl.URL(self._oauth_root) / segment
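
A quick, hedged illustration of the joining behavior this helper relies on: yarl's / operator appends a single path segment to the URL. The OAuth root below is an example value, not the one this project uses.

    import yarl

    # One extra path segment is appended (and percent-quoted if necessary).
    oauth_root = yarl.URL("https://auth.example.com/oauth")
    assert str(oauth_root / "token") == "https://auth.example.com/oauth/token"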
Example #12
from .web_response import Response, StreamResponse

__all__ = ('RequestHandler', 'RequestPayloadError', 'PayloadAccessError')

if TYPE_CHECKING:  # pragma: no cover
    from .web_server import Server  # noqa

_RequestFactory = Callable[[
    RawRequestMessage, StreamReader, 'RequestHandler', AbstractStreamWriter,
    'asyncio.Task[None]'
], BaseRequest]

_RequestHandler = Callable[[BaseRequest], Awaitable[StreamResponse]]

ERROR = RawRequestMessage('UNKNOWN', '/', HttpVersion10, {}, {}, True, False,
                          False, False, yarl.URL('/'))


class RequestPayloadError(Exception):
    """Payload parsing error."""


class PayloadAccessError(Exception):
    """Payload was accessed after response was sent."""


class RequestHandler(BaseProtocol):
    """HTTP protocol implementation.

    RequestHandler handles incoming HTTP request. It reads request line,
    request headers and request payload and calls handle_request() method.
Example #13
    async def async_play_media(self, media_type: str, media_id: str,
                               **kwargs: Any) -> None:
        """Play media from a URL or file, launch an application, or tune to a channel."""
        extra: dict[str, Any] = kwargs.get(ATTR_MEDIA_EXTRA) or {}
        original_media_type: str = media_type
        original_media_id: str = media_id
        mime_type: str | None = None
        stream_name: str | None = None
        stream_format: str | None = extra.get(ATTR_FORMAT)

        # Handle media_source
        if media_source.is_media_source_id(media_id):
            sourced_media = await media_source.async_resolve_media(
                self.hass, media_id, self.entity_id)
            media_type = MEDIA_TYPE_URL
            media_id = sourced_media.url
            mime_type = sourced_media.mime_type
            stream_name = original_media_id
            stream_format = guess_stream_format(media_id, mime_type)

        if media_type == FORMAT_CONTENT_TYPE[HLS_PROVIDER]:
            media_type = MEDIA_TYPE_VIDEO
            mime_type = FORMAT_CONTENT_TYPE[HLS_PROVIDER]
            stream_name = "Camera Stream"
            stream_format = "hls"

        if media_type in (MEDIA_TYPE_MUSIC, MEDIA_TYPE_URL, MEDIA_TYPE_VIDEO):
            # If media ID is a relative URL, we serve it from HA.
            media_id = async_process_play_media_url(self.hass, media_id)

            parsed = yarl.URL(media_id)

            if mime_type is None:
                mime_type, _ = mimetypes.guess_type(parsed.path)

            if stream_format is None:
                stream_format = guess_stream_format(media_id, mime_type)

            if extra.get(ATTR_FORMAT) is None:
                extra[ATTR_FORMAT] = stream_format

            if extra[ATTR_FORMAT] not in STREAM_FORMAT_TO_MEDIA_TYPE:
                _LOGGER.error(
                    "Media type %s is not supported with format %s (mime: %s)",
                    original_media_type,
                    extra[ATTR_FORMAT],
                    mime_type,
                )
                return

            if (media_type == MEDIA_TYPE_URL
                    and STREAM_FORMAT_TO_MEDIA_TYPE[extra[ATTR_FORMAT]]
                    == MEDIA_TYPE_MUSIC):
                media_type = MEDIA_TYPE_MUSIC

            if media_type == MEDIA_TYPE_MUSIC and "tts_proxy" in media_id:
                stream_name = "Text to Speech"
            elif stream_name is None:
                if stream_format == "ism":
                    stream_name = parsed.parts[-2]
                else:
                    stream_name = parsed.name

            if extra.get(ATTR_NAME) is None:
                extra[ATTR_NAME] = stream_name

        if media_type == MEDIA_TYPE_APP:
            params = {
                param: extra[attr]
                for attr, param in ATTRS_TO_LAUNCH_PARAMS.items()
                if attr in extra
            }

            await self.coordinator.roku.launch(media_id, params)
        elif media_type == MEDIA_TYPE_CHANNEL:
            await self.coordinator.roku.tune(media_id)
        elif media_type == MEDIA_TYPE_MUSIC:
            if extra.get(ATTR_ARTIST_NAME) is None:
                extra[ATTR_ARTIST_NAME] = "Home Assistant"

            params = {
                param: extra[attr]
                for (attr,
                     param) in ATTRS_TO_PLAY_ON_ROKU_AUDIO_PARAMS.items()
                if attr in extra
            }

            params = {"t": "a", **params}

            await self.coordinator.roku.play_on_roku(media_id, params)
        elif media_type in (MEDIA_TYPE_URL, MEDIA_TYPE_VIDEO):
            params = {
                param: extra[attr]
                for (attr, param) in ATTRS_TO_PLAY_ON_ROKU_PARAMS.items()
                if attr in extra
            }

            await self.coordinator.roku.play_on_roku(media_id, params)
        else:
            _LOGGER.error("Media type %s is not supported",
                          original_media_type)
            return

        await self.coordinator.async_request_refresh()
Example #14
def get(uri: str = "", *, base_url: str = None) -> _yarl.URL:
    if base_url is None:
        base_url = DefaultBaseUrl.get()
    uri = str(uri.lstrip("/"))
    return _yarl.URL(base_url) / uri
Example #15
from . import helpers, http
from .helpers import CeilTimeout
from .http import HttpProcessingError, HttpRequestParser, StreamWriter
from .log import access_logger, server_logger
from .streams import EMPTY_PAYLOAD
from .tcp_helpers import tcp_cork, tcp_keepalive, tcp_nodelay
from .web_exceptions import HTTPException
from .web_request import BaseRequest
from .web_response import Response


__all__ = ('RequestHandler', 'RequestPayloadError', 'PayloadAccessError')

ERROR = http.RawRequestMessage(
    'UNKNOWN', '/', http.HttpVersion10, {},
    {}, True, False, False, False, yarl.URL('/'))


class RequestPayloadError(Exception):
    """Payload parsing error."""


class PayloadAccessError(Exception):
    """Payload was accesed after responce was sent."""


class RequestHandler(asyncio.streams.FlowControlMixin, asyncio.Protocol):
    """HTTP protocol implementation.

    RequestHandler handles incoming HTTP request. It reads request line,
    request headers and request payload and calls handle_request() method.
Example #16
        "RequestHandler",
        AbstractStreamWriter,
        "asyncio.Task[None]",
    ],
    BaseRequest,
]

_RequestHandler = Callable[[BaseRequest], Awaitable[StreamResponse]]
_AnyAbstractAccessLogger = Union[
    Type[AbstractAsyncAccessLogger],
    Type[AbstractAccessLogger],
]


ERROR = RawRequestMessage(
    "UNKNOWN", "/", HttpVersion10, {}, {}, True, False, False, False, yarl.URL("/")
)


class RequestPayloadError(Exception):
    """Payload parsing error."""


class PayloadAccessError(Exception):
    """Payload was accessed after response was sent."""


class AccessLoggerWrapper(AbstractAsyncAccessLogger):
    """
    Wraps an AbstractAccessLogger so it behaves
    like an AbstractAsyncAccessLogger.
Example #17
 def __init__(self, url: Union[str, yarl.URL]) -> None:
     self._http_session = aiohttp.ClientSession()
     self._url = yarl.URL(url)
Example #18
def _get_request_host() -> Optional[str]:
    """Get the host address of the current request."""
    request = http.current_request.get()
    if request is None:
        raise NoURLAvailableError
    return yarl.URL(request.url).host
Example #19
 def add_cookie(self, key: str, value: str, domain: str) -> None:
     self.session.cookie_jar.update_cookies(cookies={key: value},
                                            response_url=yarl.URL(domain))
Example #20
def relative_url(url):
    """Convert an absolute url to a relative one."""
    return str(yarl.URL(url).relative())
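
A brief sketch of what relative() yields (the input URL is an illustrative value only): the scheme and host are dropped while the path, query, and fragment are kept.

    import yarl

    # Absolute URL in, origin-relative URL out.
    assert str(yarl.URL("https://example.com/api/states?limit=5").relative()) == "/api/states?limit=5"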
Example #21
def read_attach_php(request: web.Request):
    try:
        id = request.url.query['id']
    except KeyError:
        raise web.HTTPBadRequest
    raise web.HTTPTemporaryRedirect(yarl.URL('/attachment') / id)
Example #22
def server_url(request, target) -> str:
    return str(yarl.URL().build(host=request.host,
                                scheme=request.scheme,
                                path="/" + target))
Example #23
load_dotenv()


def env_fail(var: str):
    """Warn about an empty env var."""
    print(f"Warning, missing env var: {var}")
    exit(1)


luhack_guild_id = 485103891298385923
potential_luhacker_role_id = 486250289691754496
prospective_luhacker_role_id = 588429073018126336
verified_luhacker_role_id = 486249689050644480
disciple_role_id = 506420426419732499
furry_master_cyber_wizard_role_id = 502203959239245854
master_cyber_wizard_role_id = 502197689434374144
trusted_role_ids = {
    disciple_role_id,
    furry_master_cyber_wizard_role_id,
    master_cyber_wizard_role_id,
}
bot_log_channel_id = 588443109994528816
challenge_log_channel_id = 576418418211684393
inner_magic_circle_id = 631618075254325257
writeups_base_url = yarl.URL("https://scc-luhack.lancs.ac.uk/writeups")
challenges_base_url = yarl.URL("https://scc-luhack.lancs.ac.uk/challenges")

from_email_address = os.getenv("FROM_EMAIL_ADDRESS") or env_fail(
    "FROM_EMAIL_ADDRESS")
is_test_mode = (os.getenv("TEST_MODE") or "0") == "1"
Example #24
 def _filename_from_web_link(link: str):
     return yarl.URL(link).parts[-1]
Example #25
 def test_make_filename_same_host(self):
     self.filename, self.hashname = self.handler._make_filename(
         yarl.URL(self.root))
     self.assertEqual(self.filename, "/index.html")
     self.assertEqual(self.hashname, "d1546d731a9f30cc80127d57142a482b")
Example #26
class Client:
    """
    Main means of reading the 4chan API.

    Parameters:
        http: bool [True]
            whether to use http or https scheme
        hold: int [1]
            waiting time between requests
            consult https://github.com/4chan/4chan-API#api-rules
        limit: int [8000]
            maximum amount of concurrent requests
            this is to prevent "too many files" exceptions
        session: aiohttp.ClientSession [None]
            used for requesting
            if None, created automatically
        loop: asyncio.AbstractEventLoop [None]
            used and passed wherever applicable
            if None, fetched from asyncio.get_event_loop
    """

    __slots__ = ('_url', '_session', '_hold', '_lock', '_limit', '_loop')

    _base = yarl.URL('http://a.4cdn.org')

    _urls = (_base.with_scheme('https'), _base)

    def __init__(self, http=True, hold=1, limit=8000, session=None, loop=None):

        if not loop:

            loop = asyncio.get_event_loop()

        if not session:

            session = aiohttp.ClientSession(loop=loop)

        self._url = self._urls[http]

        self._hold = hold

        self._lock = asyncio.Lock(loop=loop)

        self._limit = asyncio.Semaphore(value=limit, loop=loop)

        self._session = session

        self._loop = loop

    @property
    def url(self):
        """
        Get the yarl.URL used for requests.
        """

        return self._url

    @property
    def session(self):
        """
        Get the aiohttp.ClientSession used for requests.
        """

        return self._session

    @property
    def loop(self):
        """
        Get the event loop used for async operations.
        """

        return self._loop

    async def _comply(self):

        await asyncio.sleep(self._hold, loop=self._loop)

        self._lock.release()

    async def interact(self, route):
        """
        Execute the request.

        Parameters:
            route: str
                base/{route}.json part of the url
        """

        if self._hold > 0:

            await self._lock.acquire()

            self._loop.create_task(self._comply())

        url = self._url.with_path(f'{route}.json')

        await self._limit.acquire()

        try:

            response = await self._session.request('GET', url)

        finally:

            self._limit.release()

        response.raise_for_status()

        payload = await response.json()

        return payload

    async def get_boards(self):
        """
        Get all boards.
        """

        data = await self.interact('boards')

        value = tuple(map(Asset, data['boards']))

        return value

    async def get_threads(self, board_id, page=None):
        """
        Get the board's active threads.

        Parameters:
            board_id: str
                board name
            page: int [None]
                board index
                omitting this will return all pages
        """

        route = f'{board_id}/' + (str(page) if page else 'threads')

        data = await self.interact(route)

        value = tuple(map(Asset, data))

        return value

    async def get_archive(self, board_id):
        """
        Get the board's archived threads.

        Parameters:
            board_id: str
                board name
        """

        route = f'{board_id}/archive'

        try:

            data = await self.interact(route)

        except aiohttp.ClientResponseError as error:

            if error.status == 404:

                return ()

            raise

        value = tuple(data)

        return value

    async def get_thread(self, board_id, thread_id):
        """
        Get the board thread's posts.

        Parameters:
            board_id: str
                board name
            thread_id: int
                thread identifier
        """

        route = f'{board_id}/thread/{thread_id}'

        data = await self.interact(route)

        value = tuple(map(Asset, data['posts']))

        return value

    async def get_catalog(self, board_id):
        """
        Get the board's catalog.

        Parameters:
            board_id: str
                board name
        """

        route = f'{board_id}/catalog'

        data = await self.interact(route)

        value = Asset(data)

        return value
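
A minimal usage sketch for the Client above, with caveats: it assumes the older asyncio/aiohttp releases this class targets (the loop= keyword arguments it passes were removed in Python 3.10), network access to the 4chan API, and that Asset is defined alongside the class.

    import asyncio

    async def main():
        client = Client()  # default: plain-http base URL, 1-second hold between requests
        try:
            boards = await client.get_boards()
            print(f"fetched {len(boards)} boards")
        finally:
            await client.session.close()

    asyncio.get_event_loop().run_until_complete(main())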
Example #27
    from .web_server import Server  # noqa

_RequestFactory = Callable[[
    RawRequestMessage,
    StreamReader,
    "RequestHandler",
    AbstractStreamWriter,
    "asyncio.Task[None]",
], BaseRequest]

_RequestHandler = Callable[[BaseRequest], Awaitable[StreamResponse]]
_AnyAbstractAccessLogger = Union[Type[AbstractAsyncAccessLogger],
                                 Type[AbstractAccessLogger], ]

ERROR = RawRequestMessage("UNKNOWN", "/", HttpVersion10, {}, {}, True, False,
                          False, False, yarl.URL("/"))


class RequestPayloadError(Exception):
    """Payload parsing error."""


class PayloadAccessError(Exception):
    """Payload was accessed after response was sent."""


class AccessLoggerWrapper(AbstractAsyncAccessLogger):
    """
    Wraps an AbstractAccessLogger so it behaves
    like an AbstractAsyncAccessLogger.
    """
Example #28
def slug_url(url) -> str | None:
    """Convert a camera url into a string suitable for a camera name."""
    if not url:
        return None
    return slugify(yarl.URL(url).host)
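
A minimal sketch of the host-extraction step above (slugify itself comes from Home Assistant's utilities and is not shown); the camera URL is an illustrative value.

    import yarl

    # .host isolates the hostname, ignoring scheme, credentials, port, and path.
    assert yarl.URL("rtsp://user:pass@camera.local:554/stream1").host == "camera.local"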
Example #29
def test_reverse_and_forward_url(url, rev_url):
    assert reverse_url(url) == rev_url
    assert yarl.URL(forward_url(rev_url)) == yarl.URL(url)
Example #30
 def __init__(self, debug=False):
     self.loop = asyncio.get_event_loop()
     self.geoip_url = yarl.URL('http://ip-api.com/json/')
     self.whois_server = 'whois.apnic.net'
     self.debug = debug
     self.loop.set_debug(self.debug)