def WebStat_2mins_datapoints_25():
    """Returns a webstat loaded with 25 datapoints"""
    webstat = WebStat(-600)

    for i in range(24):
        webstat.update({"response_code": 200, "response_time": 0.05})

    # The last datapoint increases the average response time
    webstat.update({"response_code": 200, "response_time": 1})

    return webstat
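# Illustrative usage sketch (not part of the original tests). It assumes
# WebStat.get_updated_stats(timeframe=...) returns a dict containing an
# "availability" key, as implied by Website.produce_report further below;
# the helper name and the -120 second timeframe are assumptions.
def example_query_webstat_25_datapoints():  # hypothetical helper
    webstat = WebStat_2mins_datapoints_25()
    stats = webstat.get_updated_stats(timeframe=-120)
    return stats["availability"]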
def WebStat_10mins_balanced_datapoints(datetimes_dict):
    """Returns a webstat with a 10 minute max observation window"""
    webstat = WebStat(600)

    # Append directly to data_points so received_at can be set explicitly;
    # the response times are symmetric around zero, so they average to 0
    for dt_key, response_time in [
        ("minus_10", -10.5),
        ("minus_2", -2.5),
        ("plus_2", 2.5),
        ("plus_10", 10.5),
    ]:
        webstat.data_points.append(
            {
                "response_code": 200,
                "response_time": response_time,
                "received_at": datetimes_dict[dt_key],
            }
        )

    return webstat
def WebStat_10mins_recent_datapoints(datetimes_dict):
    """Returns a 10 min observation window webstat with 5 recent datapoints"""
    ws = WebStat(600)

    for k in ["now_minus_2", "now_minus_10", "now_plus_2", "now_plus_10"]:

        ws.data_points.append(
            {
                "response_code": 200,
                "response_time": 0.05,
                "received_at": datetimes_dict[k],
            }
        )

    # Last datapoint increases avg
    ws.data_points.append(
        {
            "response_code": 200,
            "response_time": 1.5,
            "received_at": datetimes_dict["now"],
        }
    )

    return ws
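# Illustrative sketch of the datetimes_dict the two helpers above expect.
# The real fixture is not shown in this module; the key names are taken from
# the helpers, while the offsets (relative to now, in minutes) and the builder
# name are assumptions. Assumes `import datetime` at module level, as used
# elsewhere in this module.
def example_datetimes_dict():  # hypothetical builder
    now = datetime.datetime.now()
    minute = datetime.timedelta(minutes=1)
    return {
        "now": now,
        # keys used by WebStat_10mins_balanced_datapoints
        "minus_10": now - 10 * minute,
        "minus_2": now - 2 * minute,
        "plus_2": now + 2 * minute,
        "plus_10": now + 10 * minute,
        # keys used by WebStat_10mins_recent_datapoints
        "now_minus_10": now - 10 * minute,
        "now_minus_2": now - 2 * minute,
        "now_plus_2": now + 2 * minute,
        "now_plus_10": now + 10 * minute,
    }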
class Website:
    def __init__(self,
                 url=None,
                 check_interval=None,
                 max_observation_window=-600):
        """
        PARAMETERS: url: String. Must include protocol prefix e.g. http://
        """
        # Both raise exceptions if not compliant
        self.validate_url(url)
        self.validate_check_interval(check_interval)
        self.url = url
        self.check_interval = check_interval
        self.stats = WebStat(max_observation_window)
        self.dashboard = WebPerformanceDashboard()

    def validate_url(self, url):
        """Raises if a GET request to the url fails"""
        try:
            _ = httpx.get(url)
        except Exception:
            raise Exception(f"URL error: could not reach {url}")

    def validate_check_interval(self, check_interval: int):
        """
        check_interval must be a positive integer
        """
        # Guard against the None default before comparing
        if not isinstance(check_interval, int) or check_interval < 1:
            raise Exception("check_interval must be a positive integer")

    def update_alert_process(self, availability: float):
        """
        Takes an availability % and sends it to the self.stats WebStat
        instance's alert coroutine. If the alert criteria are triggered,
        the resulting alert message is persisted to the dashboard.
        """
        # Don't send a None value,
        # but 0.0 is ok
        if availability is not None:
            alert = self.stats.alert_coro.send(availability)
            if alert:
                # Alerts are saved to the dashboard
                self.dashboard.persisted_messages.append(alert)

    async def produce_report(self, timeframe: int, writer: ConsoleWriter):
        """
        Retrieves updated stats from the self.stats instance
        and forwards them to the ConsoleWriter instance writer
        for formatting before writing to the console

        PARAMETERS: timeframe: in seconds as a negative int, e.g.
                               -60 for datapoints from the previous minute
                    writer: ConsoleWriter instance responsible for
                            formatting reports and writing to the console
        """

        updated_stats = self.stats.get_updated_stats(timeframe=timeframe)
        updated_stats["url"] = self.url
        timestamp = datetime.datetime.now().strftime("%c")  # Local time format
        updated_stats["timestamp"] = timestamp
        updated_stats["timeframe"] = timeframe

        self.dashboard.data = updated_stats

        # Adds alert message if needed
        self.update_alert_process(updated_stats["availability"])

        # Outputs all dashboards attached to the writer
        writer.write_dashboards_to_console()
        await asyncio.sleep(0)

    async def update(self):
        r = await self.ping_url(self.url)
        datapoint = {
            "response_code": r.status_code,
            # httpx returns elapsed as a timedelta; store seconds as a float
            "response_time": r.elapsed.total_seconds()
        }
        # Only updating stats here.
        # No query until reports are generated.
        self.stats.update(datapoint)

    async def periodic_data_update_process(self):
        """
        Updates stats every check_interval seconds
        """
        while True:
            await asyncio.sleep(self.check_interval)
            await self.update()

    async def periodic_report_production_process(self, frequency: int,
                                                 timeframe: int,
                                                 writer: ConsoleWriter):
        """
        Coroutine to schedule a report every {frequency} seconds
        using data over the last {timeframe} minutes.

        PARAMETERS: frequency and timeframe fixed for challenge
                    but can be changed in 
                    website_monitoring_application instantiation
        """
        while True:
            await asyncio.sleep(frequency)
            await self.produce_report(timeframe, writer)

    async def all_async_tasks(self, schedules: dict, writer: ConsoleWriter):
        """
        A coroutine pulling update and reporting tasks from the queues.

        schedules are dicts with the following format:

            schedule1 = {"frequency": 10, "timeframe": -600}
            schedule2 = {"frequency": 60, "timeframe": -60 * 60}

        Fixed for the challenge.
        """
        # Every Website instance shares the same writer instance
        self.writer = writer

        # Data update process
        coros = [self.periodic_data_update_process()]

        # Adds task for each scheduled report
        for schedule in schedules:
            freq = schedule["frequency"]
            timef = schedule["timeframe"]

            coros.append(
                # Reporting process
                self.periodic_report_production_process(freq, timef, writer)
            )

        await asyncio.gather(*coros)

    async def ping_url(self, url) -> httpx.Response:
        """
        Responsible for making a single GET request to
        the given url and returning the response

        PARAMETERS: url: url to ping
        RETURNS: httpx response object
        """
        async with httpx.AsyncClient() as client:
            return await client.get(url, timeout=None)
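# Illustrative wiring sketch (not part of the original module). The schedule
# values mirror the ones documented in all_async_tasks; the url, the
# ConsoleWriter construction, and the entry point name are placeholders.
# How the Website's dashboard gets attached to the writer is not shown here
# and is application-specific.
async def example_monitoring_run():  # hypothetical entry point
    writer = ConsoleWriter()
    site = Website(url="http://example.com", check_interval=10)
    schedules = [
        {"frequency": 10, "timeframe": -600},      # last 10 min, every 10 s
        {"frequency": 60, "timeframe": -60 * 60},  # last hour, every 60 s
    ]
    await site.all_async_tasks(schedules, writer)

# Would be launched with e.g. asyncio.run(example_monitoring_run())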
def WebStat_25mins():
    """Returns a 2 minute observation window webstat"""
    return WebStat(max_observation_window=-1500)
def alert_generator():
    """Returns the alert generator from a fresh WebStat instance"""
    wb = WebStat()
    return wb.alert_generator()
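# Illustrative sketch of driving the alert coroutine in a test. It assumes a
# generator-based coroutine that is primed with next() and returns an alert
# string (or None) for each availability value sent, mirroring how
# Website.update_alert_process uses self.stats.alert_coro.send() above; the
# priming step and the availability thresholds are assumptions.
def example_alert_probe():  # hypothetical helper
    gen = alert_generator()
    next(gen)                   # prime the generator
    ok = gen.send(100.0)        # healthy availability, expect no alert
    degraded = gen.send(50.0)   # low availability, may trigger an alert
    return ok, degraded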