Example #1
    @classmethod
    def from_iterpoint(cls: Type[PT], *values: Any,
                       meta: Dict[str, str]) -> PT:
        """Only intended to be passed to the ``iterpoints`` method of ``aioinflux`` to
        parse the points and construct valid InfluxDBPoint instances. See its
        documentation for a description of the contents of ``values`` and ``meta``.

        The metadata of the dataclass attributes are used to parse and convert the
        necessary information; unknown values and tags are dropped.

        :param cls: Subclass of :class:`InfluxDBPoint` on which this method is called.
            Instances of this class are constructed from the data returned by InfluxDB
            and returned.
        """
        measurement_name = meta["name"]
        combined_dict = dict(zip(meta["columns"], values))
        args: Dict[str, Any] = {
            "measurement": measurement_name,
            "timestamp": deserialize(combined_dict["time"], datetime),
        }
        for f in fields(cls):
            if f.default is not MISSING:
                continue
            # values of these fields are already known
            if f.name in args:
                continue
            try:
                args[f.name] = deserialize(combined_dict[f.name], f)
            except Exception as e:
                internal_logger.info(
                    "%s; values were: %s, %s", e, combined_dict.get(f.name), f)
        new_point = cls(**args)
        internal_logger.debug("Constructed %s", new_point)
        return new_point
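
A hedged usage sketch: ``aioinflux`` invokes the parser as ``parser(*values, meta=meta)`` for every point of a query result, so the classmethod above plugs in directly. ``BillingPoint`` stands for a hypothetical :class:`InfluxDBPoint` subclass and ``influx_client`` for an already created ``aioinflux`` client; the query itself is illustrative only.

from aioinflux import iterpoints

async def fetch_points(influx_client) -> list:
    resp = await influx_client.query("SELECT * FROM billing LIMIT 10")
    # every raw point is handed to from_iterpoint together with the result metadata
    return list(iterpoints(resp, parser=BillingPoint.from_iterpoint))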
Example #2
async def credits_history(request: web.Request) -> Dict[str, Any]:
    """Shows a functional draft for visualization of a project's credits history.

    To generate test entries take a look at ``bin/generate_credits_history.py`` at the
    root of this project.
    """
    internal_logger.info(f"Called: {request.rel_url}")
    return {"project_name": request.match_info["project_name"]}
Example #3
async def create_worker(app: web.Application) -> None:
    """Creates :ref:`Task Workers` to process items put into the :ref:`Task Queue`.

    The number of workers can be configured via ``OS_CREDITS_WORKERS``, see :ref:`Settings`.
    """
    app["task_workers"] = {
        f"worker-{i}": create_task(worker(f"worker-{i}", app))
        for i in range(app["config"]["OS_CREDITS_WORKERS"])
    }
    internal_logger.info("Created %d workers", len(app["task_workers"]))
Example #4
async def costs_per_hour(request: web.Request) -> web.Response:
    """Use for example

    .. code-block:: console

       $ curl localhost:8000/api/costs_per_hour \\
             -H "Content-Type: application/json" \\
             -d '{"cpu":16,"ram":32768}'

    Or if you have `httpie <https://github.com/jakubroztocil/httpie>`_ installed

    .. code-block:: console

       $ http -j :8000/api/costs_per_hour cpu:=16 ram:=32768
    ---
    description: Given the submitted specs of one or multiple machines combined,
      calculate the expected costs per hour. See ``GET /api/metrics`` to retrieve
      information about the supported specs. Since the input is dynamic and we are not
      using Swagger models, you have to query the API from the command line. See the
      documentation of this function for example calls.
    tags:
      - Service
    consumes:
      - application/json
    produces:
      - application/json
    responses:
      200:
        description: Costs per hour
        schema:
          type: number
          format: float
    """
    internal_logger.info(f"Called: {request.rel_url}")
    try:
        machine_specs = await request.json()
    except JSONDecodeError:
        raise web.HTTPBadRequest(reason="Invalid JSON")
    returned_costs_per_hour = Decimal(0)
    for friendly_name, spec in machine_specs.items():
        try:
            spec = Decimal(spec)
            returned_costs_per_hour += Metric.metrics_by_friendly_name[
                friendly_name].costs_per_hour(spec)
        except KeyError:
            raise web.HTTPNotFound(
                reason=f"Unknown measurement `{friendly_name}`.")
        except (TypeError, InvalidOperation):
            # ``decimal.InvalidOperation`` covers non-numeric strings such as "abc"
            raise web.HTTPBadRequest(
                reason=f"Parameter {friendly_name} had wrong type.")
    return web.json_response(
        float(returned_costs_per_hour.quantize(
            config["OS_CREDITS_PRECISION"])))
Example #5
async def update_logging_config(request: web.Request) -> web.Response:
    """
    Allows updating the logging configuration at runtime without a restart.
    """
    internal_logger.info(f"Called: {request.rel_url}")
    logging_json_text = await request.text()
    try:
        logging_config = loads(logging_json_text)
    except JSONDecodeError as e:
        raise web.HTTPBadRequest(reason=str(e))
    try:
        logging.config.dictConfig(logging_config)
    except Exception as e:
        raise web.HTTPBadRequest(reason=str(e))
    return web.HTTPNoContent()
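
A minimal payload sketch accepted by ``logging.config.dictConfig``; POSTing it as JSON to the ``/logconfig`` route registered in ``create_app`` below would raise the root log level at runtime:

minimal_config = {
    "version": 1,
    "incremental": True,  # only adjust levels, keep existing handlers
    "root": {"level": "DEBUG"},
}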
Example #6
async def get_metrics(_: web.Request) -> web.Response:
    """
    Returns a JSON object describing the currently supported metrics and their per-hour
    costs.
    ---
    description: Get type and description of currently needed/supported measurements.
      Also describes the structure of the corresponding POST API to calculate the
      per-hour usage of a given machine constellation.
    tags:
      - Service
    produces:
      - application/json
    responses:
      200:
        description: Information object
        schema:
          type: object
          required: [metrics]
          properties:
            metrics:
              type: object
              required: [description, type, metric_name, friendly_name]
              properties:
                description:
                  type: string
                  description: Description of the measurement
                type:
                  type: string
                  description: Type information
                metric_name:
                  type: string
                  description: Name/identifier of the metric inside Prometheus and
                      InfluxDB
                friendly_name:
                  type: string
                  description: Human readable name of the metric.
    """
    internal_logger.info(f"Called: {_.rel_url}")
    metric_information = {
        friendly_name: metric.api_information()
        for friendly_name, metric in Metric.metrics_by_friendly_name.items()
    }
    return web.json_response(metric_information)
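
A quick client-side sketch using only the standard library; host and port are assumptions consistent with the curl examples above:

import json
from urllib.request import urlopen

with urlopen("http://localhost:8000/api/metrics") as resp:
    metrics = json.load(resp)
for friendly_name, info in metrics.items():
    print(friendly_name, info)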
Example #7
async def influxdb_write(request: web.Request) -> web.Response:
    """
    Consumes the `Line Protocol
    <https://docs.influxdata.com/influxdb/v1.7/write_protocols/line_protocol_tutorial/>`_
    of InfluxDB.

    :param request: Incoming request with one or more *InfluxDB Line Protocol* points.
    ---
    description: Used by InfluxDB to post subscription updates
    tags:
      - Service
    consumes:
      - text/plain
    parameters:
      - in: body
        name: line
        description: Point in Line Protocol format (https://docs.influxdata.com/influxdb/v1.7/write_protocols/line_protocol_tutorial)
        schema:
          type: string
          example: weather,location=us-midwest temperature=82 1465839830100400200
        required: true
    responses:
      202:
        description: A corresponding task object will be created. See application log
          for further information
    """  # noqa (cannot fix long url)
    # .text() performs automatic decoding from bytes
    internal_logger.info(f"Called: {request.rel_url}")
    influxdb_lines = await request.text()
    # an unknown number of lines will be sent, put them all into the queue
    for influx_line in influxdb_lines.splitlines():
        await request.app["task_queue"].put(influx_line)
        internal_logger.debug(
            "Put %s into queue (%s elements)",
            influx_line,
            request.app["task_queue"].qsize(),
        )
    # always answer 202
    return web.HTTPAccepted()
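
A sketch of the request body InfluxDB would POST to this endpoint: plain text with one Line Protocol point per line. The first point is taken from the docstring above, the second is made up for illustration:

influxdb_lines = (
    "weather,location=us-midwest temperature=82 1465839830100400200\n"
    "weather,location=us-east temperature=75 1465839830100400300"
)
# influxdb_write splits this body and puts every line into the task queue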
Example #8
async def stop_worker(app: web.Application, queue_timeout: int = 120) -> None:
    """Tries to shutdown all :ref:`Task Workers` gracefully by first emptying the
    :ref:`Task Queue` before cancelling the workers.

    :param app: Application instance holding the worker tasks and the task queue.
    :param queue_timeout: Seconds to wait finish remaining tasks in queue before killing
        task workers.
    """
    internal_logger.info("Waiting up to %d seconds to finish remaining tasks.",
                         queue_timeout)
    try:
        await wait_for(app["task_queue"].join(), timeout=queue_timeout)
    except TimeoutError:
        internal_logger.warning(
            "Waited %d seconds for all remaining tasks to be processed, killing "
            "workers now.",
            queue_timeout,
        )
    for task in app["task_workers"].values():
        task.cancel()
    await gather(*app["task_workers"].values(), return_exceptions=True)
Example #9
async def ping(_: web.Request) -> web.Response:
    """
    Simple ping endpoint to be able to determine whether the application is up and
    running.

    :return: Response with the text body ``Pong``.

    ---
    description: This endpoint allows testing that the service is up.
    tags:
    - Health check
    produces:
    - text/plain
    responses:
        200:
            description: Successful operation, returns the text "Pong"
        405:
            description: Invalid HTTP method
    """
    internal_logger.info(f"Called: {_.rel_url}")
    return web.Response(text="Pong")
Example #10
    def construct_message(self) -> MIMEText:
        """Constructs a :class:`~email.mime.text.MIMEText` object from the
        notification's attributes.

        The recipient placeholders are resolved, body and subject templates are rendered
        with the following default placeholders:

        ``project``
            Name of the Project as stored in Perun.
        ``credits_used``
            The current value of :class:`~os_credits.perun.attributes.DenbiCreditsUsed`.
        ``credits_granted``
            The current value of
            :class:`~os_credits.perun.attributes.DenbiCreditsGranted`.

        Subclasses are advised to add their own placeholders to
        :attr:`custom_placeholders` instead of overriding this method. If any
        placeholder of a template cannot be resolved it will be left in place to ensure
        that the message can be constructed and sent.
        """

        placeholders = {
            "project": self.group.name,
            "credits_used": str(self.group.credits_used.value),
            "credits_granted": str(self.group.credits_granted.value),
            **self.custom_placeholders,
        }
        try:
            rendered_subject = self._subject.substitute(placeholders)
        except KeyError as e:
            internal_logger.error(
                "Subject of Notification %s contains unknown placeholder %s. Sending "
                "partially unformatted mail.",
                type(self).__name__,
                e,
            )
            rendered_subject = self._subject.safe_substitute(placeholders)
        except ValueError as e:
            internal_logger.error(
                "Subject of Notification %s contains invalid placeholder %s.",
                type(self).__name__,
                e,
            )
            raise BrokenTemplateError(f"Subject of Notification {type(self).__name__}")
        try:
            rendered_body = self._body.substitute(placeholders)
        except KeyError as e:
            internal_logger.error(
                "Body of Notification %s contains unknown placeholder %s. Sending "
                "partially unformatted mail.",
                type(self).__name__,
                e,
            )
            rendered_body = self._body.safe_substitute(placeholders)
        except ValueError as e:
            internal_logger.error(
                "Body of Notification %s contains invalid placeholder %s.",
                type(self).__name__,
                e,
            )
            raise BrokenTemplateError(f"Body of Notification {type(self).__name__}")
        message = MIMEText(rendered_body)
        message["Subject"] = rendered_subject
        message["From"] = config["MAIL_FROM"]
        if config["NOTIFICATION_TO_OVERWRITE"].strip():
            internal_logger.info(
                "Applying `NOTIFICATION_TO_OVERWRITE` setting to notification `%s`",
                self,
            )
            message["To"] = config["NOTIFICATION_TO_OVERWRITE"]
        else:
            message["To"] = self._resolve_recipient_placeholders(self.to)
            message["Cc"] = self._resolve_recipient_placeholders(self.cc)
            message["Bcc"] = self._resolve_recipient_placeholders(self.bcc)
        internal_logger.debug(
            "Recipients of notification `%s`: To=%s, Cc=%s, Bcc=%s",
            self,
            message["To"],
            message["Cc"],
            message["Bcc"],
        )

        return message
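
A minimal sketch of the substitution behaviour the method relies on, assuming ``_subject`` and ``_body`` are ``string.Template`` instances: ``substitute`` raises ``KeyError`` for unknown placeholders while ``safe_substitute`` leaves them in place.

from string import Template

body = Template("Project $project used $credits_used of $credits_granted credits.")
placeholders = {"project": "demo", "credits_used": "100"}
try:
    print(body.substitute(placeholders))
except KeyError:
    # credits_granted is missing, fall back just like the method above does
    print(body.safe_substitute(placeholders))
# -> Project demo used 100 of $credits_granted credits.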
Example #11
async def create_app(
    _existing_influxdb_client: Optional[InfluxDBClient] = None
) -> web.Application:
    """Entry point of the whole service.

    #. Set up the logging config to be able to log as much as possible, see
       :ref:`Logging`
    #. Connect the functions to their endpoints, see :ref:`Endpoints`
    #. Set up or create all helpers

       #. Our :class:`~os_credits.influx.client.InfluxDBClient`, see :ref:`InfluxDB
          Interaction`
       #. Create the :ref:`Task Queue` used to process incoming measurements
       #. Set up our :ref:`Group Locks`
       #. Swagger endpoint, see :ref:`Swagger`
       #. Set up the jinja2 template engine used by :ref:`Credits History`

    #. Make sure that the database for our :ref:`Credits History` exists. Error out if
       it does not since we (deliberately) do not run with admin access to it.
    #. Schedule the following functions to run on start

       - :func:`create_client_session`
       - :func:`create_worker`
       - :func:`setup_prometheus_metrics`

    #. Schedule the following functions to run on shutdown

       - :func:`stop_worker`
       - :func:`close_client_sessions`

    :param _existing_influxdb_client: Only used when testing the code
    :return: Created `aiohttp <https://docs.aiohttp.org>`_ Application instance.
    """
    # imported inside the function to allow pytest to set environment variables and have
    # them applied
    from os_credits.settings import config
    from os_credits.log import DEFAULT_LOGGING_CONFIG

    dictConfig(DEFAULT_LOGGING_CONFIG)
    internal_logger.info("Applied default logging config")

    app = web.Application()
    app.add_routes([
        web.get("/delete", delete_mb_and_vcpu_since),
        web.get("/delete_credits_left", delete_credits_left),
        web.get(
            "/api/credits_history/{project_name}",
            credits_history_api,
            name="api_credits_history",
        ),
        web.get("/api/metrics", get_metrics, name="get_metrics"),
        web.post("/api/costs_per_hour", costs_per_hour, name="costs_per_hour"),
        web.get(
            "/credits_history/{project_name}",
            credits_history,
            name="credits_history",
        ),
        # not naming this route since it is also used as a health check by Docker
        web.get("/ping", ping, name="ping"),
        web.get("/stats", application_stats),
        # not naming this route since the endpoint is defined by InfluxDB and
        # therefore fixed
        web.post("/write", influxdb_write),
        web.post("/logconfig", update_logging_config),
        web.get("/metrics", aio.web.server_stats, name="metrics"),
        web.static("/static", APP_ROOT / "static"),
    ])
    app.update(
        name="os-credits",
        influx_client=_existing_influxdb_client or InfluxDBClient(),
        task_queue=Queue(),
        group_locks=defaultdict(create_new_group_lock),
        start_time=datetime.now(),
        config=config,
    )

    if not await app["influx_client"].ensure_history_db_exists():
        raise MissingInfluxDatabase(
            f"Required database {config['CREDITS_HISTORY_DB']} does not exist inside "
            "InfluxDB. Must be created externally since this code runs without admin "
            "access.")

    # setup jinja2 template engine
    setup(app, loader=FileSystemLoader(str(APP_ROOT / "templates")))

    app.on_startup.append(create_client_session)
    app.on_startup.append(create_worker)
    app.on_startup.append(setup_prometheus_metrics)
    app.on_cleanup.append(stop_worker)
    app.on_cleanup.append(close_client_sessions)

    setup_swagger(app)

    internal_logger.info("Registered resources: %s",
                         pformat(list(app.router.resources())))

    return app
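
A hedged launch sketch: ``web.run_app`` also accepts a coroutine returning an ``Application``, so the factory above can be handed to it directly; the port is an assumption consistent with the curl examples above:

from aiohttp import web

if __name__ == "__main__":
    web.run_app(create_app(), port=8000)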
Example #12
async def delete_mb_and_vcpu_since(request: web.Request) -> web.Response:
    internal_logger.info(f"Called: {request.rel_url}")
    await stop_worker(request.app, 500)
    try:
        influx_client: InfluxDBClient = request.app["influx_client"]
        since_date = request.query["since_date"]
        datetime_format = "%Y-%m-%d %H:%M:%S"
        since_date = datetime.strptime(since_date, datetime_format)
        project_names = request.query["project_names"]
        project_names = project_names.split(",")
        since_date = int(since_date.timestamp())
    except KeyError as e:
        internal_logger.exception(
            "Exception when getting request information for "
            "deleting value history: %s", e)
        await create_worker(request.app)
        return web.HTTPBadRequest(text="Missing request parameter.")
    except ValueError as e:
        internal_logger.exception(
            "Exception when getting request information for "
            "deleting value history: %s", e)
        await create_worker(request.app)
        return web.HTTPBadRequest(text="Invalid parameter value.")
    except Exception as e:
        internal_logger.exception(
            "Exception when getting request information for "
            "deleting value history: %s", e)
        await create_worker(request.app)
        return web.HTTPInternalServerError(text="Unexpected error.")

    return_list = []
    internal_logger.info(
        f"Trying to delete usage values for project: {project_names} "
        f"since {since_date}.")
    for project_name in project_names:
        try:
            last_timestamps = await influx_client.delete_mb_and_vcpu_measurements(
                project_name, since_date)

            if "project_name" not in last_timestamps \
                    or "location_id" not in last_timestamps:
                internal_logger.info(f"Could not find group {project_name} in "
                                     f"influxdb!")
                return_list.append({
                    "error":
                    f"Could not find '{project_name}' "
                    f"in influxdb."
                })
                continue
            perun_group = Group(last_timestamps["project_name"],
                                int(last_timestamps["location_id"]))
            await perun_group.connect()
            last_mb = last_timestamps.get("last_mb", None)
            if last_mb:
                perun_group.credits_timestamps.value[
                    "project_mb_usage"] = datetime.fromtimestamp(
                        last_timestamps["last_mb"]["time"])
            else:
                perun_group.credits_timestamps.value[
                    "project_mb_usage"] = datetime.now()
            last_vcpu = last_timestamps.get("last_vcpu", None)
            if last_vcpu:
                perun_group.credits_timestamps.value[
                    "project_vcpu_usage"] = datetime.fromtimestamp(
                        last_timestamps["last_vcpu"]["time"])
            else:
                perun_group.credits_timestamps.value[
                    "project_vcpu_usage"] = datetime.now()
            await perun_group.save()
            internal_logger.info(f"Deleted values and set timestamps for "
                                 f"'{project_name}' with {last_timestamps}.")
            return_list.append(last_timestamps)
        except GroupNotExistsError as e:
            internal_logger.warning(
                "Could not resolve group with name `%s` against perun. %r",
                project_name, e)
            return_list.append(
                {"error": f"Could not find perun group "
                 f"'{project_name}'."})
            continue
        except Exception as e:
            internal_logger.exception(
                f"Exception when deleting value history:\n"
                f"{e}")
            return_list.append({
                "error":
                f"Could not delete values for "
                f"'{project_name}'."
            })
            continue

    await create_worker(request.app)
    return web.json_response(return_list)
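
A hypothetical call sketch: the handler expects ``since_date`` in ``%Y-%m-%d %H:%M:%S`` format and a comma-separated ``project_names`` list; host and port are assumptions:

import aiohttp

async def delete_usage(project_names: list) -> list:
    params = {
        "since_date": "2020-01-01 00:00:00",
        "project_names": ",".join(project_names),
    }
    async with aiohttp.ClientSession() as session:
        async with session.get("http://localhost:8000/delete", params=params) as resp:
            return await resp.json()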
Example #13
async def application_stats(request: web.Request) -> web.Response:
    """
    API endpoint returning current stats of the running application
    ---
    description: Allows querying the application state. Should not be publicly accessible.
    tags:
      - Health check
      - Monitoring
    produces:
      - application/json
    parameters:
      - in: query
        name: verbose
        type: boolean
        default: false
        description: Include extended (computationally expensive) information
    responses:
      200:
        description: Stats object
        schema:
          type: object
          required: [number_of_workers, queue_size, number_of_locks, uptime]
          properties:
            number_of_workers:
              type: integer
              description: Number of worker tasks as specified in config file
            queue_size:
              type: integer
              description: Number of tasks currently pending
            number_of_locks:
              type: integer
              description: Number of group/project locks inside the application, should
                correspond to the number of billed groups/projects
            uptime:
              type: string
              description: Uptime, string representation of a Python
                [`timedelta`](https://docs.python.org/3/library/datetime.html#timedelta-objects)
                object
            task_stacks:
              type: object
              required: [worker-n]
              properties:
                worker-n:
                  type: string
                  description: Stack of the worker task
            group_locks:
              type: object
              required: [group_name]
              properties:
                group_name:
                  type: string
                  description: State of the group/project async-lock
    """
    internal_logger.info(f"Called: {request.rel_url}")
    stats = {
        "number_of_workers": config["OS_CREDITS_WORKERS"],
        "queue_size": request.app["task_queue"].qsize(),
        "number_of_locks": len(request.app["group_locks"]),
        "uptime": str(datetime.now() - request.app["start_time"]),
    }
    if ("verbose" in request.query and request.query["verbose"]
            and request.query["verbose"] != "false"):
        stats.update({
            "task_stacks": {
                name: [format_stack(stack)[0] for stack in task.get_stack()][0]
                for name, task in request.app["task_workers"].items()
            },
            "group_locks": {
                key: repr(lock)
                for key, lock in request.app["group_locks"].items()
            },
        })
    return web.json_response(stats)
Example #14
async def delete_credits_left(request: web.Request) -> web.Response:
    internal_logger.info(f"Called: {request.rel_url}")
    influx_client: InfluxDBClient = request.app["influx_client"]
    deleted_from_projects = await influx_client.delete_credits_left_measurements()
    return web.json_response(deleted_from_projects)
Example #15
async def credits_history_api(request: web.Request) -> web.Response:
    """Endpoint for the website provided by :func:`credits_history` to retrieve its
    data, the credits history of a given project.

    ---
    description: >
      Provides the history of credits of the given project. The return format is
      currently optimized for ``c3.js`` which is used by the internal visualization.
      The first entry of every response array is a string followed by the data. The
      ``metrics`` array contains the ``friendly_name`` of the metric responsible for
      this billing. The ``timestamps`` array contains the timestamps of the measurements
      which caused the billing. To generate test entries take a look at
      ``bin/generate_credits_history.py`` at the root of this project. Timestamps are
      formatted ``%Y-%m-%d %H:%M:%S`` and sorted descending.
    tags:
      - Service
    produces:
      - application/json
    parameters:
      - name: project_name
        in: path
        type: string
        description: Name of the project
      - name: start_date
        in: query
        type: string
        format: date
        description: Start date of the credits data, format ``%Y-%m-%d %H:%M:%S``
      - name: end_date
        in: query
        type: string
        format: date
        description: End date of the credits data, format ``%Y-%m-%d %H:%M:%S``
    responses:
      200:
        description: Credits history
        schema:
          type: object
          required: [timestamps, credits, metrics]
          properties:
            timestamps:
              type: array
              items:
                type: string
            credits:
              type: array
              items:
                type: number
            metrics:
              type: array
              items:
                type: string
      204:
        description: Project does have credits history but not for given parameters.
      400:
        description: Bad value of one or more parameters.
      404:
        description: Could not find any history data.
    """
    internal_logger.info(f"Called: {request.rel_url}")
    datetime_format = "%Y-%m-%d %H:%M:%S"
    try:
        start_date = datetime.strptime(request.query["start_date"],
                                       datetime_format)
    except KeyError:
        start_date = datetime.fromtimestamp(0)
    except ValueError:
        raise web.HTTPBadRequest(reason="Invalid content for ``start_date``")
    try:
        end_date: Optional[datetime] = datetime.strptime(
            request.query["end_date"], datetime_format)
    except KeyError:
        end_date = None
    except ValueError:
        raise web.HTTPBadRequest(reason="Invalid content for ``end_date``")
    if end_date and end_date <= start_date:
        raise web.HTTPBadRequest(
            reason="``start_date`` must be older than ``end_date``.")
    try:
        project_name = request.match_info["project_name"]
        # Swagger UI sends '{project_name}' if none is specified -.-'
        if project_name == "{project_name}" or not project_name.strip():
            raise KeyError
    except KeyError:
        raise web.HTTPBadRequest(
            reason="No non-empty ``project_name`` provided")
    influx_client: InfluxDBClient = request.app["influx_client"]
    time_column = []
    credits_column = []
    metric_column = []
    result = await influx_client.query_billing_history(project_name,
                                                       since=start_date)
    try:
        async for point in result:
            # entries are sorted by timestamp descending
            if end_date:
                if point.timestamp > end_date:
                    continue
            time_column.append(point.timestamp.strftime(datetime_format))
            credits_column.append(float(point.credits_used))
            metric_column.append(point.metric_friendly_name)
    except InfluxDBError:
        raise web.HTTPBadRequest(reason="Invalid project name")
    # check whether any data were retrieved
    if not credits_column:
        # let's check whether the project has history at all
        if await influx_client.project_has_history(project_name):
            raise web.HTTPNoContent(reason="Try changing *_date parameters")
        raise web.HTTPNotFound(
            reason="No history data available for the given project.")
    return web.json_response({
        "timestamps": time_column,
        "credits": credits_column,
        "metrics": metric_column
    })