async def save(self, _save_all: bool = False) -> None:
    """Collects all annotated :class:`~os_credits.perun.base_attributes.PerunAttribute`
    of this group and sends/saves them to *Perun* in case their value has changed
    since retrieval, as indicated by
    :attr:`~os_credits.perun.base_attributes.PerunAttribute.has_changed`.

    :param _save_all: Save all attributes regardless of whether their values have
        actually changed since retrieval. Also used for testing.
    """
    internal_logger.debug("Save of Group %s called", self)
    changed_attrs: List[PerunAttribute[Any]] = []
    changed_resource_bound_attrs: List[PerunAttribute[Any]] = []
    # collect all attributes whose value has changed since retrieval
    for attribute_name in type(self).get_perun_attributes():
        attr = getattr(self, attribute_name)
        # in offline/dummy mode save all attributes, since Perun will not return
        # attributes that were never stored
        if attr.has_changed or _save_all:
            if not attr.is_resource_bound():
                changed_attrs.append(attr)
            else:
                changed_resource_bound_attrs.append(attr)
    if changed_attrs:
        internal_logger.debug(
            "Sending modified regular attributes to perun %s", changed_attrs
        )
        await set_attributes(self.id, changed_attrs)
    if changed_resource_bound_attrs:
        if getattr(self, "assigned_resource", False):
            internal_logger.debug(
                "Sending modified resource bound attributes to perun %s",
                changed_resource_bound_attrs,
            )
            await set_resource_bound_attributes(
                self.id, self.resource_id, changed_resource_bound_attrs
            )
        else:
            internal_logger.warning(
                "Not sending modified attribute to perun, since Group %s is not "
                "associated with resource with id %s. How did we even retrieve any "
                "such attributes?",
                self.name,
                self.resource_id,
            )
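A minimal call-site sketch, based on the usage visible in the delete endpoint further below; ``touch_timestamp`` is a hypothetical helper, and ``project_mb_usage`` is one of the timestamp keys that endpoint writes. Mutating an attribute's ``value`` flips its ``has_changed`` flag, so the subsequent ``save()`` transmits only that attribute:

from datetime import datetime

async def touch_timestamp(group_name: str, location_id: int) -> None:
    group = Group(group_name, location_id)
    await group.connect()  # fetch current attribute values from Perun
    # modify one attribute; this marks it as changed
    group.credits_timestamps.value["project_mb_usage"] = datetime.now()
    await group.save()  # sends only the modified attribute back to Perun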
async def stop_worker(app: web.Application, queue_timeout: int = 120) -> None:
    """Tries to shut down all :ref:`Task Workers` gracefully by first emptying the
    :ref:`Task Queue` and only then cancelling the workers.

    :param app: Application instance holding the worker tasks and the task queue.
    :param queue_timeout: Seconds to wait for the remaining tasks in the queue to
        finish before killing the task workers.
    """
    internal_logger.info(
        "Waiting up to %d seconds to finish remaining tasks.", queue_timeout
    )
    try:
        await wait_for(app["task_queue"].join(), timeout=queue_timeout)
    except TimeoutError:
        internal_logger.warning(
            "Waited %d seconds for all remaining tasks to be processed, killing "
            "workers now.",
            queue_timeout,
        )
    for task in app["task_workers"].values():
        task.cancel()
    await gather(*app["task_workers"].values(), return_exceptions=True)
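A minimal wiring sketch, assuming the standard aiohttp lifecycle hooks: ``on_cleanup`` callbacks are awaited with the application as their sole argument, so ``functools.partial`` can pre-bind a custom timeout. The ``setup_app`` helper is hypothetical:

from functools import partial

from aiohttp import web

def setup_app(app: web.Application) -> None:
    # run stop_worker(app, queue_timeout=60) when the application shuts down
    app.on_cleanup.append(partial(stop_worker, queue_timeout=60))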
def parse_config_from_environment() -> Config:
    # for environment variables that need to be processed
    PROCESSED_ENV_CONFIG: Dict[str, Any] = {}

    try:
        PROCESSED_ENV_CONFIG.update(
            {
                "OS_CREDITS_PROJECT_WHITELIST": set(
                    environ["OS_CREDITS_PROJECT_WHITELIST"].split(";")
                )
            }
        )
    except KeyError:
        # Environment variable not set, that's ok
        pass

    for bool_value in ["MAIL_NOT_STARTTLS"]:
        if bool_value in environ:
            PROCESSED_ENV_CONFIG.update({bool_value: True})

    for int_value_key in [
        "OS_CREDITS_PRECISION",
        "OS_CREDITS_WORKERS",
        "INFLUXDB_PORT",
        "OS_CREDITS_PERUN_VO_ID",
        "MAIL_SMTP_PORT",
    ]:
        try:
            int_value = int(environ[int_value_key])
            if int_value < 0:
                internal_logger.warning(
                    "Integer value (%s) must not be negative, falling back to "
                    "default value",
                    int_value_key,
                )
                del environ[int_value_key]
                continue
            PROCESSED_ENV_CONFIG.update({int_value_key: int_value})
            internal_logger.debug("Added %s to processed env", int_value_key)
        except KeyError:
            # Environment variable not set, that's ok
            pass
        except ValueError:
            internal_logger.warning(
                "Could not convert value of $%s('%s') to int",
                int_value_key,
                environ[int_value_key],
            )
            # since we cannot use a subset of the actual environment, see below, we
            # have to remove invalid keys from the environment to make sure that if
            # such a key is looked up inside the config the chainmap does not return
            # the unprocessed value from the environment but rather the default one
            del environ[int_value_key]

    for decimal_value_key in ["VCPU_CREDIT_PER_HOUR", "RAM_CREDIT_PER_HOUR"]:
        try:
            decimal_value = Decimal(environ[decimal_value_key])
            if decimal_value < 0:
                internal_logger.warning(
                    "Decimal value (%s) must not be negative, falling back to "
                    "default value",
                    decimal_value_key,
                )
                del environ[decimal_value_key]
                continue
            PROCESSED_ENV_CONFIG.update({decimal_value_key: decimal_value})
            internal_logger.debug("Added %s to processed env", decimal_value_key)
        except KeyError:
            # Environment variable not set, that's ok
            pass
        except (ValueError, InvalidOperation):
            # ``Decimal`` raises ``decimal.InvalidOperation`` rather than
            # ``ValueError`` on unparsable input, so we have to catch it here
            # (imported from ``decimal`` alongside ``Decimal``)
            internal_logger.warning(
                "Could not convert value of $%s('%s') to Decimal",
                decimal_value_key,
                environ[decimal_value_key],
            )
            # see the comment in the integer loop above: remove invalid keys so
            # the chainmap falls back to the default value
            del environ[decimal_value_key]

    if "OS_CREDITS_PRECISION" in PROCESSED_ENV_CONFIG:
        PROCESSED_ENV_CONFIG["OS_CREDITS_PRECISION"] = (
            Decimal(10) ** -PROCESSED_ENV_CONFIG["OS_CREDITS_PRECISION"]
        )
    if "VCPU_CREDIT_PER_HOUR" in PROCESSED_ENV_CONFIG:
        PROCESSED_ENV_CONFIG["VCPU_CREDIT_PER_HOUR"] = Decimal(
            PROCESSED_ENV_CONFIG["VCPU_CREDIT_PER_HOUR"]
        )
    if "RAM_CREDIT_PER_HOUR" in PROCESSED_ENV_CONFIG:
        PROCESSED_ENV_CONFIG["RAM_CREDIT_PER_HOUR"] = Decimal(
            PROCESSED_ENV_CONFIG["RAM_CREDIT_PER_HOUR"]
        )

    # this would be the right way but makes pytest hang forever -.-'
    # use the workaround explained above and add the raw process environment to the
    # chainmap although this is not really nice :(
    # At least mypy should show an error whenever a config value not defined in
    # :class:`Config` is accessed
    # for key in Config.__annotations__:
    #     # every value which needs processing should already be present in
    #     # PROCESSED_ENV_CONFIG if set in the environment
    #     if key in PROCESSED_ENV_CONFIG:
    #         continue
    #     if key in environ:
    #         PROCESSED_ENV_CONFIG.update({key: environ[key]})

    return cast(Config, PROCESSED_ENV_CONFIG)
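The removed-key comments above refer to a chainmap lookup; a sketch of how that layering could look, with purely illustrative default values (the real default table lives elsewhere in the module). Invalid keys were deleted from ``environ``, so a lookup falls through the raw environment to the defaults:

from collections import ChainMap
from os import environ

DEFAULT_CONFIG = {"OS_CREDITS_WORKERS": 10, "INFLUXDB_PORT": 8086}  # illustrative

config = ChainMap(parse_config_from_environment(), environ, DEFAULT_CONFIG)
config["OS_CREDITS_WORKERS"]  # processed env value if valid, else the default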
async def delete_mb_and_vcpu_since(request: web.Request) -> web.Response:
    """Deletes the measured mb and vcpu usage values of the given projects from
    InfluxDB since the given date and resets the corresponding credits timestamps
    in Perun. Task workers are stopped while the history is rewritten and
    restarted afterwards.
    """
    internal_logger.info("Called: %s", request.rel_url)
    await stop_worker(request.app, 500)
    try:
        influx_client: InfluxDBClient = request.app["influx_client"]
        datetime_format = "%Y-%m-%d %H:%M:%S"
        since_date = datetime.strptime(request.query["since_date"], datetime_format)
        project_names = request.query["project_names"].split(",")
        since_timestamp = int(since_date.timestamp())
    # return concrete error responses; the bare ``web.HTTPException`` base class
    # is not a usable response
    except KeyError as e:
        internal_logger.exception(
            "Exception when getting request information for deleting value "
            "history:\n%s",
            e,
        )
        await create_worker(request.app)
        return web.HTTPBadRequest(text="Key Error.")
    except ValueError as e:
        internal_logger.exception(
            "Exception when getting request information for deleting value "
            "history:\n%s",
            e,
        )
        await create_worker(request.app)
        return web.HTTPBadRequest(text="Value Error.")
    except Exception as e:
        internal_logger.exception(
            "Exception when getting request information for deleting value "
            "history:\n%s",
            e,
        )
        await create_worker(request.app)
        return web.HTTPInternalServerError(text="Exception.")
    return_list = []
    internal_logger.info(
        "Trying to delete usage values for projects %s since %s.",
        project_names,
        since_date,
    )
    for project_name in project_names:
        try:
            last_timestamps = await influx_client.delete_mb_and_vcpu_measurements(
                project_name, since_timestamp
            )
            if (
                "project_name" not in last_timestamps
                or "location_id" not in last_timestamps
            ):
                internal_logger.info(
                    "Could not find group %s in influxdb!", project_name
                )
                return_list.append(
                    {"error": f"Could not find '{project_name}' in influxdb."}
                )
                continue
            perun_group = Group(
                last_timestamps["project_name"],
                int(last_timestamps["location_id"]),
            )
            await perun_group.connect()
            # reset each timestamp to the last remaining measurement, falling
            # back to now if no measurement is left
            last_mb = last_timestamps.get("last_mb", None)
            if last_mb:
                perun_group.credits_timestamps.value[
                    "project_mb_usage"
                ] = datetime.fromtimestamp(last_mb["time"])
            else:
                perun_group.credits_timestamps.value[
                    "project_mb_usage"
                ] = datetime.now()
            last_vcpu = last_timestamps.get("last_vcpu", None)
            if last_vcpu:
                perun_group.credits_timestamps.value[
                    "project_vcpu_usage"
                ] = datetime.fromtimestamp(last_vcpu["time"])
            else:
                perun_group.credits_timestamps.value[
                    "project_vcpu_usage"
                ] = datetime.now()
            await perun_group.save()
            internal_logger.info(
                "Deleted values and set timestamps for '%s' with %s.",
                project_name,
                last_timestamps,
            )
            return_list.append(last_timestamps)
        except GroupNotExistsError as e:
            internal_logger.warning(
                "Could not resolve group with name `%s` against perun. %r",
                project_name,
                e,
            )
            return_list.append(
                {"error": f"Could not find perun group '{project_name}'."}
            )
            continue
        except Exception as e:
            internal_logger.exception(
                "Exception when deleting value history:\n%s", e
            )
            return_list.append(
                {"error": f"Could not delete values for '{project_name}'."}
            )
            continue
    await create_worker(request.app)
    return web.json_response(return_list)
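A client-side sketch for exercising this handler; the route path and HTTP method are assumptions, only the two query parameters and the ``%Y-%m-%d %H:%M:%S`` date format come from the handler itself:

import aiohttp

async def call_delete(base_url: str) -> list:
    params = {
        "since_date": "2020-01-01 00:00:00",   # must match "%Y-%m-%d %H:%M:%S"
        "project_names": "projectA,projectB",  # comma-separated list
    }
    async with aiohttp.ClientSession() as session:
        # route path and GET method are assumptions, not confirmed by the handler
        async with session.get(
            f"{base_url}/delete_mb_and_vcpu_since", params=params
        ) as resp:
            return await resp.json()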