class LogLevelCountFilter(logging.Filter):
    """Logging filter that tallies records by level name.

    It always returns True, so no record is ever dropped — the filter
    exists purely to feed the Prometheus counter below.
    """

    # One counter series per log level, keyed by the "level" label.
    counter = Counter(
        name='log_events',
        documentation='Number of error level events that made it to the logs',
        labelnames=('level',))

    def filter(self, record: logging.LogRecord) -> bool:
        """Count *record* under its level name, then let it pass through."""
        per_level = LogLevelCountFilter.counter.labels(level=record.levelname)
        per_level.inc()
        return True
def setup(self):
    """Configure the push gateway and create the request metrics.

    Reads the per-database config, overlays the push options, pops the
    optional metric-name prefix, hands the remaining options to the
    gateway, then creates the request counter and latency histogram
    (both labelled by transport/service/endpoint/method/status).
    """
    options = self.configs.get(self.dbname, {}).copy()
    options.update(self.push_options)
    metric_prefix = options.pop('prefix', None)
    self.setup_gateway(options)

    label_names = ['transport', 'service', 'endpoint', 'method', 'status']

    # Metric names get the configured prefix, when one is set.
    counter_name = 'request_total'
    histogram_name = 'request_latency'
    if metric_prefix:
        counter_name = '{}_request_total'.format(metric_prefix)
        histogram_name = '{}_request_latency'.format(metric_prefix)

    self.request_total_counter = Counter(
        counter_name, 'Total number of requests', list(label_names))
    self.request_latency_histogram = Histogram(
        histogram_name, 'Request duration in seconds', list(label_names))
from providers.db_provider import depends_db, DbProvider router = APIRouter() logger = logging.getLogger(__name__) TimezoneEnum = Enum( "TimezoneEnum", { timezone.replace("/", ""): timezone for timezone in pytz.common_timezones }, ) REWARDS_REQUEST_COUNT = Counter( "rewards_request", "Amount of times rewards were requested", labelnames=("timezone", "currency", "calendar_year"), ) VALIDATORS_PER_REWARDS_REQUEST = Histogram( "validators_per_rewards_request", "Amount of validator indexes contained in a /rewards request", buckets=[0, 1, 2, 3, 4, 5, 10, 20, 50, 100, 1000, 10000, float("inf")], ) @router.get( "/rewards", response_model=AggregateRewards, summary="Returns per-day rewards for the specified validator indexes.", )
# # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from prometheus_client.metrics import Counter, Histogram from prometheus_client.registry import CollectorRegistry from prometheus_client.process_collector import ProcessCollector from prometheus_client.gc_collector import GCCollector registry = CollectorRegistry() process = ProcessCollector(registry=registry) gc_collector = GCCollector(registry=registry) req_counter = Counter('promsd_request_count', 'The amount of requests', registry=registry) build_counter = Counter('promsd_build_count', 'The amount of time a config is saved', registry=registry) event_counter = Counter('promsd_event_count', 'Amount of events received', registry=registry) reinit_counter = Counter('promsd_reinit_count', 'Amount of time service restarted event loop', registry=registry) errors_counter = Counter('promsd_errors_count', 'Amount of errors caught', registry=registry) build_duration = Histogram('promsd_build_seconds',
from fastapi import FastAPI
from httpx import BasicAuth
from aioredis import Redis
import pytz

from db.tables import Balance
from providers.http_client_w_backoff import AsyncClientWithBackoff
from prometheus_client.metrics import Counter

# Beacon chain genesis time: Unix 1606824023 = 2020-12-01 12:00:23 UTC.
GENESIS_DATETIME = datetime.datetime.fromtimestamp(1606824023, tz=pytz.utc)
# Seconds per slot and slots per epoch on the beacon chain.
SLOT_TIME = 12
SLOTS_PER_EPOCH = 32

logger = logging.getLogger(__name__)

# Request counters for the two upstream data sources.
BEACONCHAIN_REQUEST_COUNT = Counter("beaconchain_request_count",
                                    "Count of requests to beaconcha.in")
BEACON_NODE_REQUEST_COUNT = Counter(
    "beacon_node_request_count",
    "Count of requests made to the beacon node",
    labelnames=("path", ))


class BeaconNode:
    # Client for a beacon node; remainder of the class is outside this chunk.

    async def __call__(self) -> Any:
        # Returning self lets an instance serve as an async dependency
        # callable (presumably for FastAPI dependency injection — verify).
        return self

    @staticmethod
    def _use_infura() -> bool:
        # Env toggle: only the exact lowercase string "true" enables Infura.
        return os.getenv("BEACON_USE_INFURA", "false") == "true"

    def _get_http_client(self) -> AsyncClientWithBackoff:
from prometheus_client.metrics import Counter, Histogram

PREFIX = "pycernan"


def p(n):
    """Return metric name *n* prefixed with the module-wide PREFIX."""
    return "_".join((PREFIX, n))


# Buckets from 64 bytes up to 1MB, doubling at each step.
SIZE_BUCKETS = [1 << i for i in range(6, 21)]

# Acknowledgement metrics.
ack_count = Counter(p('ack_count'), "Number of acknowledgements received.")
ack_invalid_count = Counter(p('ack_invalid_count'),
                            "Number of invalid acknowledgements received.")
ack_latency = Histogram(p('ack_latency'),
                        "Acknowledgement latency in seconds.")
ack_request_count = Counter(p('ack_request_count'),
                            "Number of acknowledgements requested.")

# Byte throughput.
bytes_sent = Counter(p('bytes_sent'), "Total bytes sent.")
bytes_received = Counter(p('bytes_recv'), "Total bytes received.")

# Connection-pool metrics.
conn_create_count = Counter(
    p('conn_create_count'),
    "Number of connections established by connection pool.")
conn_close_count = Counter(p('conn_close_count'),
                           "Number of connections closed by connection pool.")
conn_failure_count = Counter(
    p('conn_failure_count'),
    "Number of failures to yield a connection from the pool.")
import logging
from sqlite3 import OperationalError
from mysql.connector import MySQLConnection
from prometheus_client.metrics import Counter, Gauge, Summary

# Message flow metrics.
RECV_MESSAGE_COUNT = Counter('bot_recv_message_count', 'Received messages')
SENT_MESSAGE_COUNT = Counter('bot_sent_message_count', 'Sent text messages')
FAILED_MESSAGE_COUNT = Counter('bot_failed_message_count',
                               'Number of messages failed to send')
SENT_IMAGES_COUNT = Counter('bot_sent_images_count', 'Sent images')
BOT_COMMAND_COUNT = Counter('bot_command_total', 'Received Bot Commands',
                            ['command'])
BOT_RESPONSE_TIME = Summary('bot_response_time', 'Latency of requests')

# SingleCommand
DISCARDED_MESSAGE_COUNT = Counter('bot_discard_message_count',
                                  'Received but discarded messages')
SINGLE_COMMAND_RESPONSE_TIME = Summary(
    'bot_response_time_single', 'Response time to single command input')

# User statistics
USER_COUNT = Gauge('bot_total_user', 'Number of Bot users', ['platform'])
AVERAGE_SUBSCRIPTION_COUNT = Gauge('bot_avg_subscriptions',
                                   'Average No. of subscriptions')

# Visualization related
CREATED_GRAPHS = Counter('bot_viz_created_graph_count',
                         'Number of created graphs', ['type'])
# NOTE(review): this Counter call continues beyond the visible chunk.
CACHED_GRAPHS = Counter('bot_viz_cached_graph_count',
class NotificationSender:
    """Sends housing-search notifications (text + photos) to a Telegram chat.

    Uses an ImageManager to deduplicate photos across notifications and
    records send counts/latency in the Prometheus metrics below.
    """

    logger = logging.getLogger(__name__)

    # Send latency, labelled by payload type ('message' / 'pics').
    METRICS_NOTIFICATION_TIME = Histogram("notification_send_time_sec",
                                          "Time to send notification",
                                          ['type'])
    # Sent-notification count, labelled by the notification's source service.
    METRICS_NOTIFICATION_COUNT = Counter("notification_counter",
                                         "Number of sent notifications",
                                         ['service'])

    def __init__(self) -> None:
        self.image_manager = ImageManager()
        # Telegram bot client; token comes from the environment.
        self.updater = Updater(token=environ.get('HS_TELEGRAM_BOT_TOKEN'),
                               request_kwargs={
                                   "connect_timeout": 60.,
                                   "read_timeout": 60.
                               },
                               use_context=True)

    def send_to_chat(self, notif: Notification):
        """Send *notif* to the configured chat: photos first, then the text.

        Notifications without any picture URLs are skipped entirely. If the
        ImageManager has seen a photo before, the new message replies to the
        message that originally carried it.
        """
        # Best-effort price-per-square-meter; any failure (missing/zero
        # area, non-numeric price) degrades to None in the description.
        try:
            price_per_m = int(notif.price / notif.area)
        except Exception:
            price_per_m = None
        desc = f'{notif.id}\nPrice: {notif.price} ({price_per_m}/m2)\nArea: {notif.area}\nWhere: {notif.location}\nURL: {notif.url}\n\n{notif.description}'
        # Telegram caps message length at 4096 chars; trim with margin.
        desc = desc[:4090]
        self.METRICS_NOTIFICATION_COUNT.labels(notif.source).inc()
        chat_id = environ.get('HS_TELEGRAM_CHAT_ID')
        reference_message = None
        if not notif.pics_urls:
            # Nothing visual to send — notification is dropped entirely.
            return
        new_images = []
        try:
            # Telegram media groups allow at most 10 photos per message.
            # NOTE(review): new_images / reference_message are reassigned on
            # every chunk iteration, so only the LAST chunk's values survive
            # for the final _send_message below — confirm this is intended.
            for c in chunks(notif.pics_urls, 10):
                new_images, seen_in_messages = self.image_manager.check_all(
                    notif, c)
                seen_in = None if not len(
                    seen_in_messages) else seen_in_messages.pop()
                reference_message = None if not seen_in else seen_in.get(
                    'message_id')
                if reference_message:
                    self.logger.info(
                        f"Found photo duplicates: {notif.url} vs. {seen_in['notif'].url}"
                    )
                if len(new_images):
                    self.logger.info(f"Sending {len(new_images)} images")
                    send_pic_res = self._send_pics(
                        new_images,
                        chat_id,
                        desc,
                        reply_to_message_id=reference_message)
                    # Remember which Telegram message carried these photos so
                    # future duplicates can reply to it.
                    if send_pic_res and hasattr(send_pic_res[0], 'message_id'):
                        self.image_manager.set_message_ids(
                            [v['hash'] for v in new_images.values()],
                            send_pic_res[0].message_id)
        except Exception as e:
            # NOTE(review): logger.error treats the 2nd positional arg as a
            # %-format argument; with no placeholder in the message the
            # traceback is likely dropped — consider logger.exception(e).
            self.logger.error(e, traceback.format_exc())
        if len(new_images):
            self._send_message(chat_id,
                               desc,
                               reference_message=reference_message)
        else:
            self.logger.info("No new images found, not sending the message"
                             )  # TODO: check if price has changed

    @nofail(retries=20, sleep=1, failback_result=None)
    def _send_message(self, chat_id, desc, reference_message=None):
        """Send the text part; replies (silently) when a reference exists."""
        with self.METRICS_NOTIFICATION_TIME.labels('message').time():
            log_msg = desc.replace('\n', '; ')
            self.logger.info(
                f"Sending message: {chat_id} {log_msg} {reference_message} ")
            self.updater.bot.send_message(
                chat_id,
                desc,
                timeout=20 * 60,
                disable_web_page_preview=True,
                reply_to_message_id=reference_message,
                disable_notification=reference_message is not None)

    @nofail(retries=20, sleep=1, failback_result=None)
    def _send_pics(self, c, chat_id, desc, **kwargs):
        """Send up to 10 photos as one media group, each captioned with desc."""
        with self.METRICS_NOTIFICATION_TIME.labels('pics').time():
            return self.updater.bot.send_media_group(chat_id, [
                InputMediaPhoto(BytesIO(i['image']), caption=desc)
                for i in c.values()
            ],
                                                     timeout=20 * 60,
                                                     disable_notification=True,
                                                     **kwargs)
from rep0st.framework.signal_handler import on_shutdown

FLAGS = flags.FLAGS
flags.DEFINE_string('webserver_bind_hostname', '0.0.0.0',
                    'Hostname to which to bind the HTTP server to.')
flags.DEFINE_integer('webserver_bind_port', 5000,
                     'Port to which to bind the HTTP server to.')

# Injection keys for the bind hostname/port values.
_WebserverBindHostnameKey = NewType('_WebserverBindHostnameKey', str)
_WebserverBindPortKey = NewType('_WebserverBindPortKey', str)

log = logging.getLogger(__name__)
request_logger = logging.getLogger(__name__ + '.request')

framework_webserver_requests_z = Counter(
    'framework_webserver_requests',
    'Number of requests handled by the web server with the given status',
    ['status'])
# Touch the common status labels up front so those series exist (at zero)
# before the first matching request.
framework_webserver_requests_z.labels(status=200)
framework_webserver_requests_z.labels(status=404)
framework_webserver_requests_z.labels(status=500)
framework_webserver_endpoint_requests_z = Counter(
    'framework_webserver_endpoint_requests',
    'Number of requests per rule and status', ['rule', 'method', 'status'])


def _get_status_code(status: str):
    # Parse the numeric code out of a status line like "200 OK"; -1 on
    # missing input. Body continues beyond the visible chunk.
    if not status:
        return -1
    parts = status.split(' ')
    # NOTE(review): str.split always returns at least one element, so this
    # branch is unreachable — confirm whether a different check was intended.
    if len(parts) < 1:
        return -1