def get_or_create_counter_id(name, counter_type, db_session=None):
    """Return the counter_id for *name*, creating the Counter row if needed.

    Args:
        name: Counter name; lower-cased before lookup.
        counter_type: Stored only when the row is first created; an
            existing row's type is never updated.
        db_session: Optional SQLAlchemy session; defaults to g.db.

    Returns:
        The counter_id of the (possibly freshly created) counter row.
    """
    if not db_session:
        db_session = g.db
    name = name.lower()
    counter = get_counter(name)
    if counter:
        return counter["counter_id"]

    # We fall through here if the counter does not exist in the cache.
    # Note: counter_type is only inserted for the first entry and then
    # not updated again.
    row = db_session.query(Counter).filter(Counter.name == name).first()
    if not row:
        # Close out any pending transaction before attempting the insert.
        db_session.commit()
        # Bug fix: creating a new counter is a normal event, not an error,
        # so log it at INFO level instead of ERROR.
        log.info("Creating new counter called %s", name)
        try:
            row = Counter(name=name, counter_type=counter_type)
            db_session.add(row)
            db_session.commit()
        except IntegrityError as e:
            # If someone inserted the counter in the meantime, retrieve it.
            if "duplicate key" in repr(e):
                db_session.rollback()
                row = db_session.query(Counter).filter(
                    Counter.name == name).first()
            else:
                raise
        clear_counter_cache()
    counter_id = row.counter_id
    return counter_id
def flush_counters():
    """Flush accumulated Redis counters into the DB for every tenant.

    Redis keys have the form:
        counters:<name>:<counter_type>:<player_id>:<timestamp>:<context_id>
    Each key's value is read, the key is deleted, and the count is added
    to the matching Counter row (created on first sight).
    """
    # Bug fix: bind the logger BEFORE the try block, otherwise a failure in
    # get_task_logger would make the except handler below raise NameError.
    logger = get_task_logger(__name__)
    try:
        tenants = get_tenants()
        logger.info("Flushing counters to DB for %s tenants...", len(tenants))
        for tenant in tenants:
            cache = get_redis(tenant)
            match = cache.make_key("counters:*")
            for counter in cache.conn.scan_iter(match=match):
                num = int(cache.conn.get(counter))
                # NOTE(review): the key is deleted before the DB commit, so
                # a DB failure below loses this count — confirm best-effort
                # flushing is intended here.
                cache.conn.delete(counter)
                parts = counter.split(":")
                counter_name = parts[2]
                counter_type = parts[3]
                player_id = int(parts[4])
                timestamp = datetime.datetime.strptime(parts[5],
                                                       "%Y%m%d%H%M%S")
                logger.info("Counter %s %s %s %s %s" % (
                    counter_name, counter_type, player_id, timestamp, num))
                with get_db_session(tenant) as session:
                    row = session.query(Counter).filter(
                        Counter.name == counter_name).first()
                    if not row:
                        # Bug fix: use the counter_type parsed from the key
                        # instead of hard-coding "count" — the parsed value
                        # was extracted above but never used.
                        row = Counter(name=counter_name,
                                      counter_type=counter_type)
                        session.add(row)
                        session.commit()
                    counter_id = row.counter_id
                    add_count(counter_id, 0, timestamp, num,
                              is_absolute=(counter_name == "absolute"),
                              db_session=session)
                    session.commit()
    except Exception as e:
        # Task boundary: log with traceback rather than crash the worker.
        logger.exception(e)
def flush_request_statistics():
    """Flush per-tenant request statistics from Redis into the DB.

    For each tenant: reads the global 'stats:numrequests' counter and the
    per-client 'stats:numrequestsclient:<id>' counters from Redis,
    decrements them by the flushed amount, then records the totals in the
    'backend.numrequests' counter and each Client row.
    """
    logger = get_task_logger(__name__)
    tenants = get_tenants()
    num_updated = 0
    for tenant in tenants:
        tenant_name = tenant["name"]
        cache = RedisCache(tenant=tenant_name,
                           redis_server=tenant['redis_server'])
        key_name = 'stats:numrequests'
        cnt = int(cache.get(key_name) or 0)
        if not cnt:
            continue

        timestamp = datetime.datetime.utcnow()
        # Decrement (rather than delete) so requests counted while we are
        # flushing are not lost.
        cache.incr(key_name, -cnt)
        num_updated += 1

        clients = {}
        match = cache.make_key("stats:numrequestsclient:*")
        for client in cache.conn.scan_iter(match=match):
            num = int(cache.conn.get(client))
            client_id = int(client.split(":")[-1])
            clients[client_id] = num
            cache.conn.incr(client, -num)

        with sqlalchemy_session(tenant["conn_string"]) as session:
            # Global num requests counter for tenant.
            counter_name = 'backend.numrequests'
            row = session.query(Counter).filter(
                Counter.name == counter_name).first()
            if not row:
                row = Counter(name=counter_name, counter_type="count")
                session.add(row)
                session.commit()
            counter_id = row.counter_id
            add_count(counter_id, 0, timestamp, cnt, is_absolute=True,
                      db_session=session)
            session.commit()
            logger.info("Tenant %s has flushed %s requests to db",
                        tenant_name, cnt)

            # Bug fix: .items() instead of the Python-2-only .iteritems();
            # behavior is identical on Python 2 and it works on Python 3.
            for client_id, num in clients.items():
                client_row = session.query(Client).get(client_id)
                if client_row is None:
                    # Bug fix: the client row may have been deleted since
                    # the request was counted; skip instead of raising
                    # AttributeError on None.
                    logger.warning(
                        "Client %s not found; dropping %s requests",
                        client_id, num)
                    continue
                client_row.num_requests += num
                logger.info(
                    "Updated num_requests for client %s to %s. "
                    "Total requests number for client is now %s",
                    client_id, num, client_row.num_requests)
            session.commit()

    if num_updated:
        logger.info("Updated %s tenants with request statistics", num_updated)
def update_online_statistics():
    """
    Get the current number of logged in users (heartbeat in the last
    minute) for every tenant and save it into the 'backend.numonline'
    counter.
    """
    logger = get_task_logger(__name__)
    tenants = get_tenants()
    logger.info("Updating online statistics for %s tenants...", len(tenants))

    # Players whose heartbeat arrived within the last minute (UTC).
    sql = """SELECT COUNT(DISTINCT(player_id)) AS cnt
                       FROM ck_clients
                       WHERE heartbeat > NOW() at time zone 'utc' - INTERVAL '1 minutes'"""

    num_updated = 0
    for tenant in tenants:
        with sqlalchemy_session(tenant["conn_string"]) as session:
            try:
                result = session.execute(sql)
            except Exception as e:
                logger.error("Error fetching data from '%s': %s",
                             tenant["conn_string"], e)
                continue

            cnt = result.fetchone()[0]
            if not cnt:
                continue
            num_updated += 1

            tenant_name = tenant["name"]
            name = 'backend.numonline'
            row = session.query(Counter).filter(
                Counter.name == name).first()
            if row is None:
                row = Counter(name=name, counter_type="absolute")
                session.add(row)
                session.commit()

            add_count(row.counter_id, 0, datetime.datetime.utcnow(), cnt,
                      is_absolute=True, db_session=session)
            session.commit()
            logger.info("Updated num_online for %s to %s", tenant_name, cnt)

    if num_updated > 0:
        logger.info("Updated %s tenants with online user count", num_updated)
def update_online_statistics():
    """Update the per-tenant online-user counter ('backend.numonline').

    Reads the tenant list from the tier config, counts players with a
    heartbeat in the last minute, and records the count as an absolute
    counter value.

    NOTE(review): this duplicates another update_online_statistics()
    definition in this module; whichever is defined later shadows the
    other — confirm which version is intended and remove the duplicate.
    """
    logger = get_task_logger("update_statistics")
    tier_name = get_tier_name()
    config = load_config()
    tenants = config.get("tenants", [])
    logger.info("Updating statistics for %s tenants...", len(tenants))
    num_updated = 0
    for tenant_config in tenants:
        # Skip the wildcard/default tenant entry.
        if tenant_config.get("name", "*") == "*":
            continue
        try:
            this_conn_string = get_connection_string(
                tenant_config, None, tier_name=tier_name)
        except TenantNotFoundError:
            continue
        with sqlalchemy_session(this_conn_string) as session:
            result = session.execute(
                """SELECT COUNT(DISTINCT(player_id)) AS cnt
                           FROM ck_clients
                           WHERE heartbeat > NOW() - INTERVAL '1 minutes'""")
            cnt = result.fetchone()[0]
            if cnt:
                num_updated += 1
                tenant_name = tenant_config["name"]
                name = 'backend.numonline'
                row = session.query(Counter).filter(
                    Counter.name == name).first()
                if not row:
                    row = Counter(name=name, counter_type="absolute")
                    session.add(row)
                    session.commit()
                counter_id = row.counter_id
                timestamp = datetime.datetime.utcnow()
                add_count(counter_id, 0, timestamp, cnt,
                          is_absolute=True, db_session=session)
                session.commit()
                # Bug fix: report through the task logger instead of the
                # Python 2 'print' statement, matching the module's other
                # tasks and working under Python 3.
                logger.info("Updated num_online for %s to %s",
                            tenant_name, cnt)
    logger.info("Updated %s tenants with online user count", num_updated)