def run(self):
    """Background stats loop (presumably run in a worker thread — confirm caller).

    Every ``interval`` seconds, drain the ``testdata:*`` redis lists of
    recorded durations and log count, rate, and average latency for each.
    Also drains the ``testdata:too_active`` error counter.  Never returns.
    """
    interval = 30
    key_list = [
        "testdata:pa",
        "testdata:token",
        "testdata:publish",
        "testdata:translate",
        "testdata:move",
    ]
    # Start from a clean slate so the first interval's stats aren't skewed
    # by leftover entries.
    for key in key_list:
        redis_connection.delete(key)
    while True:
        for key in key_list:
            duration_list = redis_connection.lrange(key, 0, -1)
            duration_list_count = len(duration_list)
            # Trim off exactly the entries just read; anything pushed after
            # the lrange above is kept for the next interval.
            redis_connection.ltrim(key, duration_list_count, -1)
            if duration_list_count:
                avg = sum(map(float, duration_list)) / float(duration_list_count)
                piece_moves_per_second = duration_list_count / interval
                current_app.logger.info(f"""
{key}
count: {duration_list_count}
per second: {piece_moves_per_second}
average latency: {avg}""")
        # Report and drain the "too active" error counter by decrementing
        # the amount that was read (concurrent increments are preserved).
        too_active_count = int(
            redis_connection.get("testdata:too_active") or "0")
        if too_active_count:
            current_app.logger.info(
                "too active error count: {}".format(too_active_count))
            redis_connection.decr("testdata:too_active", amount=too_active_count)
        time.sleep(interval)
def patch(self, puzzle_id):
    """Pong. Determine the latency for this player.

    Reads the ping-start timestamp stored under the player's ping token,
    computes the round-trip latency in milliseconds, records it, and
    returns it in the JSON response.
    """
    response = {"message": "", "name": "", "data": {"latency": 0}}
    args = {}
    xhr_data = request.get_json()
    if xhr_data:
        args.update(xhr_data)
    if request.form:
        args.update(request.form.to_dict(flat=True))

    # The token ties this pong back to the ping that was issued earlier.
    token = args.get("token")
    if token is None:
        response["message"] = "No token"
        response["name"] = "error"
        return make_response(json.jsonify(response), 400)

    # Identify the player from the secure cookie, falling back to the
    # shared ip-based user (without generating a new one).
    user = current_app.secure_cookie.get(u"user") or user_id_from_ip(
        request.headers.get("X-Real-IP"),
        skip_generate=True,
        validate_shared_user=False,
    )
    if user is None:
        response["message"] = "Player not currently logged in."
        response["name"] = "error"
        return make_response(json.jsonify(response), 400)
    user = int(user)

    cur = db.cursor()

    # Validate the puzzle_id
    result = cur.execute(
        fetch_query_string("select-id-status-from-puzzle-by-puzzle_id.sql"),
        {"puzzle_id": puzzle_id},
    ).fetchall()
    if not result:
        response["message"] = "Puzzle not available"
        response["name"] = "invalid"
        cur.close()
        return make_response(json.jsonify(response), 400)
    else:
        (result, col_names) = rowify(result, cur.description)
        puzzle = result[0].get("id")
        status = result[0].get("status")
        # Pinging is allowed for most statuses; anything outside this set
        # notifies listeners that the puzzle is gone.
        if status not in (
            ACTIVE,
            IN_QUEUE,
            COMPLETED,
            FROZEN,
            BUGGY_UNLISTED,
            NEEDS_MODERATION,
            REBUILD,
            IN_RENDER_QUEUE,
            RENDERING,
            RENDERING_FAILED,
            MAINTENANCE,
        ):
            response["message"] = "Puzzle no longer valid"
            response["name"] = "invalid"
            cur.close()
            sse.publish(
                "Puzzle no longer valid",
                type="invalid",
                channel="puzzle:{puzzle_id}".format(puzzle_id=puzzle_id),
            )
            # 200 (not 4xx) so the client handles this as a soft invalidation.
            return make_response(json.jsonify(response), 200)
    cur.close()

    # Determine latency for the player and record timestamp in sorted set.
    pingtoken_key = get_pingtoken_key(puzzle, user, token)
    ping_start = redis_connection.get(pingtoken_key)
    # One-shot token: delete it so a replayed pong is ignored.
    redis_connection.delete(pingtoken_key)
    ping_end = int(time.time() * 1000)
    if not ping_start:
        # Token expired or was never issued; treat as a soft failure.
        response["message"] = "Ignoring error when determining latency."
        response["name"] = "ignored"
        return make_response(json.jsonify(response), 200)
    ping_start = int(ping_start)
    ping_key = get_ping_key(puzzle)
    # Record the player's last-pong time on the puzzle's sorted set.
    redis_connection.zadd(ping_key, {user: ping_end})
    redis_connection.expire(ping_key, PING_EXPIRE)
    latency = ping_end - ping_start

    # Record the latency for the player
    redis_connection.lpush(
        "latency",
        "{user}:{timestamp}:{latency}".format(
            user=user, timestamp=ping_end, latency=latency
        ),
    )
    # Keep only the last 1000 entries to latency
    redis_connection.ltrim("latency", 0, 999)
    response["message"] = "Latency"
    response["data"]["latency"] = latency
    response["name"] = "success"
    response = make_response(json.jsonify(response), 200)
    return response
def publishMessage(msg, karma_change, karma, points=0, complete=False):
    """Publish a piece-move message and apply scoring side effects.

    Closure function: relies on ``puzzle``, ``puzzleData``, ``user``,
    ``piece``, ``now``, ``points_key`` and ``karma_key`` from the enclosing
    scope — confirm against the surrounding definition.

    Args:
        msg: pre-formatted piece movement message to publish.
        karma_change: accumulated karma delta for this move (may be updated).
        karma: the player's current karma value.
        points: points earned by this move; 0 skips score updates.
        complete: when True the puzzle is marked COMPLETED and a transfer
            job is scheduled.

    Returns:
        (msg, karma_change) tuple, mostly for testing.
    """
    # print(topic)
    # print(msg)
    # Mirror the move into the piece-update cache list when caching is on.
    if current_app.config.get("PUZZLE_PIECES_CACHE_TTL"):
        stamp = redis_connection.get(f"pzstamp:{puzzle}")
        if stamp:
            pcu_key = f"pcu:{stamp}"
            # rpushx only appends when the list already exists.
            redis_connection.rpushx(pcu_key, msg)
    sse.publish(
        msg,
        type="move",
        channel="puzzle:{puzzle_id}".format(
            puzzle_id=puzzleData["puzzle_id"]),
    )
    if user != ANONYMOUS_USER_ID:
        points_key = "points:{user}".format(user=user)
        recent_points = int(redis_connection.get(points_key) or 0)
        # Penalize recent points when karma has bottomed out.
        if karma_change < 0 and karma <= 0 and recent_points > 0:
            redis_connection.decr(points_key)
    # Mark the puzzle as recently updated for the pcupdates tracker.
    redis_connection.zadd("pcupdates", {puzzle: now})
    if user != ANONYMOUS_USER_ID:
        # bump the m_date for this player on the puzzle and timeline
        redis_connection.zadd("timeline:{puzzle}".format(puzzle=puzzle),
                              {user: now})
        redis_connection.zadd("timeline", {user: now})
    # Update player points
    if points != 0 and user is not None and user != ANONYMOUS_USER_ID:
        redis_connection.zincrby("score:{puzzle}".format(puzzle=puzzle),
                                 amount=1, value=user)
        # Batch sets/counters are flushed to the database elsewhere —
        # presumably by a periodic job; confirm against the batch consumer.
        redis_connection.sadd("batchuser", user)
        redis_connection.sadd("batchpuzzle", puzzle)
        redis_connection.incr("batchscore:{user}".format(user=user), amount=1)
        redis_connection.incr(
            "batchpoints:{puzzle}:{user}".format(puzzle=puzzle, user=user),
            amount=points,
        )
        redis_connection.zincrby("rank", amount=1, value=user)
        pieces = int(puzzleData["pieces"])
        # Skip increasing dots if puzzle is private
        earns = get_earned_points(pieces, permission=puzzleData.get("permission"))
        # karma = int(redis_connection.get(karma_key))
        ## Max out recent points
        if (earns != 0
                and karma >= current_app.config["MAX_KARMA"]
                and recent_points < current_app.config["MAX_RECENT_POINTS"]):
            recent_points = redis_connection.incr(points_key)
        # Doing small puzzles doesn't increase recent points, just extends
        # points expiration.
        redis_connection.expire(points_key,
                                current_app.config["RECENT_POINTS_EXPIRE"])
        # Extend the karma points expiration since it has increased
        redis_connection.expire(karma_key,
                                current_app.config["KARMA_POINTS_EXPIRE"])
        # Max out karma
        if karma < current_app.config["MAX_KARMA"]:
            karma = redis_connection.incr(karma_key)
            karma_change += 1
        redis_connection.incr("batchpoints:{user}".format(user=user),
                              amount=earns)
    if complete:
        current_app.logger.info("puzzle {puzzle_id} is complete".format(
            puzzle_id=puzzleData["puzzle_id"]))
        sse.publish(
            "status:{}".format(COMPLETED),
            channel="puzzle:{puzzle_id}".format(
                puzzle_id=puzzleData["puzzle_id"]),
        )
        # Persist the completed status via the internal puzzle details API.
        r = requests.patch(
            "http://{HOSTAPI}:{PORTAPI}/internal/puzzle/{puzzle_id}/details/"
            .format(
                HOSTAPI=current_app.config["HOSTAPI"],
                PORTAPI=current_app.config["PORTAPI"],
                puzzle_id=puzzleData["puzzle_id"],
            ),
            json={
                "status": COMPLETED,
                "m_date": time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()),
                "queue": QUEUE_END_OF_LINE,
            },
        )
        if r.status_code != 200:
            raise Exception(
                "Puzzle details api error when updating puzzle to be complete"
            )
        # Delaying helps avoid issues for players that are moving the last
        # piece of the puzzle as someone else completes it.
        delay = (current_app.config["MAX_PAUSE_PIECES_TIMEOUT"]
                 + current_app.config["PIECE_MOVE_TIMEOUT"] + 2)
        current_app.logger.info(
            f"Delaying puzzle transfer on completed puzzle ({puzzleData['puzzle_id']}) for {delay} seconds"
        )
        job = current_app.cleanupqueue.enqueue_in(
            timedelta(seconds=delay),
            "api.jobs.convertPiecesToDB.transfer",
            puzzle,
            result_ttl=0,
        )
        # Invalidate the cached front page for this puzzle.
        purge_route_from_nginx_cache(
            "/chill/site/front/{puzzle_id}/".format(
                puzzle_id=puzzleData["puzzle_id"]),
            current_app.config.get("PURGEURLLIST"),
        )
    # Notify the player of their karma change so the UI can update.
    if karma_change and user != ANONYMOUS_USER_ID:
        sse.publish(
            "{user}:{piece}:{karma}:{karma_change}".format(
                user=user,
                piece=piece,
                karma=karma + recent_points,
                karma_change=karma_change,
            ),
            type="karma",
            channel="puzzle:{puzzle_id}".format(
                puzzle_id=puzzleData["puzzle_id"]),
        )

    # end = time.perf_counter()
    # duration = end - start
    # redis_connection.rpush("testdata:translate", duration)

    # return topic and msg mostly for testing
    return (msg, karma_change)
def add_photo_to_puzzle(puzzle_id, photo, description, original_filename):
    """Fetch a photo from the Unsplash API and attach it to a puzzle.

    Downloads the photo, stores it as a puzzle resource, updates the puzzle's
    link/description via the internal API, and records the Unsplash
    attribution plus a preview_full url.  Requeues itself (about an hour
    later) when the tracked Unsplash rate limit is getting low.

    Args:
        puzzle_id: public id of the puzzle to update.
        photo: Unsplash photo id.
        description: description to set on the puzzle (Unsplash's own
            description is deliberately not used).
        original_filename: file name to store the downloaded image under.

    Raises:
        Exception: when Unsplash returns no links/download/preview url, or
            when an internal API call does not return 200.
    """
    with current_app.app_context():
        # Bug fix: a trailing comma previously made this a 1-tuple, which was
        # then sent as the `client_id` query param to the Unsplash API.
        application_id = current_app.config.get("UNSPLASH_APPLICATION_ID")
        application_name = current_app.config.get("UNSPLASH_APPLICATION_NAME")

        # Prevent going past the Unsplash rate limit by storing the current
        # remaining in the unsplash:rlr key.
        unsplash_rate_limit_remaining = int(
            redis_connection.get("unsplash:rlr") or UNSPLASH_RATELIMIT_LIMIT_DEMO)
        # Playing it safe by not getting too close to the limit.
        if unsplash_rate_limit_remaining < int(
                UNSPLASH_RATELIMIT_LIMIT_DEMO / 3):
            current_app.logger.info(
                f"Reaching the Unsplash rate limit. Requeueing puzzle {puzzle_id}"
            )
            job = current_app.unsplashqueue.enqueue_in(
                timedelta(hours=1, minutes=10, seconds=42),
                "api.jobs.unsplash_image.add_photo_to_puzzle",
                puzzle_id,
                photo,
                description,
                original_filename,
                result_ttl=0,
                job_timeout="24h",
            )
            return

        r = requests.get(
            f"https://api.unsplash.com/photos/{photo}",
            params={
                "client_id": application_id,
                "w": 384,
                "h": 384,
                "fit": "max",
            },
            headers={"Accept-Version": "v1"},
        )
        data = r.json()

        unsplash_rate_limit_remaining = int(
            r.headers.get("X-Ratelimit-Remaining",
                          UNSPLASH_RATELIMIT_LIMIT_DEMO))
        # Unsplash rate limit is by the hour.
        redis_connection.setex("unsplash:rlr", timedelta(hours=1),
                               unsplash_rate_limit_remaining)

        # Don't use unsplash description at all
        # description = (
        #     description if description else escape(data.get("description", None))
        # )

        pr = PuzzleResource(
            puzzle_id,
            current_app.config,
            is_local_resource=current_app.config["LOCAL_PUZZLE_RESOURCES"])
        tmp_dir = tempfile.mkdtemp()
        filename = os.path.join(tmp_dir, original_filename)
        links = data.get("links")
        if not links:
            raise Exception(f"Unsplash returned no links {data}")
        download = links.get("download")
        if not download:
            raise Exception("Unsplash returned no download")
        r = requests.get(download)
        with open(filename, "w+b") as f:
            f.write(r.content)
        pr.put_file(filename)
        rmtree(tmp_dir)

        r = requests.patch(
            "http://{HOSTAPI}:{PORTAPI}/internal/puzzle/{puzzle_id}/details/".
            format(
                HOSTAPI=current_app.config["HOSTAPI"],
                PORTAPI=current_app.config["PORTAPI"],
                puzzle_id=puzzle_id,
            ),
            json={
                "link": "",
                "description": description
            },
        )
        if r.status_code != 200:
            raise Exception(
                "Puzzle details api error when setting link and description on unsplash photo upload {}"
                .format(puzzle_id))

        # Set preview full url and fallback to small
        preview_full_url = data.get("urls", {}).get(
            "custom",
            data.get("urls", {}).get("small"))
        # Robustness fix: fail with a clear message instead of a TypeError
        # from re.sub when Unsplash returns neither a custom nor small url.
        if not preview_full_url:
            raise Exception(
                f"Unsplash returned no custom or small url for {puzzle_id}")
        # Use the max version to keep the image ratio and not crop it.
        preview_full_url = re.sub("fit=crop", "fit=max", preview_full_url)

        # Not using url_fix on the user.links.html since it garbles the '@'.
        r = requests.post(
            "http://{HOSTAPI}:{PORTAPI}/internal/puzzle/{puzzle_id}/files/{file_name}/"
            .format(
                HOSTAPI=current_app.config["HOSTAPI"],
                PORTAPI=current_app.config["PORTAPI"],
                puzzle_id=puzzle_id,
                file_name="preview_full",
            ),
            json={
                "attribution": {
                    "title": "Photo",
                    "author_link":
                    "{user_link}?utm_source={application_name}&utm_medium=referral"
                    .format(
                        user_link=data.get("user").get("links").get("html"),
                        application_name=application_name,
                    ),
                    "author_name": data.get("user").get("name"),
                    "source":
                    "{photo_link}?utm_source={application_name}&utm_medium=referral"
                    .format(
                        photo_link=data.get("links").get("html"),
                        application_name=application_name,
                    ),
                    "license_name": "unsplash",
                },
                "url": preview_full_url,
            },
        )
        if r.status_code != 200:
            raise Exception(
                "Puzzle file api error when setting attribution and url for unsplash preview_full {}"
                .format(puzzle_id))
def patch(self, puzzle_id, piece):
    """Move a puzzle piece to a new position.

    args:
     x
     y
     r

    Validates args, token and mark headers, enforces the configured
    PUZZLE_RULES (rate limits, karma), then serializes the actual movement
    through a redis-backed queue (pzq_current/pzq_next) before calling
    attempt_piece_movement.
    """

    def _blockplayer():
        # Temporarily block this player: escalating timeouts per offense
        # tracked by a per-ip counter, recorded on the puzzle's
        # blockedplayers sorted set.  Returns a 429 response.
        timeouts = current_app.config["BLOCKEDPLAYER_EXPIRE_TIMEOUTS"]
        blocked_count_ip_key = f"blocked:{ip}"
        expire_index = max(0, redis_connection.incr(blocked_count_ip_key) - 1)
        redis_connection.expire(blocked_count_ip_key, timeouts[-1])
        timeout = timeouts[min(expire_index, len(timeouts) - 1)]
        expires = now + timeout
        blockedplayers_for_puzzle_key = "blockedplayers:{puzzle}".format(
            puzzle=puzzle)
        # Add the player to the blocked players list for the puzzle and
        # extend the expiration of the key.
        redis_connection.zadd(blockedplayers_for_puzzle_key, {user: expires})
        redis_connection.expire(blockedplayers_for_puzzle_key, timeouts[-1])
        err_msg = get_blockedplayers_err_msg(expires, expires - now)
        sse.publish(
            "{user}:{piece}:{karma}:{karma_change}".format(
                user=user,
                piece=piece,
                karma=karma + recent_points,
                karma_change=karma_change,
            ),
            type="karma",
            channel="puzzle:{puzzle_id}".format(
                puzzle_id=puzzle_data["puzzle_id"]),
        )
        return make_response(json.jsonify(err_msg), 429)

    ip = request.headers.get("X-Real-IP")
    validate_token = (len({"all", "valid_token"}.intersection(
        current_app.config["PUZZLE_RULES"])) > 0)
    user = None
    now = int(time.time())

    # validate the args and headers
    args = {}
    xhr_data = request.get_json()
    if xhr_data:
        args.update(xhr_data)
    if request.form:
        args.update(request.form.to_dict(flat=True))
    if len(list(args.keys())) == 0:
        err_msg = {
            "msg": "invalid args",
            "type": "invalid",
            "expires": now + 5,
            "timeout": 5,
        }
        return make_response(json.jsonify(err_msg), 400)
    # check if args are only in acceptable set
    if len(self.ACCEPTABLE_ARGS.intersection(set(args.keys()))) != len(
            list(args.keys())):
        err_msg = {
            "msg": "invalid args",
            "type": "invalid",
            "expires": now + 5,
            "timeout": 5,
        }
        return make_response(json.jsonify(err_msg), 400)
    # validate that all values are int
    for key, value in list(args.items()):
        if not isinstance(value, int):
            try:
                args[key] = int(value)
            except ValueError:
                err_msg = {
                    "msg": "invalid args",
                    "type": "invalid",
                    "expires": now + 5,
                    "timeout": 5,
                }
                return make_response(json.jsonify(err_msg), 400)
    x = args.get("x")
    y = args.get("y")
    r = args.get("r")

    # Optional snapshot id from the token request; used below to detect
    # adjacent pieces that moved while this piece was held.
    snapshot_id = request.headers.get("Snap")

    # Token is to make sure puzzle is still in sync.
    # validate the token
    token = request.headers.get("Token")
    if not token:
        err_msg = {
            "msg": "Missing token",
            "type": "missing",
            "expires": now + 5,
            "timeout": 5,
        }
        return make_response(json.jsonify(err_msg), 400)
    mark = request.headers.get("Mark")
    if not mark:
        err_msg = {
            "msg": "Missing mark",
            "type": "missing",
            "expires": now + 5,
            "timeout": 5,
        }
        return make_response(json.jsonify(err_msg), 400)

    # start = time.perf_counter()
    # Prefer the user recorded on the mark's lock over cookie/ip lookup.
    existing_token = redis_connection.get(f"t:{mark}")
    if validate_token and existing_token:
        (m_puzzle, m_piece, m_user) = existing_token.split(":")
        user = int(m_user)
    else:
        user = current_app.secure_cookie.get("user") or user_id_from_ip(
            ip, validate_shared_user=False)
        if user is None:
            err_msg = {
                "msg": "Please reload the page.",
                "reason": "The player login was not found.",
                "type": "puzzlereload",
                "timeout": 300,
            }
            return make_response(json.jsonify(err_msg), 400)
        user = int(user)

    # Load cached puzzle details from redis; fall back to the internal API
    # and re-populate the cache (5 minute TTL) on a miss.
    pzq_key = "pzq:{puzzle_id}".format(puzzle_id=puzzle_id)
    pzq_fields = [
        "puzzle",
        "table_width",
        "table_height",
        "permission",
        "pieces",
    ]
    puzzle_data = dict(
        zip(pzq_fields, redis_connection.hmget(pzq_key, pzq_fields)))
    puzzle = puzzle_data.get("puzzle")
    if puzzle is None:
        req = requests.get(
            "http://{HOSTAPI}:{PORTAPI}/internal/puzzle/{puzzle_id}/details/"
            .format(
                HOSTAPI=current_app.config["HOSTAPI"],
                PORTAPI=current_app.config["PORTAPI"],
                puzzle_id=puzzle_id,
            ),
        )
        if req.status_code >= 400:
            err_msg = {"msg": "puzzle not available", "type": "missing"}
            return make_response(json.jsonify(err_msg), req.status_code)
        try:
            result = req.json()
        except ValueError as err:
            err_msg = {"msg": "puzzle not available", "type": "missing"}
            return make_response(json.jsonify(err_msg), 500)
        if result.get("status") not in (ACTIVE, BUGGY_UNLISTED):
            err_msg = {"msg": "puzzle not available", "type": "missing"}
            return make_response(json.jsonify(err_msg), 404)
        puzzle_data = result
        puzzle_data["puzzle"] = result["id"]
        redis_connection.hmset(
            pzq_key,
            {
                "puzzle": puzzle_data["puzzle"],
                "table_width": puzzle_data["table_width"],
                "table_height": puzzle_data["table_height"],
                "permission": puzzle_data["permission"],
                "pieces": puzzle_data["pieces"],
            },
        )
        redis_connection.expire(pzq_key, 300)
    else:
        # redis returns strings; normalize the numeric fields.
        puzzle_data["puzzle"] = int(puzzle_data["puzzle"])
        puzzle_data["table_width"] = int(puzzle_data["table_width"])
        puzzle_data["table_height"] = int(puzzle_data["table_height"])
        puzzle_data["permission"] = int(puzzle_data["permission"])
        puzzle_data["pieces"] = int(puzzle_data["pieces"])
    puzzle = int(puzzle_data["puzzle"])
    puzzle_data["puzzle_id"] = puzzle_id

    puzzle_piece_token_key = get_puzzle_piece_token_key(puzzle, piece)
    validate_token = (len({"all", "valid_token"}.intersection(
        current_app.config["PUZZLE_RULES"])) > 0)
    if validate_token:
        token_and_mark = redis_connection.get(puzzle_piece_token_key)
        if token_and_mark:
            (valid_token, other_mark) = token_and_mark.split(":")
            # other_user = int(other_user)
            if token != valid_token:
                # Token mismatch: ban time increases with each offense.
                err_msg = increase_ban_time(user, TOKEN_INVALID_BAN_TIME_INCR)
                err_msg["reason"] = "Token is invalid"
                return make_response(json.jsonify(err_msg), 409)
            if mark != other_mark:
                err_msg = increase_ban_time(user, TOKEN_INVALID_BAN_TIME_INCR)
                err_msg["reason"] = "Player is invalid"
                return make_response(json.jsonify(err_msg), 409)
        else:
            err_msg = {
                "msg": "Token has expired",
                "type": "expiredtoken",
                "reason": "",
            }
            return make_response(json.jsonify(err_msg), 409)

    # Expire the token since it shouldn't be used again
    if validate_token:
        redis_connection.delete(puzzle_piece_token_key)
        redis_connection.delete(f"t:{mark}")

    if (len({"all", "piece_translate_rate"}.intersection(
            current_app.config["PUZZLE_RULES"])) > 0):
        err_msg = bump_count(user)
        if err_msg.get("type") == "bannedusers":
            return make_response(json.jsonify(err_msg), 429)

    # Check if piece will be moved to within boundaries
    if x and (x < 0 or x > puzzle_data["table_width"]):
        err_msg = {
            "msg": "Piece movement out of bounds",
            "type": "invalidpiecemove",
            "expires": now + 5,
            "timeout": 5,
        }
        return make_response(json.jsonify(err_msg), 400)
    if y and (y < 0 or y > puzzle_data["table_height"]):
        err_msg = {
            "msg": "Piece movement out of bounds",
            "type": "invalidpiecemove",
            "expires": now + 5,
            "timeout": 5,
        }
        return make_response(json.jsonify(err_msg), 400)

    # Check again if piece can be moved and hasn't changed since getting token
    has_y = redis_connection.hget(
        "pc:{puzzle}:{piece}".format(puzzle=puzzle, piece=piece), "y")
    if has_y is None:
        err_msg = {"msg": "piece not available", "type": "missing"}
        return make_response(json.jsonify(err_msg), 404)

    if redis_connection.sismember(f"pcfixed:{puzzle}", piece) == 1:
        # immovable
        err_msg = {
            "msg": "piece can't be moved",
            "type": "immovable",
            "expires": now + 5,
            "timeout": 5,
        }
        return make_response(json.jsonify(err_msg), 400)

    # Tell the enforcer about this translation (origin from the token).
    (_, _, _, origin_x, origin_y, _) = unpack_token(token)
    redis_connection.publish(
        f"enforcer_piece_translate:{puzzle}",
        f"{user}:{piece}:{origin_x}:{origin_y}:{x}:{y}",
    )

    points_key = "points:{user}".format(user=user)
    recent_points = int(redis_connection.get(points_key) or "0")
    karma_key = init_karma_key(redis_connection, puzzle, ip,
                               current_app.config)
    # NOTE(review): assumes init_karma_key leaves a value at karma_key —
    # int(None) would raise otherwise; confirm against init_karma_key.
    karma = int(redis_connection.get(karma_key))
    karma_change = 0
    current_app.logger.debug(
        f"user: {user} ip: {ip} karma: {karma} recent_points {recent_points}"
    )

    if (len({"all", "puzzle_open_rate"}.intersection(
            current_app.config["PUZZLE_RULES"])) > 0):
        # Decrease recent points if this is a new puzzle that user hasn't
        # moved pieces on yet in the last hour
        pzrate_key = "pzrate:{user}:{today}".format(
            user=user, today=datetime.date.today().isoformat())
        if redis_connection.sadd(pzrate_key, puzzle) == 1:
            # New puzzle that player hasn't moved a piece on in the last
            # hour.
            redis_connection.expire(pzrate_key, HOUR)
            if recent_points > 0:
                redis_connection.decr(points_key)

    if (len({"all", "piece_move_rate"}.intersection(
            current_app.config["PUZZLE_RULES"])) > 0):
        # Decrease karma if piece movement rate has passed threshold
        pcrate_key = f"pcrate:{puzzle}:{user}"
        moves = redis_connection.incr(pcrate_key)
        redis_connection.expire(pcrate_key, PIECE_MOVEMENT_RATE_TIMEOUT)
        if moves > PIECE_MOVEMENT_RATE_LIMIT:
            if karma > 0:
                karma = redis_connection.decr(karma_key)
            karma_change -= 1

    if (len({"all", "hot_piece"}.intersection(
            current_app.config["PUZZLE_RULES"])) > 0):
        # Decrease karma when moving the same piece multiple times within
        # a minute.
        hotpc_key = f"hotpc:{puzzle}:{user}:{piece}"
        recent_move_count = redis_connection.incr(hotpc_key)
        redis_connection.expire(hotpc_key, HOT_PIECE_MOVEMENT_RATE_TIMEOUT)
        if recent_move_count > MOVES_BEFORE_PENALTY:
            if karma > 0:
                karma = redis_connection.decr(karma_key)
            karma_change -= 1

    if (len({"all", "hot_spot"}.intersection(
            current_app.config["PUZZLE_RULES"])) > 0):
        # Decrease the karma for the player if the piece is in a hotspot.
        hotspot_piece_key = f"hotspot:{puzzle}:{user}:{piece}"
        hotspot_count = int(redis_connection.get(hotspot_piece_key) or "0")
        if hotspot_count > HOTSPOT_LIMIT:
            if karma > 0:
                karma = redis_connection.decr(karma_key)
            karma_change -= 1

    if karma_change < 0:
        # Decrease recent points for a piece move that decreased karma
        if recent_points > 0 and karma_change < 0:
            recent_points = redis_connection.decr(points_key)
        if karma + recent_points <= 0:
            return _blockplayer()

    piece_move_timeout = current_app.config["PIECE_MOVE_TIMEOUT"]
    # Use a custom built and managed queue to prevent multiple processes
    # from running the attempt_piece_movement concurrently on the same
    # puzzle.
    pzq_current_key = "pzq_current:{puzzle}".format(puzzle=puzzle)
    pzq_next_key = "pzq_next:{puzzle}".format(puzzle=puzzle)
    # The attempt_piece_movement bumps the pzq_current by 1
    pzq_next = redis_connection.incr(pzq_next_key, amount=1)
    # Set the expire in case it fails to reach expire in
    # attempt_piece_movement.
    redis_connection.expire(pzq_current_key, piece_move_timeout + 2)
    redis_connection.expire(pzq_next_key, piece_move_timeout + 2)
    attempt_count = 0
    attempt_timestamp = time.time()
    timeout = attempt_timestamp + piece_move_timeout
    # Spin until it's this request's turn in the queue or the timeout hits.
    while attempt_timestamp < timeout:
        pzq_current = int(redis_connection.get(pzq_current_key) or "0")
        if pzq_current == pzq_next - 1:
            try:
                snapshot_msg = None
                snapshot_karma_change = False
                if snapshot_id:
                    snapshot_key = f"snap:{snapshot_id}"
                    snapshot = redis_connection.get(snapshot_key)
                    if snapshot:
                        snapshot_list = snapshot.split(":")
                        snapshot_pzq = int(snapshot_list.pop(0))
                        if snapshot_pzq != pzq_current:
                            # Check if any adjacent pieces are within range of x, y, r
                            # Within that list check if any have moved
                            # With the first one that has moved that was within range attempt piece movement on that by using adjusted x, y, r
                            snaps = list(
                                map(lambda x: x.split("_"), snapshot_list))
                            adjacent_piece_ids = list(
                                map(lambda x: int(x[0]), snaps))
                            adjacent_piece_props_snaps = list(
                                map(lambda x: x[1:], snaps))
                            property_list = [
                                "x",
                                "y",
                                "r",
                                # "g"
                            ]
                            results = []
                            with redis_connection.pipeline(
                                    transaction=True) as pipe:
                                for adjacent_piece_id in adjacent_piece_ids:
                                    pc_puzzle_adjacent_piece_key = (
                                        f"pc:{puzzle}:{adjacent_piece_id}")
                                    pipe.hmget(
                                        pc_puzzle_adjacent_piece_key,
                                        property_list,
                                    )
                                results = pipe.execute()
                            for (
                                    a_id,
                                    snapshot_adjacent,
                                    updated_adjacent,
                            ) in zip(
                                    adjacent_piece_ids,
                                    adjacent_piece_props_snaps,
                                    results,
                            ):
                                # Normalize missing props to "" so the
                                # comparison with snapshot strings works.
                                updated_adjacent = list(
                                    map(
                                        lambda x: x
                                        if isinstance(x, str) else "",
                                        updated_adjacent,
                                    ))
                                adjacent_offset = snapshot_adjacent.pop()
                                if (snapshot_adjacent != updated_adjacent
                                        ) and adjacent_offset:
                                    (a_offset_x, a_offset_y) = map(
                                        int, adjacent_offset.split(","))
                                    (a_snap_x, a_snap_y) = map(
                                        int, snapshot_adjacent[:2])
                                    # Check if the x,y is within range of the adjacent piece that has moved
                                    piece_join_tolerance = current_app.config[
                                        "PIECE_JOIN_TOLERANCE"]
                                    if (abs((a_snap_x + a_offset_x) - x) <=
                                            piece_join_tolerance
                                            and abs((a_snap_y + a_offset_y) -
                                                    y) <=
                                            piece_join_tolerance):
                                        (a_moved_x, a_moved_y) = map(
                                            int, updated_adjacent[:2])
                                        # Decrease pzq_current since it is moving an extra piece out of turn
                                        redis_connection.decr(
                                            pzq_current_key, amount=1)
                                        (
                                            snapshot_msg,
                                            snapshot_karma_change,
                                        ) = attempt_piece_movement(
                                            ip,
                                            user,
                                            puzzle_data,
                                            piece,
                                            a_moved_x + a_offset_x,
                                            a_moved_y + a_offset_y,
                                            r,
                                            karma_change,
                                            karma,
                                        )
                                        break
            except:
                # NOTE(review): bare except — re-raised below, but it also
                # catches KeyboardInterrupt/SystemExit; consider narrowing.
                pzq_current = int(
                    redis_connection.get(pzq_current_key) or "0")
                if pzq_current == pzq_next - 1:
                    # skip this piece move attempt
                    redis_connection.incr(pzq_current_key, amount=1)
                current_app.logger.warning(
                    "results123 other error {}".format(sys.exc_info()[0]))
                raise
            (msg, karma_change) = attempt_piece_movement(
                ip,
                user,
                puzzle_data,
                piece,
                x,
                y,
                r,
                karma_change or snapshot_karma_change,
                karma,
            )
            if isinstance(snapshot_msg, str) and isinstance(msg, str):
                msg = snapshot_msg + msg
            break
        current_app.logger.debug(f"pzq_current is {pzq_current}")
        attempt_timestamp = time.time()
        attempt_count = attempt_count + 1
        # TODO: The sleep time should be set based on an average time it
        # takes to process piece movements.
        time.sleep(0.02)
        # Decrease karma here to potentially block a player that
        # continually tries to move pieces when a puzzle is too active.
        if (len({"all", "too_active"}.intersection(
                current_app.config["PUZZLE_RULES"])) > 0) and karma > 0:
            karma = redis_connection.decr(karma_key)
            karma_change -= 1
    current_app.logger.debug(
        f"Puzzle ({puzzle}) piece move attempts: {attempt_count}")
    if attempt_timestamp >= timeout:
        current_app.logger.warn(
            f"Puzzle {puzzle} is too active. Attempt piece move timed out after trying {attempt_count} times."
        )
        err_msg = {
            "msg": "Piece movement timed out.",
            "type": "error",
            "reason": "Puzzle is too active",
            "timeout": piece_move_timeout,
        }
        return make_response(
            json.jsonify(err_msg),
            503,
        )

    # Check msg for error or if piece can't be moved
    if not isinstance(msg, str):
        if isinstance(msg, dict):
            return make_response(json.jsonify(msg), 400)
        else:
            current_app.logger.warning("Unknown error: {}".format(msg))
            return make_response(
                json.jsonify({
                    "msg": msg,
                    "type": "error",
                    "timeout": 3
                }), 500)

    # publish just the bit movement so it matches what this player did
    bitmsg = formatBitMovementString(user, x, y)
    sse.publish(
        bitmsg,
        type="move",
        channel="puzzle:{puzzle_id}".format(puzzle_id=puzzle_id),
    )

    if karma_change < 0:
        if karma + recent_points <= 0:
            return _blockplayer()

    # end = time.perf_counter()
    # current_app.logger.debug("PuzzlePiecesMovePublishView {}".format(end - start))
    return make_response("", 204)
def get(self, puzzle_id, piece):
    """Request a move token for a puzzle piece.

    Issues a short-lived token that locks the piece to this player (mark),
    queues competing players, and returns a snapshot id of adjacent piece
    positions used later to join pieces that moved while this one was held.
    """
    ip = request.headers.get("X-Real-IP")
    user = current_app.secure_cookie.get("user") or user_id_from_ip(
        ip, validate_shared_user=False)
    if user is None:
        err_msg = {
            "msg": "Please reload the page.",
            "reason": "The player login was not found.",
            "type": "puzzlereload",
            "timeout": 300,
        }
        response = make_response(json.jsonify(err_msg), 400)
        # Clear the stale cookies by expiring them a year in the past.
        expires = datetime.datetime.utcnow() - datetime.timedelta(days=365)
        current_app.secure_cookie.set("user", "", response, expires=expires)
        current_app.secure_cookie.set("shareduser", "", response,
                                      expires=expires)
        return response
    user = int(user)
    mark = request.args.get("mark")
    # Marks are fixed-length (10 char) client-generated ids.
    if not isinstance(mark, str) or len(mark) != 10:
        return make_response(
            json.jsonify({
                "msg": "invalid args",
                "type": "invalid",
            }),
            400,
        )
    now = int(time.time())

    # start = time.perf_counter()
    # Resolve the internal puzzle id from the cache, falling back to the
    # internal API on a miss.
    pzq_key = "pzq:{puzzle_id}".format(puzzle_id=puzzle_id)
    puzzle = redis_connection.hget(pzq_key, "puzzle")
    if not puzzle:
        current_app.logger.debug("no puzzle; fetch puzzle")
        r = requests.get(
            "http://{HOSTAPI}:{PORTAPI}/internal/puzzle/{puzzle_id}/details/"
            .format(
                HOSTAPI=current_app.config["HOSTAPI"],
                PORTAPI=current_app.config["PORTAPI"],
                puzzle_id=puzzle_id,
            ),
        )
        if r.status_code >= 400:
            # 400 if puzzle does not exist
            err_msg = {
                "msg":
                "puzzle is not ready at this time. Please reload the page.",
                "type": "puzzleimmutable",
            }
            return make_response(json.jsonify(err_msg), r.status_code)
        try:
            result = r.json()
        except ValueError as err:
            err_msg = {
                "msg":
                "puzzle is not ready at this time. Please reload the page.",
                "type": "puzzleimmutable",
            }
            return make_response(json.jsonify(err_msg), 500)
        if result.get("status") not in (ACTIVE, BUGGY_UNLISTED):
            err_msg = {
                "msg":
                "puzzle is not ready at this time. Please reload the page.",
                "type": "puzzleimmutable",
            }
            return make_response(json.jsonify(err_msg), 400)
        puzzle = result["id"]
    puzzle = int(puzzle)
    pc_puzzle_piece_key = "pc:{puzzle}:{piece}".format(puzzle=puzzle,
                                                       piece=piece)
    piece_properties = _int_piece_properties(
        redis_connection.hgetall(pc_puzzle_piece_key))
    pcfixed = set(redis_connection.smembers(f"pcfixed:{puzzle}"))

    if piece_properties.get("y") is None:
        # 400 if puzzle does not exist or piece is not found
        # Only puzzles in ACTIVE state can be mutated
        err_msg = {
            "msg":
            "puzzle pieces can't be moved at this time. Please reload the page.",
            "type": "puzzleimmutable",
        }
        return make_response(json.jsonify(err_msg), 400)

    if piece in pcfixed:
        # immovable
        err_msg = {
            "msg": "piece can't be moved",
            "type": "immovable",
            "expires": now + 5,
            "timeout": 5,
        }
        return make_response(json.jsonify(err_msg), 400)

    # TODO: remove old entries in blockedplayers:{puzzle}
    blockedplayers_for_puzzle_key = "blockedplayers:{puzzle}".format(
        puzzle=puzzle)
    blockedplayers_expires = redis_connection.zscore(
        blockedplayers_for_puzzle_key, user)
    if blockedplayers_expires and blockedplayers_expires > now:
        err_msg = get_blockedplayers_err_msg(blockedplayers_expires,
                                             blockedplayers_expires - now)
        return make_response(json.jsonify(err_msg), 429)

    # Token encodes puzzle, user, piece, and the piece's current position.
    token = pack_token(nanoid.generate(size=8), puzzle, user, piece,
                       piece_properties)
    redis_connection.publish(
        f"enforcer_token_request:{puzzle}",
        token,
    )

    def move_bit_icon_to_piece(x, y):
        # Claim the piece by showing the bit icon next to it.
        msg = formatBitMovementString(user, x, y)
        sse.publish(
            msg,
            type="move",
            channel="puzzle:{puzzle_id}".format(puzzle_id=puzzle_id),
        )

    # Snapshot of adjacent pieces at time of token request
    snapshot_id = None
    adjacent_pieces_list = _get_adjacent_pieces_list(piece_properties)
    adjacent_property_list = ["x", "y", "r", "g", str(piece)]
    pzq_current_key = "pzq_current:{puzzle}".format(puzzle=puzzle)
    results = []
    with redis_connection.pipeline(transaction=False) as pipe:
        for adjacent_piece in adjacent_pieces_list:
            pc_puzzle_adjacent_piece_key = "pc:{puzzle}:{adjacent_piece}".format(
                puzzle=puzzle, adjacent_piece=adjacent_piece)
            pipe.hmget(pc_puzzle_adjacent_piece_key, adjacent_property_list)
        pipe.get(pzq_current_key)
        results = pipe.execute()
    pzq_current = "0"
    # The last pipeline result is the pzq_current get (not a list); the
    # hmget results are lists.
    if not isinstance(results[-1], list):
        pzq_current = results.pop() or pzq_current
    adjacent_properties = dict(
        zip(
            adjacent_pieces_list,
            map(lambda x: dict(zip(adjacent_property_list, x)), results),
        ))
    snapshot = []
    for a_piece, a_props in adjacent_properties.items():
        # skip any that are immovable
        if a_piece in pcfixed:
            continue
        # skip any that are in the same group
        if a_props.get("g") is not None and a_props.get(
                "g") == piece_properties.get("g"):
            continue
        # skip any that don't have offsets (adjacent edge piece)
        if not a_props.get(str(piece)):
            continue
        if a_props.get("g") is None:
            a_props["g"] = ""
        snapshot.append("_".join([
            str(a_piece),
            a_props.get("x", ""),
            a_props.get("y", ""),
            a_props.get("r", ""),
            a_props.get(str(piece), ""),
            # a_props.get("g")
        ]))
    if len(snapshot):
        snapshot_id = nanoid.generate(size=8)
        snapshot_key = f"snap:{snapshot_id}"
        # First element is the queue position at snapshot time so the move
        # handler can detect whether other moves happened in between.
        snapshot.insert(0, pzq_current)
        redis_connection.set(snapshot_key, ":".join(snapshot))
        redis_connection.expire(
            snapshot_key,
            current_app.config["MAX_PAUSE_PIECES_TIMEOUT"] +
            (current_app.config["PIECE_MOVE_TIMEOUT"] + 2),
        )

    validate_token = (len({"all", "valid_token"}.intersection(
        current_app.config["PUZZLE_RULES"])) > 0)
    TOKEN_LOCK_TIMEOUT = current_app.config["TOKEN_LOCK_TIMEOUT"]
    TOKEN_EXPIRE_TIMEOUT = current_app.config["TOKEN_EXPIRE_TIMEOUT"]

    if not validate_token:
        # Token rules disabled: hand out the token without lock/queue logic.
        move_bit_icon_to_piece(piece_properties.get("x"),
                               piece_properties.get("y"))
        response = {
            "token": token,
            "lock": now + TOKEN_LOCK_TIMEOUT,
            "expires": now + TOKEN_EXPIRE_TIMEOUT,
        }
        if snapshot_id:
            response["snap"] = snapshot_id
        return make_response(json.jsonify(response), 200)

    # Check if user already has a token for this puzzle. This would mean
    # that the user tried moving another piece before the locked piece
    # finished moving.
    existing_token = redis_connection.get(f"t:{mark}")
    if existing_token:
        # Temporary ban the player when clicking a piece and not
        # dropping it before clicking another piece.
        # Ban the user for a few seconds
        err_msg = increase_ban_time(user, TOKEN_LOCK_TIMEOUT)
        err_msg[
            "reason"] = "Concurrent piece movements on this puzzle from the same player are not allowed."
        return make_response(json.jsonify(err_msg), 429)

    piece_token_queue_key = get_puzzle_piece_token_queue_key(puzzle, piece)
    with redis_connection.pipeline(transaction=False) as pipe:
        pipe.zrank(piece_token_queue_key, mark)
        pipe.expire(piece_token_queue_key, TOKEN_LOCK_TIMEOUT + 5)
        (queue_rank, _) = pipe.execute()

    if queue_rank is None:
        # Append this player to a queue for getting the next token. This
        # will prevent the player with the lock from continually locking the
        # same piece.
        with redis_connection.pipeline(transaction=False) as pipe:
            pipe.zadd(piece_token_queue_key, {mark: now})
            pipe.zrank(piece_token_queue_key, mark)
            (_, queue_rank) = pipe.execute()

    # Check if token on piece is in a queue and if the player requesting it
    # is the player that is next. Show an error message if not.
    if queue_rank > 0:
        err_msg = {
            "msg": "Another player is waiting to move this piece",
            "type": "piecequeue",
            "reason": "Piece queue {}".format(queue_rank),
            "expires": now + TOKEN_LOCK_TIMEOUT,
            "timeout": TOKEN_LOCK_TIMEOUT,
        }
        return make_response(json.jsonify(err_msg), 409)

    # Check if token on piece is still owned by another user
    puzzle_piece_token_key = get_puzzle_piece_token_key(puzzle, piece)
    existing_token_and_mark = redis_connection.get(puzzle_piece_token_key)
    if existing_token_and_mark:
        (other_token, other_mark) = existing_token_and_mark.split(":")
        puzzle_and_piece_and_user = redis_connection.get(f"t:{other_mark}")
        # Check if there is a lock on this piece by other user
        if puzzle_and_piece_and_user:
            (
                other_puzzle,
                other_piece,
                other_user,
            ) = puzzle_and_piece_and_user.split(":")
            other_puzzle = int(other_puzzle)
            other_piece = int(other_piece)
            other_user = int(other_user)
            if other_puzzle == puzzle and other_piece == piece:
                # Other user has a lock on this piece
                err_msg = {
                    "msg": "Another player is moving this piece",
                    "type": "piecelock",
                    "reason": "Piece locked",
                }
                return make_response(json.jsonify(err_msg), 409)

    # This piece is up for grabs since it has been more then 5 seconds since
    # another player has grabbed it.
    with redis_connection.pipeline(transaction=False) as pipe:
        # Remove player from the piece token queue
        pipe.zrem(piece_token_queue_key, mark)
        pipe.set(
            puzzle_piece_token_key,
            f"{token}:{mark}",
            ex=TOKEN_EXPIRE_TIMEOUT,
        )
        pipe.set(
            f"t:{mark}",
            f"{puzzle}:{piece}:{user}",
            ex=TOKEN_LOCK_TIMEOUT,
        )
        pipe.execute()
    move_bit_icon_to_piece(piece_properties.get("x"),
                           piece_properties.get("y"))

    response = {
        "token": token,
        "lock": now + TOKEN_LOCK_TIMEOUT,
        "expires": now + TOKEN_EXPIRE_TIMEOUT,
    }
    if snapshot_id:
        response["snap"] = snapshot_id

    # end = time.perf_counter()
    # current_app.logger.debug("PuzzlePieceTokenView {}".format(end - start))
    return make_response(json.jsonify(response), 200)
def patch(self, puzzle_id, piece):
    """
    Move a piece on a puzzle as the anonymous user (internal test endpoint).

    args:
    x
    y
    r

    Returns an empty 204 response on success (or after the move-attempt
    window expires), or a 400 JSON error when the puzzle is unknown or the
    piece is immovable.
    """
    ip = "0"  # No ip is used here for karma
    # Ignore publish of user data when anonymous user
    user = ANONYMOUS_USER_ID
    piece_move_timeout = current_app.config["PIECE_MOVE_TIMEOUT"]

    # validate the args and headers
    args = {}
    xhr_data = request.get_json()
    if xhr_data:
        args.update(xhr_data)
    if request.form:
        args.update(request.form.to_dict(flat=True))

    x = args.get("x")
    y = args.get("y")
    r = args.get("r")

    current_app.logger.debug("Test internal piece move")
    pzq_key = "pzq:{puzzle_id}".format(puzzle_id=puzzle_id)
    pzq_fields = [
        "puzzle",
        "table_width",
        "table_height",
        "permission",
        "pieces",
    ]
    puzzle_data = dict(
        zip(pzq_fields, redis_connection.hmget(pzq_key, pzq_fields)))
    puzzle = puzzle_data.get("puzzle")
    if puzzle is None:
        err_msg = {
            "msg": "No puzzle",
        }
        return make_response(json.jsonify(err_msg), 400)

    # Redis hash values come back as strings; normalize to ints.
    puzzle_data["puzzle"] = int(puzzle_data["puzzle"])
    puzzle_data["table_width"] = int(puzzle_data["table_width"])
    puzzle_data["table_height"] = int(puzzle_data["table_height"])
    puzzle_data["permission"] = int(puzzle_data["permission"])
    puzzle_data["pieces"] = int(puzzle_data["pieces"])
    puzzle_data["puzzle_id"] = puzzle_id
    # puzzle_data["puzzle"] is already an int here; the original assigned it
    # twice (once raw, once via int()) — a single assignment suffices.
    puzzle = puzzle_data["puzzle"]

    if redis_connection.sismember(f"pcfixed:{puzzle}", piece) == 1:
        # immovable
        err_msg = {
            "msg": "piece can't be moved",
        }
        return make_response(json.jsonify(err_msg), 400)

    pzq_current_key = "pzq_current:{puzzle}".format(puzzle=puzzle)
    pzq_next_key = "pzq_next:{puzzle}".format(puzzle=puzzle)
    # The attempt_piece_movement bumps the pzq_current by 1
    pzq_next = redis_connection.incr(pzq_next_key, amount=1)
    # Set the expire in case it fails to reach expire in attempt_piece_movement.
    redis_connection.expire(pzq_current_key, piece_move_timeout + 2)
    redis_connection.expire(pzq_next_key, piece_move_timeout + 2)
    karma = 1
    karma_change = 0

    attempt_timestamp = time.time()
    timeout = attempt_timestamp + piece_move_timeout
    # Wait for our turn in the piece-move queue (pzq_current catches up to
    # the ticket we took with pzq_next), giving up after piece_move_timeout.
    while attempt_timestamp < timeout:
        pzq_current = int(redis_connection.get(pzq_current_key) or "0")
        if pzq_current == pzq_next - 1:
            (_, _) = attempt_piece_movement(
                ip,
                user,
                puzzle_data,
                piece,
                x,
                y,
                r,
                karma_change,
                karma,
            )
            break
        # Bug fix: refresh the timestamp each iteration; previously it was
        # never updated, so the loop condition was always true and this
        # spun forever whenever the queue failed to advance.
        attempt_timestamp = time.time()
    return make_response("", 204)
def publishMessage(msg, karma_change, points=0, complete=False):
    """Publish a piece-move message over SSE and apply scoring side effects.

    NOTE(review): this function reads several names from an enclosing
    scope that is not visible here — ``puzzleData``, ``puzzle``, ``user``,
    ``karma_key`` and the various *_EXPIRE / MAX_* constants.  It appears
    to be the same closure that is nested inside ``translate`` below;
    confirm its intended enclosing scope before changing it.

    :param msg: the already-formatted move message to publish.
    :param karma_change: running karma delta for this move; incremented
        here when points are awarded and returned to the caller.
    :param points: points to award the player (0 means no scoring updates).
    :param complete: when True, mark the puzzle COMPLETED in the database,
        publish the status change, and queue cleanup work.
    :returns: tuple of ``(msg, karma_change)`` — mostly for testing.
    """
    # print(topic)
    # print(msg)
    sse.publish(
        msg,
        type="move",
        channel="puzzle:{puzzle_id}".format(
            puzzle_id=puzzleData["puzzle_id"]),
    )
    now = int(time.time())
    # Track the most recent piece update time per puzzle.
    redis_connection.zadd("pcupdates", {puzzle: now})

    # TODO:
    # return (topic, msg)

    # bump the m_date for this player on the puzzle and timeline
    redis_connection.zadd("timeline:{puzzle}".format(puzzle=puzzle),
                          {user: now})
    redis_connection.zadd("timeline", {user: now})

    # Update player points
    if points != 0 and user != None:
        redis_connection.zincrby("score:{puzzle}".format(puzzle=puzzle),
                                 amount=1,
                                 value=user)
        # Batch sets/counters are presumably drained later by a background
        # job — verify against the consumer before renaming these keys.
        redis_connection.sadd("batchuser", user)
        redis_connection.sadd("batchpuzzle", puzzle)
        redis_connection.incr("batchscore:{user}".format(user=user),
                              amount=1)
        redis_connection.incr(
            "batchpoints:{puzzle}:{user}".format(puzzle=puzzle, user=user),
            amount=points,
        )
        redis_connection.zincrby("rank", amount=1, value=user)
        points_key = "points:{user}".format(user=user)
        pieces = int(puzzleData["pieces"])
        # Skip increasing dots if puzzle is private
        earns = get_earned_points(pieces,
                                  permission=puzzleData.get("permission"))
        karma = int(redis_connection.get(karma_key))
        ## Max out recent points
        # if earns != 0:
        #     recent_points = int(redis_connection.get(points_key) or 0)
        #     if karma + 1 + recent_points + earns < MAX_KARMA:
        #         redis_connection.incr(points_key, amount=earns)
        # Doing small puzzles doesn't increase recent points, just extends points expiration.
        redis_connection.expire(points_key, RECENT_POINTS_EXPIRE)
        karma_change += 1
        # Extend the karma points expiration since it has increased
        redis_connection.expire(karma_key, KARMA_POINTS_EXPIRE)
        # Max out karma
        if karma < MAX_KARMA:
            redis_connection.incr(karma_key)
        else:
            # Max out points
            if earns != 0:
                recent_points = int(redis_connection.get(points_key) or 0)
                if recent_points + earns <= MAX_RECENT_POINTS:
                    redis_connection.incr(points_key, amount=earns)
                    redis_connection.incr(
                        "batchpoints:{user}".format(user=user),
                        amount=earns)

    # TODO: Optimize by using redis for puzzle status
    if complete:
        current_app.logger.info("puzzle {puzzle_id} is complete".format(
            puzzle_id=puzzleData["puzzle_id"]))
        cur = db.cursor()
        cur.execute(
            fetch_query_string("update_puzzle_status_for_puzzle.sql"),
            {
                "puzzle": puzzle,
                "status": COMPLETED
            },
        )
        cur.execute(
            fetch_query_string("update_puzzle_m_date_to_now.sql"),
            {
                "puzzle": puzzle,
                "modified": now
            },
        )
        cur.execute(
            fetch_query_string("update_puzzle_queue_for_puzzle.sql"),
            {
                "puzzle": puzzle,
                "queue": QUEUE_END_OF_LINE
            },
        )
        db.commit()
        # Notify connected clients that the puzzle status changed.
        sse.publish(
            "status:{}".format(COMPLETED),
            channel="puzzle:{puzzle_id}".format(
                puzzle_id=puzzleData["puzzle_id"]),
        )
        # Move the completed puzzle's pieces out of redis into the database.
        job = current_app.cleanupqueue.enqueue_call(
            func="api.jobs.convertPiecesToDB.transfer",
            args=(puzzle, ),
            result_ttl=0)
        purge_route_from_nginx_cache(
            "/chill/site/front/{puzzle_id}/".format(
                puzzle_id=puzzleData["puzzle_id"]),
            current_app.config.get("PURGEURLLIST"),
        )
        db.commit()
        cur.close()

    # return topic and msg mostly for testing
    return (msg, karma_change)
def translate(ip, user, puzzleData, piece, x, y, r, karma_change, db_file=None):
    """Move a puzzle piece to (x, y) and publish the result.

    Clamps the target position to the table boundaries, hands the move to
    PieceMutateProcess, adjusts karma based on the resulting status, and
    publishes the move (and any scoring/completion side effects) via the
    nested publishMessage closure.

    :param ip: client ip used to key karma tracking.
    :param user: player id (scoring is skipped when it is None).
    :param puzzleData: dict with at least puzzle, puzzle_id, pieces,
        table_width, table_height, permission.
    :param piece: piece id being moved.
    :param x, y, r: requested position/rotation for the piece.
    :param karma_change: running karma delta, adjusted and returned.
    :param db_file: unused here — presumably for callers that pass an
        alternate database; verify before removing.
    :returns: ``(msg, karma_change)`` from publishMessage.
    """

    def publishMessage(msg, karma_change, points=0, complete=False):
        """Publish the move over SSE and apply scoring/completion effects.

        Closure over puzzleData, puzzle, user, and karma_key from the
        enclosing translate scope.  Returns (msg, karma_change).
        """
        # print(topic)
        # print(msg)
        sse.publish(
            msg,
            type="move",
            channel="puzzle:{puzzle_id}".format(
                puzzle_id=puzzleData["puzzle_id"]),
        )
        now = int(time.time())
        # Track the most recent piece update time per puzzle.
        redis_connection.zadd("pcupdates", {puzzle: now})

        # TODO:
        # return (topic, msg)

        # bump the m_date for this player on the puzzle and timeline
        redis_connection.zadd("timeline:{puzzle}".format(puzzle=puzzle),
                              {user: now})
        redis_connection.zadd("timeline", {user: now})

        # Update player points
        if points != 0 and user != None:
            redis_connection.zincrby("score:{puzzle}".format(puzzle=puzzle),
                                     amount=1,
                                     value=user)
            # Batch sets/counters are presumably drained by a background
            # job — verify against the consumer before renaming these keys.
            redis_connection.sadd("batchuser", user)
            redis_connection.sadd("batchpuzzle", puzzle)
            redis_connection.incr("batchscore:{user}".format(user=user),
                                  amount=1)
            redis_connection.incr(
                "batchpoints:{puzzle}:{user}".format(puzzle=puzzle,
                                                     user=user),
                amount=points,
            )
            redis_connection.zincrby("rank", amount=1, value=user)
            points_key = "points:{user}".format(user=user)
            pieces = int(puzzleData["pieces"])
            # Skip increasing dots if puzzle is private
            earns = get_earned_points(
                pieces, permission=puzzleData.get("permission"))
            karma = int(redis_connection.get(karma_key))
            ## Max out recent points
            # if earns != 0:
            #     recent_points = int(redis_connection.get(points_key) or 0)
            #     if karma + 1 + recent_points + earns < MAX_KARMA:
            #         redis_connection.incr(points_key, amount=earns)
            # Doing small puzzles doesn't increase recent points, just extends points expiration.
            redis_connection.expire(points_key, RECENT_POINTS_EXPIRE)
            karma_change += 1
            # Extend the karma points expiration since it has increased
            redis_connection.expire(karma_key, KARMA_POINTS_EXPIRE)
            # Max out karma
            if karma < MAX_KARMA:
                redis_connection.incr(karma_key)
            else:
                # Max out points
                if earns != 0:
                    recent_points = int(
                        redis_connection.get(points_key) or 0)
                    if recent_points + earns <= MAX_RECENT_POINTS:
                        redis_connection.incr(points_key, amount=earns)
                        redis_connection.incr(
                            "batchpoints:{user}".format(user=user),
                            amount=earns)

        # TODO: Optimize by using redis for puzzle status
        if complete:
            current_app.logger.info("puzzle {puzzle_id} is complete".format(
                puzzle_id=puzzleData["puzzle_id"]))
            cur = db.cursor()
            cur.execute(
                fetch_query_string("update_puzzle_status_for_puzzle.sql"),
                {
                    "puzzle": puzzle,
                    "status": COMPLETED
                },
            )
            cur.execute(
                fetch_query_string("update_puzzle_m_date_to_now.sql"),
                {
                    "puzzle": puzzle,
                    "modified": now
                },
            )
            cur.execute(
                fetch_query_string("update_puzzle_queue_for_puzzle.sql"),
                {
                    "puzzle": puzzle,
                    "queue": QUEUE_END_OF_LINE
                },
            )
            db.commit()
            # Notify connected clients that the puzzle status changed.
            sse.publish(
                "status:{}".format(COMPLETED),
                channel="puzzle:{puzzle_id}".format(
                    puzzle_id=puzzleData["puzzle_id"]),
            )
            # Move the completed puzzle's pieces from redis into the database.
            job = current_app.cleanupqueue.enqueue_call(
                func="api.jobs.convertPiecesToDB.transfer",
                args=(puzzle, ),
                result_ttl=0)
            purge_route_from_nginx_cache(
                "/chill/site/front/{puzzle_id}/".format(
                    puzzle_id=puzzleData["puzzle_id"]),
                current_app.config.get("PURGEURLLIST"),
            )
            db.commit()
            cur.close()

        # return topic and msg mostly for testing
        return (msg, karma_change)

    p = ""  # NOTE(review): unused local; kept as-is in this doc-only pass.
    points = 0
    puzzle = puzzleData["puzzle"]
    karma_key = init_karma_key(redis_connection, puzzle, ip)
    karma = int(redis_connection.get(karma_key))

    # Restrict piece to within table boundaries
    if x < 0:
        x = 0
    if x > puzzleData["table_width"]:
        x = puzzleData["table_width"]
    if y < 0:
        y = 0
    if y > puzzleData["table_height"]:
        y = puzzleData["table_height"]

    pc_puzzle_piece_key = "pc:{puzzle}:{piece}".format(puzzle=puzzle,
                                                       piece=piece)

    # Get the puzzle piece origin position
    # TODO: Handle the potential error if the hmget here gets a None value for x and y.
    (originX, originY) = list(
        map(
            int,
            redis_connection.hmget(pc_puzzle_piece_key, ["x", "y"]),
        ))

    # Delegate the actual piece/group mutation to PieceMutateProcess; its
    # returned status drives the karma and publish behavior below.
    piece_mutate_process = PieceMutateProcess(
        redis_connection,
        puzzle,
        piece,
        x,
        y,
        r,
        piece_count=puzzleData.get("pieces"))
    (msg, status) = piece_mutate_process.start()

    if status == "stacked":
        # Decrease karma since stacking
        if karma > MIN_KARMA:
            redis_connection.decr(karma_key)
        karma_change -= 1
        return publishMessage(
            msg,
            karma_change,
        )
    elif status == "moved":
        # Penalize moving an oversized piece group.
        if (len(piece_mutate_process.all_other_pieces_in_piece_group) >
                PIECE_GROUP_MOVE_MAX_BEFORE_PENALTY):
            if karma > MIN_KARMA:
                redis_connection.decr(karma_key)
            karma_change -= 1
        return publishMessage(
            msg,
            karma_change,
        )
    elif status == "joined":
        return publishMessage(
            msg,
            karma_change,
            points=4,
            complete=False,
        )
    elif status == "completed":
        return publishMessage(
            msg,
            karma_change,
            points=4,
            complete=True,
        )
    else:
        pass  # TODO: handle failed status
    return publishMessage(
        msg,
        karma_change,
    )
def patch(self, puzzle_id):
    """Pong. Determine the latency for this player.

    Looks up the ping token recorded when the ping was sent, computes the
    round-trip latency in milliseconds, records the player's last-seen
    timestamp in a per-puzzle sorted set, and appends the latency sample
    to a capped redis list.

    Returns a JSON response: 400 on missing token / unknown player /
    invalid puzzle id, 200 otherwise (including the ignored-error and
    inactive-puzzle cases).
    """
    response = {"message": "", "name": "", "data": {"latency": 0}}

    args = {}
    xhr_data = request.get_json()
    if xhr_data:
        args.update(xhr_data)
    if request.form:
        args.update(request.form.to_dict(flat=True))

    token = args.get("token")
    # Fix: compare to None with `is`, not `==` (PEP 8; `==` can be hijacked
    # by a custom __eq__).
    if token is None:
        response["message"] = "No token"
        response["name"] = "error"
        return make_response(json.jsonify(response), 400)

    user = current_app.secure_cookie.get(u"user") or user_id_from_ip(
        request.headers.get("X-Real-IP"), skip_generate=True)
    if user is None:
        response["message"] = "Player not currently logged in."
        response["name"] = "error"
        return make_response(json.jsonify(response), 400)
    user = int(user)

    cur = db.cursor()
    # Validate the puzzle_id
    result = cur.execute(
        fetch_query_string("select_viewable_puzzle_id.sql"),
        {
            "puzzle_id": puzzle_id
        },
    ).fetchall()
    if not result:
        response["message"] = "Invalid puzzle id."
        response["name"] = "error"
        cur.close()
        db.commit()
        return make_response(json.jsonify(response), 400)
    else:
        (result, col_names) = rowify(result, cur.description)
        puzzle = result[0].get("puzzle")
        status = result[0].get("status")
        if status != ACTIVE:
            response["message"] = "Puzzle not active"
            response["name"] = "invalid"
            cur.close()
            db.commit()
            return make_response(json.jsonify(response), 200)
    # Fix: close the cursor on the success path too; previously it leaked
    # when the puzzle was active.
    cur.close()

    # Determine latency for the player and record timestamp in sorted set.
    pingtoken_key = get_pingtoken_key(puzzle, user, token)
    ping_start = redis_connection.get(pingtoken_key)
    redis_connection.delete(pingtoken_key)
    ping_end = int(time.time() * 1000)
    if not ping_start:
        # Token already consumed or expired; report as ignored, not fatal.
        response["message"] = "Ignoring error when determining latency."
        response["name"] = "ignored"
        return make_response(json.jsonify(response), 200)
    ping_start = int(ping_start)

    ping_key = get_ping_key(puzzle)
    redis_connection.zadd(ping_key, {user: ping_end})
    redis_connection.expire(ping_key, PING_EXPIRE)
    latency = ping_end - ping_start

    # Record the latency for the player
    redis_connection.lpush(
        "latency",
        "{user}:{timestamp}:{latency}".format(user=user,
                                              timestamp=ping_end,
                                              latency=latency),
    )
    # Keep only the last 1000 entries to latency
    redis_connection.ltrim("latency", 0, 999)

    response["message"] = "Latency"
    response["data"]["latency"] = latency
    response["name"] = "success"
    response = make_response(json.jsonify(response), 200)
    return response