def move_random_piece(self):
    """Pick one movable piece and ask the API to move it to a random spot.

    A piece token is requested first; the move is only attempted when a
    token is granted.  Raises Exception("boing") when the server rejects
    the move, and clears this session's karma redis key when the reported
    karma drops below 2.
    """
    target_piece = choice(self.movable_pieces)
    # Keep the destination at least 100 px inside the right/bottom edges.
    dest_x = randint(0, self.table_width - 100)
    dest_y = randint(0, self.table_height - 100)
    token_response = self.user_session.get_data(
        "/puzzle/{puzzle_id}/piece/{piece_id}/token/?mark={mark}".format(
            puzzle_id=self.puzzle_id,
            piece_id=target_piece,
            mark=self.puzzle_pieces["mark"],
        ))
    if not (token_response and token_response.get("token")):
        return
    move_response = self.user_session.patch_data(
        "/puzzle/{puzzle_id}/piece/{piece_id}/move/".format(
            puzzle_id=self.puzzle_id, piece_id=target_piece),
        payload={
            "x": dest_x,
            "y": dest_y
        },
        headers={"Token": token_response["token"]},
    )
    if not move_response:
        return
    if move_response.get("msg") == "boing":
        raise Exception("boing")
    # Reset karma:puzzle:ip redis key when it gets low
    if move_response["karma"] < 2:
        print("resetting karma for {ip}".format(ip=self.user_session.ip))
        karma_key = init_karma_key(redis_connection, self.puzzle,
                                   self.user_session.ip)
        redis_connection.delete(karma_key)
def delete_puzzle_timeline(puzzle_id):
    """Delete all timeline entries for the puzzle with this puzzle_id.

    Looks up the internal puzzle id, deletes the timeline rows from the
    database, and clears the cached timeline and score redis keys.

    Returns a dict with "msg" and "status_code": 400 when no puzzle is
    found, otherwise 200 with the "rowcount" of deleted rows.
    """
    cur = db.cursor()
    result = cur.execute(
        fetch_query_string("select-internal-puzzle-details-for-puzzle_id.sql"),
        {
            "puzzle_id": puzzle_id
        },
    ).fetchall()
    if not result:
        err_msg = {"msg": "No puzzle found", "status_code": 400}
        cur.close()
        return err_msg
    (result, col_names) = rowify(result, cur.description)
    puzzle_data = result[0]
    # Internal numeric puzzle id (distinct from the public puzzle_id).
    puzzle = puzzle_data["id"]
    result = cur.execute(fetch_query_string("delete_puzzle_timeline.sql"),
                         {"puzzle": puzzle})
    cur.close()
    db.commit()
    # Drop cached timeline and score data so they are rebuilt on demand.
    redis_connection.delete("timeline:{puzzle}".format(puzzle=puzzle))
    redis_connection.delete("score:{puzzle}".format(puzzle=puzzle))
    msg = {"rowcount": result.rowcount, "msg": "Deleted", "status_code": 200}
    return msg
def run(self):
    """Log throughput stats for the testdata:* redis lists every interval.

    Each listed key holds duration samples pushed by load-test sessions.
    On each pass the entries that were read are trimmed off so they are
    only counted once, then count, per-second rate, and average latency
    are logged.  The testdata:too_active error counter is logged and
    decremented by the amount seen.  Runs forever; intended for a
    background worker thread.
    """
    interval = 30  # seconds between reporting passes
    key_list = [
        "testdata:pa",
        "testdata:token",
        "testdata:publish",
        "testdata:translate",
        "testdata:move",
    ]
    # Start fresh; discard any leftover samples from a previous run.
    for key in key_list:
        redis_connection.delete(key)
    while True:
        for key in key_list:
            duration_list = redis_connection.lrange(key, 0, -1)
            duration_list_count = len(duration_list)
            # Trim off exactly the entries just read; new pushes that
            # arrived after lrange stay in the list for the next pass.
            redis_connection.ltrim(key, duration_list_count, -1)
            if duration_list_count:
                avg = sum(map(float, duration_list)) / float(duration_list_count)
                piece_moves_per_second = duration_list_count / interval
                # NOTE(review): the line layout inside this multi-line log
                # message is reconstructed; original formatting was lost.
                current_app.logger.info(f"""
{key} count: {duration_list_count} per second: {piece_moves_per_second} average latency: {avg}""")
        too_active_count = int(
            redis_connection.get("testdata:too_active") or "0")
        if too_active_count:
            current_app.logger.info(
                "too active error count: {}".format(too_active_count))
            # Decrement by the observed amount rather than deleting, so
            # concurrent increments between get and decr are not lost.
            redis_connection.decr("testdata:too_active",
                                  amount=too_active_count)
        time.sleep(interval)
def __init__(self, puzzle_id, ips, max_delay=0.1):
    """Load puzzle details for puzzle_id and build a UserSession per ip.

    Also clears the testdata:too_active redis counter so a fresh test
    run starts from zero.
    """
    self.puzzle_id = puzzle_id
    self.ips = ips
    self.max_delay = max_delay
    # One simulated player session per ip address.
    self.user_sessions = [UserSession(ip=ip) for ip in self.ips]
    cursor = db.cursor()
    rows = cursor.execute(
        "select id, table_width, table_height from Puzzle where puzzle_id = :puzzle_id;",
        {
            "puzzle_id": self.puzzle_id
        },
    ).fetchall()
    (rows, _col_names) = rowify(rows, cursor.description)
    self.puzzle_details = rows[0]
    cursor.close()
    redis_connection.delete("testdata:too_active")
def all_tasks():
    """
    Cycle through all tasks in the task registry and run them at their set interval.

    The schedule is kept in a redis sorted set (scheduler_key) mapping
    task id -> next-due timestamp.  All tasks start scheduled for "now";
    each loop iteration runs every task whose score is due.  Connection
    errors trigger a longer retry wait instead of crashing the loop.
    Runs forever.
    """
    # Reset scheduler to start by removing any previous scheduled tasks
    redis_connection.delete(scheduler_key)
    now = int(time())
    # Instantiate each task with an id corresponding to its registry index.
    tasks = {}
    for index in range(len(task_registry)):
        tasks[index] = task_registry[index](index)
    # reset all tasks to be scheduled now
    task_ids_scheduled_to_now = {task_id: now for task_id in tasks}
    redis_connection.zadd(scheduler_key, task_ids_scheduled_to_now)
    # NOTE: an unused inner helper (cycle_over_tasks) that duplicated the
    # loop below was removed; it was never called and late-bound task_ids.
    while True:
        now = int(time())
        # Get list of tasks on the schedule that are due.
        task_ids = list(
            map(int, redis_connection.zrangebyscore(scheduler_key, 0, now)))
        # Cycle over each and call the task. Any connection errors will trigger
        # a longer wait before retrying.
        try:
            for task_id in task_ids:
                tasks[task_id]()
        except requests.exceptions.ConnectionError as err:
            current_app.logger.warning(
                "Connection error. Retrying in {} seconds... \nError: {}".
                format(SCHEDULER_RETRY_INTERVAL, err))
            sleep(SCHEDULER_RETRY_INTERVAL)
        sleep(SCHEDULER_INTERVAL)
def __init__(self, user_sessions, puzzle, puzzle_id, table_width,
             table_height):
    """Set up shared state for a group of player sessions on one puzzle.

    Clears each session's karma redis key, fetches the current piece
    positions through the first session, and records which pieces are
    still movable.
    """
    self.user_sessions = user_sessions
    self.puzzle = puzzle
    self.puzzle_id = puzzle_id
    # Give every session a clean karma slate before the run starts.
    for session in self.user_sessions:
        session_karma_key = init_karma_key(redis_connection, self.puzzle,
                                           session.ip, current_app.config)
        redis_connection.delete(session_karma_key)
    self.puzzle_pieces = self.user_sessions[0].get_data(
        "/puzzle-pieces/{0}/".format(self.puzzle_id), "api")
    # Short random mark identifying this client for token requests.
    self.mark = uuid4().hex[:10]
    self.table_width = table_width
    self.table_height = table_height
    # Pieces whose "s" flag is "1" are excluded — presumably already
    # placed/immovable; confirm against the piece status semantics.
    self.movable_pieces = [
        position["id"] for position in self.puzzle_pieces["positions"]
        if position["s"] != "1"
    ]
def patch(self, puzzle_id):
    """Pong. Determine the latency for this player.

    Reads the ping token stored when the ping started, computes the
    round-trip latency in milliseconds, records the player's last-ping
    timestamp in a per-puzzle sorted set, and appends the latency to the
    capped "latency" redis list.

    Returns JSON with name "error" (400), "invalid"/"ignored" (200), or
    "success" (200, with data.latency set).
    """
    response = {"message": "", "name": "", "data": {"latency": 0}}
    args = {}
    xhr_data = request.get_json()
    if xhr_data:
        args.update(xhr_data)
    if request.form:
        args.update(request.form.to_dict(flat=True))

    token = args.get("token")
    if token is None:
        response["message"] = "No token"
        response["name"] = "error"
        return make_response(json.jsonify(response), 400)

    # Identify the player by secure cookie, falling back to ip lookup.
    user = current_app.secure_cookie.get(u"user") or user_id_from_ip(
        request.headers.get("X-Real-IP"),
        skip_generate=True,
        validate_shared_user=False,
    )
    if user is None:
        response["message"] = "Player not currently logged in."
        response["name"] = "error"
        return make_response(json.jsonify(response), 400)
    user = int(user)

    cur = db.cursor()
    # Validate the puzzle_id
    result = cur.execute(
        fetch_query_string("select-id-status-from-puzzle-by-puzzle_id.sql"),
        {"puzzle_id": puzzle_id},
    ).fetchall()
    if not result:
        response["message"] = "Puzzle not available"
        response["name"] = "invalid"
        cur.close()
        return make_response(json.jsonify(response), 400)
    else:
        (result, col_names) = rowify(result, cur.description)
        puzzle = result[0].get("id")
        status = result[0].get("status")
        # Any status outside this set means the puzzle page itself is no
        # longer valid; notify listeners over SSE and answer 200 so the
        # client stops pinging.
        if status not in (
                ACTIVE,
                IN_QUEUE,
                COMPLETED,
                FROZEN,
                BUGGY_UNLISTED,
                NEEDS_MODERATION,
                REBUILD,
                IN_RENDER_QUEUE,
                RENDERING,
                RENDERING_FAILED,
                MAINTENANCE,
        ):
            response["message"] = "Puzzle no longer valid"
            response["name"] = "invalid"
            cur.close()
            sse.publish(
                "Puzzle no longer valid",
                type="invalid",
                channel="puzzle:{puzzle_id}".format(puzzle_id=puzzle_id),
            )
            return make_response(json.jsonify(response), 200)
    cur.close()

    # Determine latency for the player and record timestamp in sorted set.
    pingtoken_key = get_pingtoken_key(puzzle, user, token)
    ping_start = redis_connection.get(pingtoken_key)
    # Token is single-use; remove it as soon as it is read.
    redis_connection.delete(pingtoken_key)
    ping_end = int(time.time() * 1000)
    if not ping_start:
        # No stored start time (expired or unknown token); not an error
        # worth surfacing to the player.
        response["message"] = "Ignoring error when determining latency."
        response["name"] = "ignored"
        return make_response(json.jsonify(response), 200)
    ping_start = int(ping_start)

    ping_key = get_ping_key(puzzle)
    redis_connection.zadd(ping_key, {user: ping_end})
    redis_connection.expire(ping_key, PING_EXPIRE)
    latency = ping_end - ping_start

    # Record the latency for the player
    redis_connection.lpush(
        "latency",
        "{user}:{timestamp}:{latency}".format(
            user=user, timestamp=ping_end, latency=latency
        ),
    )
    # Keep only the last 1000 entries to latency
    redis_connection.ltrim("latency", 0, 999)
    response["message"] = "Latency"
    response["data"]["latency"] = latency
    response["name"] = "success"
    response = make_response(json.jsonify(response), 200)
    return response
def post(self):
    """Batch edit puzzles from the admin moderation page.

    Route is protected by basic auth in nginx.

    Form args: "action" (must be in ACTIONS), plus "reject"/"delete"/"tag"
    detail values and 1-20 "montage_puzzle_id" entries.  Sets the matching
    status on each puzzle, removes puzzle resources for deletes, purges
    the nginx cache for each edited puzzle, and enqueues render jobs for
    approvals.
    """
    args = {}
    if request.form:
        args.update(request.form.to_dict(flat=True))

    # TODO: Check user to see if role matches?
    # user = current_app.secure_cookie.get(u'user')
    # if not user:
    #     abort(403)

    # Verify args
    action = args.get("action")
    if action not in ACTIONS:
        abort(400)
    reject = args.get("reject")
    if action == "reject" and reject not in ("license", "attribution"):
        abort(400)
    delete = args.get("delete")
    if action == "delete" and delete not in ("license", "inapt", "old",
                                             "request"):
        abort(400)
    # abort if tag value not set
    tag = args.get("tag")
    if action == "tag" and not tag:
        abort(400)

    puzzle_ids = request.form.getlist("montage_puzzle_id")
    if len(puzzle_ids) == 0 or len(puzzle_ids) > 20:
        abort(400)
    # NOTE(review): getlist always returns a list, so this branch looks
    # unreachable; kept as-is.
    if not isinstance(puzzle_ids, list):
        puzzle_ids = [puzzle_ids]

    cur = db.cursor()
    status = None
    if action == "approve":
        # TODO: May need to be set to REBUILD if it is an existing puzzle,
        # otherwise the preview_full.jpg will be recreated. Use new
        # "rebuild" action instead of just "approve".
        status = IN_RENDER_QUEUE
    if action == "reject":
        if reject == "license":
            status = FAILED_LICENSE
        elif reject == "attribution":
            status = NO_ATTRIBUTION
    if action == "delete":
        if delete == "license":
            status = DELETED_LICENSE
        elif delete == "inapt":
            status = DELETED_INAPT
        elif delete == "old":
            status = DELETED_OLD
        elif delete == "request":
            status = DELETED_REQUEST
        # NOTE(review): nesting of this cleanup loop under the "delete"
        # action is reconstructed from mangled formatting — confirm
        # against version history before relying on it.
        for puzzle_id in puzzle_ids:
            delete_puzzle_resources(puzzle_id)
            id = cur.execute(
                fetch_query_string("select_puzzle_id_by_puzzle_id.sql"),
                {
                    "puzzle_id": puzzle_id
                },
            ).fetchone()[0]
            # current_app.logger.info('deleting puzzle resources for id {}'.format(id))
            cur.execute(
                fetch_query_string("delete_puzzle_file_for_puzzle.sql"),
                {"puzzle": id},
            )
            cur.execute(fetch_query_string("delete_piece_for_puzzle.sql"),
                        {"puzzle": id})
            cur.execute(fetch_query_string("delete_puzzle_timeline.sql"),
                        {"puzzle": id})
            # Drop cached timeline/score data for the deleted puzzle.
            redis_connection.delete("timeline:{puzzle}".format(puzzle=id))
            redis_connection.delete("score:{puzzle}".format(puzzle=id))
        db.commit()

    def each(puzzle_ids):
        # Yield executemany params pairing each puzzle_id with the status.
        for puzzle_id in puzzle_ids:
            yield {"puzzle_id": puzzle_id, "status": status}

    cur.executemany(
        fetch_query_string("update_puzzle_status_for_puzzle_id.sql"),
        each(puzzle_ids),
    )
    db.commit()

    # Invalidate the cached front page for each edited puzzle.
    for puzzle_id in puzzle_ids:
        purge_route_from_nginx_cache(
            "/chill/site/front/{puzzle_id}/".format(puzzle_id=puzzle_id),
            current_app.config.get("PURGEURLLIST"),
        )

    if action == "approve":
        puzzles = rowify(
            cur.execute(
                fetch_query_string("select-puzzles-in-render-queue.sql"),
                {
                    "IN_RENDER_QUEUE": IN_RENDER_QUEUE,
                    "REBUILD": REBUILD
                },
            ).fetchall(),
            cur.description,
        )[0]
        print("found {0} puzzles to render".format(len(puzzles)))
        # push each puzzle to artist job queue
        for puzzle in puzzles:
            job = current_app.createqueue.enqueue_call(
                func="api.jobs.pieceRenderer.render",
                args=([puzzle]),
                result_ttl=0,
                timeout="24h",
            )

    # TODO: if action in ('reject', 'delete'):
    # Also apply to any puzzle instances

    cur.close()
    return make_response("204", 204)
def patch(self, puzzle_id, piece):
    """Move a puzzle piece to a new position.

    args:
    x
    y
    r

    Validates args plus Token/Mark headers, enforces the configured
    PUZZLE_RULES (rate limits, karma penalties, player blocking), then
    serializes the actual movement through a redis-based turn queue
    (pzq_current/pzq_next) so only one process runs
    attempt_piece_movement at a time per puzzle.  Responds with JSON
    error payloads on failure and an empty 204 on success.
    """

    def _blockplayer():
        # Escalating block: each block for this ip advances an index into
        # BLOCKEDPLAYER_EXPIRE_TIMEOUTS (clamped to the last entry).
        timeouts = current_app.config["BLOCKEDPLAYER_EXPIRE_TIMEOUTS"]
        blocked_count_ip_key = f"blocked:{ip}"
        expire_index = max(0, redis_connection.incr(blocked_count_ip_key) - 1)
        redis_connection.expire(blocked_count_ip_key, timeouts[-1])
        timeout = timeouts[min(expire_index, len(timeouts) - 1)]
        expires = now + timeout
        blockedplayers_for_puzzle_key = "blockedplayers:{puzzle}".format(
            puzzle=puzzle)
        # Add the player to the blocked players list for the puzzle and
        # extend the expiration of the key.
        redis_connection.zadd(blockedplayers_for_puzzle_key, {user: expires})
        redis_connection.expire(blockedplayers_for_puzzle_key, timeouts[-1])
        err_msg = get_blockedplayers_err_msg(expires, expires - now)
        sse.publish(
            "{user}:{piece}:{karma}:{karma_change}".format(
                user=user,
                piece=piece,
                karma=karma + recent_points,
                karma_change=karma_change,
            ),
            type="karma",
            channel="puzzle:{puzzle_id}".format(
                puzzle_id=puzzle_data["puzzle_id"]),
        )
        return make_response(json.jsonify(err_msg), 429)

    ip = request.headers.get("X-Real-IP")
    validate_token = (len({"all", "valid_token"}.intersection(
        current_app.config["PUZZLE_RULES"])) > 0)
    user = None
    now = int(time.time())

    # validate the args and headers
    args = {}
    xhr_data = request.get_json()
    if xhr_data:
        args.update(xhr_data)
    if request.form:
        args.update(request.form.to_dict(flat=True))
    if len(list(args.keys())) == 0:
        err_msg = {
            "msg": "invalid args",
            "type": "invalid",
            "expires": now + 5,
            "timeout": 5,
        }
        return make_response(json.jsonify(err_msg), 400)
    # check if args are only in acceptable set
    if len(self.ACCEPTABLE_ARGS.intersection(set(args.keys()))) != len(
            list(args.keys())):
        err_msg = {
            "msg": "invalid args",
            "type": "invalid",
            "expires": now + 5,
            "timeout": 5,
        }
        return make_response(json.jsonify(err_msg), 400)
    # validate that all values are int
    for key, value in list(args.items()):
        if not isinstance(value, int):
            try:
                args[key] = int(value)
            except ValueError:
                err_msg = {
                    "msg": "invalid args",
                    "type": "invalid",
                    "expires": now + 5,
                    "timeout": 5,
                }
                return make_response(json.jsonify(err_msg), 400)
    x = args.get("x")
    y = args.get("y")
    r = args.get("r")
    snapshot_id = request.headers.get("Snap")

    # Token is to make sure puzzle is still in sync.
    # validate the token
    token = request.headers.get("Token")
    if not token:
        err_msg = {
            "msg": "Missing token",
            "type": "missing",
            "expires": now + 5,
            "timeout": 5,
        }
        return make_response(json.jsonify(err_msg), 400)
    mark = request.headers.get("Mark")
    if not mark:
        err_msg = {
            "msg": "Missing mark",
            "type": "missing",
            "expires": now + 5,
            "timeout": 5,
        }
        return make_response(json.jsonify(err_msg), 400)

    # start = time.perf_counter()
    existing_token = redis_connection.get(f"t:{mark}")
    if validate_token and existing_token:
        # Stored token is "puzzle:piece:user"; the user part identifies
        # the player without a cookie lookup.
        (m_puzzle, m_piece, m_user) = existing_token.split(":")
        user = int(m_user)
    else:
        user = current_app.secure_cookie.get("user") or user_id_from_ip(
            ip, validate_shared_user=False)
        if user is None:
            err_msg = {
                "msg": "Please reload the page.",
                "reason": "The player login was not found.",
                "type": "puzzlereload",
                "timeout": 300,
            }
            return make_response(json.jsonify(err_msg), 400)
        user = int(user)

    # Puzzle details are cached in redis under pzq:* for 300 seconds;
    # fall back to the internal API when the cache is cold.
    pzq_key = "pzq:{puzzle_id}".format(puzzle_id=puzzle_id)
    pzq_fields = [
        "puzzle",
        "table_width",
        "table_height",
        "permission",
        "pieces",
    ]
    puzzle_data = dict(
        zip(pzq_fields, redis_connection.hmget(pzq_key, pzq_fields)))
    puzzle = puzzle_data.get("puzzle")
    if puzzle is None:
        req = requests.get(
            "http://{HOSTAPI}:{PORTAPI}/internal/puzzle/{puzzle_id}/details/"
            .format(
                HOSTAPI=current_app.config["HOSTAPI"],
                PORTAPI=current_app.config["PORTAPI"],
                puzzle_id=puzzle_id,
            ),
        )
        if req.status_code >= 400:
            err_msg = {"msg": "puzzle not available", "type": "missing"}
            return make_response(json.jsonify(err_msg), req.status_code)
        try:
            result = req.json()
        except ValueError as err:
            err_msg = {"msg": "puzzle not available", "type": "missing"}
            return make_response(json.jsonify(err_msg), 500)
        if result.get("status") not in (ACTIVE, BUGGY_UNLISTED):
            err_msg = {"msg": "puzzle not available", "type": "missing"}
            return make_response(json.jsonify(err_msg), 404)
        puzzle_data = result
        puzzle_data["puzzle"] = result["id"]
        redis_connection.hmset(
            pzq_key,
            {
                "puzzle": puzzle_data["puzzle"],
                "table_width": puzzle_data["table_width"],
                "table_height": puzzle_data["table_height"],
                "permission": puzzle_data["permission"],
                "pieces": puzzle_data["pieces"],
            },
        )
        redis_connection.expire(pzq_key, 300)
    else:
        # redis returns strings; coerce the cached fields back to int.
        puzzle_data["puzzle"] = int(puzzle_data["puzzle"])
        puzzle_data["table_width"] = int(puzzle_data["table_width"])
        puzzle_data["table_height"] = int(puzzle_data["table_height"])
        puzzle_data["permission"] = int(puzzle_data["permission"])
        puzzle_data["pieces"] = int(puzzle_data["pieces"])
    puzzle = int(puzzle_data["puzzle"])
    puzzle_data["puzzle_id"] = puzzle_id

    puzzle_piece_token_key = get_puzzle_piece_token_key(puzzle, piece)
    # NOTE(review): validate_token was already computed above; this repeat
    # is redundant but harmless.
    validate_token = (len({"all", "valid_token"}.intersection(
        current_app.config["PUZZLE_RULES"])) > 0)
    if validate_token:
        token_and_mark = redis_connection.get(puzzle_piece_token_key)
        if token_and_mark:
            (valid_token, other_mark) = token_and_mark.split(":")
            # other_user = int(other_user)
            if token != valid_token:
                err_msg = increase_ban_time(user, TOKEN_INVALID_BAN_TIME_INCR)
                err_msg["reason"] = "Token is invalid"
                return make_response(json.jsonify(err_msg), 409)
            if mark != other_mark:
                err_msg = increase_ban_time(user, TOKEN_INVALID_BAN_TIME_INCR)
                err_msg["reason"] = "Player is invalid"
                return make_response(json.jsonify(err_msg), 409)
        else:
            err_msg = {
                "msg": "Token has expired",
                "type": "expiredtoken",
                "reason": "",
            }
            return make_response(json.jsonify(err_msg), 409)

    # Expire the token since it shouldn't be used again
    if validate_token:
        redis_connection.delete(puzzle_piece_token_key)
        redis_connection.delete(f"t:{mark}")

    if (len({"all", "piece_translate_rate"}.intersection(
            current_app.config["PUZZLE_RULES"])) > 0):
        err_msg = bump_count(user)
        if err_msg.get("type") == "bannedusers":
            return make_response(json.jsonify(err_msg), 429)

    # Check if piece will be moved to within boundaries
    # NOTE(review): the truthiness check means x == 0 (or y == 0) skips
    # the bounds test; 0 is in bounds anyway so the result is the same.
    if x and (x < 0 or x > puzzle_data["table_width"]):
        err_msg = {
            "msg": "Piece movement out of bounds",
            "type": "invalidpiecemove",
            "expires": now + 5,
            "timeout": 5,
        }
        return make_response(json.jsonify(err_msg), 400)
    if y and (y < 0 or y > puzzle_data["table_height"]):
        err_msg = {
            "msg": "Piece movement out of bounds",
            "type": "invalidpiecemove",
            "expires": now + 5,
            "timeout": 5,
        }
        return make_response(json.jsonify(err_msg), 400)

    # Check again if piece can be moved and hasn't changed since getting token
    has_y = redis_connection.hget(
        "pc:{puzzle}:{piece}".format(puzzle=puzzle, piece=piece), "y")
    if has_y is None:
        err_msg = {"msg": "piece not available", "type": "missing"}
        return make_response(json.jsonify(err_msg), 404)

    if redis_connection.sismember(f"pcfixed:{puzzle}", piece) == 1:
        # immovable
        err_msg = {
            "msg": "piece can't be moved",
            "type": "immovable",
            "expires": now + 5,
            "timeout": 5,
        }
        return make_response(json.jsonify(err_msg), 400)

    # Notify the enforcer process of the requested translation.
    (_, _, _, origin_x, origin_y, _) = unpack_token(token)
    redis_connection.publish(
        f"enforcer_piece_translate:{puzzle}",
        f"{user}:{piece}:{origin_x}:{origin_y}:{x}:{y}",
    )

    points_key = "points:{user}".format(user=user)
    recent_points = int(redis_connection.get(points_key) or "0")
    karma_key = init_karma_key(redis_connection, puzzle, ip,
                               current_app.config)
    karma = int(redis_connection.get(karma_key))
    karma_change = 0
    current_app.logger.debug(
        f"user: {user} ip: {ip} karma: {karma} recent_points {recent_points}"
    )

    if (len({"all", "puzzle_open_rate"}.intersection(
            current_app.config["PUZZLE_RULES"])) > 0):
        # Decrease recent points if this is a new puzzle that user hasn't moved pieces on yet in the last hour
        pzrate_key = "pzrate:{user}:{today}".format(
            user=user, today=datetime.date.today().isoformat())
        if redis_connection.sadd(pzrate_key, puzzle) == 1:
            # New puzzle that player hasn't moved a piece on in the last hour.
            redis_connection.expire(pzrate_key, HOUR)
            if recent_points > 0:
                redis_connection.decr(points_key)

    if (len({"all", "piece_move_rate"}.intersection(
            current_app.config["PUZZLE_RULES"])) > 0):
        # Decrease karma if piece movement rate has passed threshold
        pcrate_key = f"pcrate:{puzzle}:{user}"
        moves = redis_connection.incr(pcrate_key)
        redis_connection.expire(pcrate_key, PIECE_MOVEMENT_RATE_TIMEOUT)
        if moves > PIECE_MOVEMENT_RATE_LIMIT:
            if karma > 0:
                karma = redis_connection.decr(karma_key)
            karma_change -= 1

    if (len({"all", "hot_piece"}.intersection(
            current_app.config["PUZZLE_RULES"])) > 0):
        # Decrease karma when moving the same piece multiple times within
        # a minute.
        hotpc_key = f"hotpc:{puzzle}:{user}:{piece}"
        recent_move_count = redis_connection.incr(hotpc_key)
        redis_connection.expire(hotpc_key, HOT_PIECE_MOVEMENT_RATE_TIMEOUT)
        if recent_move_count > MOVES_BEFORE_PENALTY:
            if karma > 0:
                karma = redis_connection.decr(karma_key)
            karma_change -= 1

    if (len({"all", "hot_spot"}.intersection(
            current_app.config["PUZZLE_RULES"])) > 0):
        # Decrease the karma for the player if the piece is in a hotspot.
        hotspot_piece_key = f"hotspot:{puzzle}:{user}:{piece}"
        hotspot_count = int(redis_connection.get(hotspot_piece_key) or "0")
        if hotspot_count > HOTSPOT_LIMIT:
            if karma > 0:
                karma = redis_connection.decr(karma_key)
            karma_change -= 1

    if karma_change < 0:
        # Decrease recent points for a piece move that decreased karma
        if recent_points > 0 and karma_change < 0:
            recent_points = redis_connection.decr(points_key)
        if karma + recent_points <= 0:
            return _blockplayer()

    piece_move_timeout = current_app.config["PIECE_MOVE_TIMEOUT"]
    # Use a custom built and managed queue to prevent multiple processes
    # from running the attempt_piece_movement concurrently on the same
    # puzzle.
    pzq_current_key = "pzq_current:{puzzle}".format(puzzle=puzzle)
    pzq_next_key = "pzq_next:{puzzle}".format(puzzle=puzzle)
    # The attempt_piece_movement bumps the pzq_current by 1
    pzq_next = redis_connection.incr(pzq_next_key, amount=1)
    # Set the expire in case it fails to reach expire in attempt_piece_movement.
    redis_connection.expire(pzq_current_key, piece_move_timeout + 2)
    redis_connection.expire(pzq_next_key, piece_move_timeout + 2)
    attempt_count = 0
    attempt_timestamp = time.time()
    timeout = attempt_timestamp + piece_move_timeout
    while attempt_timestamp < timeout:
        pzq_current = int(redis_connection.get(pzq_current_key) or "0")
        if pzq_current == pzq_next - 1:
            # It is this request's turn in the queue.
            try:
                snapshot_msg = None
                snapshot_karma_change = False
                if snapshot_id:
                    snapshot_key = f"snap:{snapshot_id}"
                    snapshot = redis_connection.get(snapshot_key)
                    if snapshot:
                        snapshot_list = snapshot.split(":")
                        snapshot_pzq = int(snapshot_list.pop(0))
                        if snapshot_pzq != pzq_current:
                            # Check if any adjacent pieces are within range of x, y, r
                            # Within that list check if any have moved
                            # With the first one that has moved that was within range attempt piece movement on that by using adjusted x, y, r
                            snaps = list(
                                map(lambda x: x.split("_"), snapshot_list))
                            adjacent_piece_ids = list(
                                map(lambda x: int(x[0]), snaps))
                            adjacent_piece_props_snaps = list(
                                map(lambda x: x[1:], snaps))
                            property_list = [
                                "x",
                                "y",
                                "r",
                                # "g"
                            ]
                            results = []
                            with redis_connection.pipeline(
                                    transaction=True) as pipe:
                                for adjacent_piece_id in adjacent_piece_ids:
                                    pc_puzzle_adjacent_piece_key = (
                                        f"pc:{puzzle}:{adjacent_piece_id}")
                                    pipe.hmget(
                                        pc_puzzle_adjacent_piece_key,
                                        property_list,
                                    )
                                results = pipe.execute()
                            for (
                                    a_id,
                                    snapshot_adjacent,
                                    updated_adjacent,
                            ) in zip(
                                    adjacent_piece_ids,
                                    adjacent_piece_props_snaps,
                                    results,
                            ):
                                updated_adjacent = list(
                                    map(
                                        lambda x: x
                                        if isinstance(x, str) else "",
                                        updated_adjacent,
                                    ))
                                adjacent_offset = snapshot_adjacent.pop()
                                if (snapshot_adjacent != updated_adjacent
                                        ) and adjacent_offset:
                                    (a_offset_x, a_offset_y) = map(
                                        int, adjacent_offset.split(","))
                                    (a_snap_x, a_snap_y) = map(
                                        int, snapshot_adjacent[:2])
                                    # Check if the x,y is within range of the adjacent piece that has moved
                                    piece_join_tolerance = current_app.config[
                                        "PIECE_JOIN_TOLERANCE"]
                                    if (abs((a_snap_x + a_offset_x) - x) <=
                                            piece_join_tolerance and
                                            abs((a_snap_y + a_offset_y) - y)
                                            <= piece_join_tolerance):
                                        (a_moved_x, a_moved_y) = map(
                                            int, updated_adjacent[:2])
                                        # Decrease pzq_current since it is moving an extra piece out of turn
                                        redis_connection.decr(
                                            pzq_current_key, amount=1)
                                        (
                                            snapshot_msg,
                                            snapshot_karma_change,
                                        ) = attempt_piece_movement(
                                            ip,
                                            user,
                                            puzzle_data,
                                            piece,
                                            a_moved_x + a_offset_x,
                                            a_moved_y + a_offset_y,
                                            r,
                                            karma_change,
                                            karma,
                                        )
                                        break
            except:
                # NOTE(review): bare except, but it re-raises after
                # restoring the queue counter, so errors still propagate.
                pzq_current = int(
                    redis_connection.get(pzq_current_key) or "0")
                if pzq_current == pzq_next - 1:
                    # skip this piece move attempt
                    redis_connection.incr(pzq_current_key, amount=1)
                current_app.logger.warning(
                    "results123 other error {}".format(sys.exc_info()[0]))
                raise
            (msg, karma_change) = attempt_piece_movement(
                ip,
                user,
                puzzle_data,
                piece,
                x,
                y,
                r,
                karma_change or snapshot_karma_change,
                karma,
            )
            if isinstance(snapshot_msg, str) and isinstance(msg, str):
                msg = snapshot_msg + msg
            break
        current_app.logger.debug(f"pzq_current is {pzq_current}")
        attempt_timestamp = time.time()
        attempt_count = attempt_count + 1
        # TODO: The sleep time should be set based on an average time it
        # takes to process piece movements.
        time.sleep(0.02)
        # Decrease karma here to potentially block a player that
        # continually tries to move pieces when a puzzle is too active.
        # NOTE(review): loop nesting reconstructed from mangled source —
        # confirm this penalty runs once per wait iteration.
        if (len({"all", "too_active"}.intersection(
                current_app.config["PUZZLE_RULES"])) > 0) and karma > 0:
            karma = redis_connection.decr(karma_key)
            karma_change -= 1
    current_app.logger.debug(
        f"Puzzle ({puzzle}) piece move attempts: {attempt_count}")
    if attempt_timestamp >= timeout:
        current_app.logger.warn(
            f"Puzzle {puzzle} is too active. Attempt piece move timed out after trying {attempt_count} times."
        )
        err_msg = {
            "msg": "Piece movement timed out.",
            "type": "error",
            "reason": "Puzzle is too active",
            "timeout": piece_move_timeout,
        }
        return make_response(
            json.jsonify(err_msg),
            503,
        )

    # Check msg for error or if piece can't be moved
    if not isinstance(msg, str):
        if isinstance(msg, dict):
            return make_response(json.jsonify(msg), 400)
        else:
            current_app.logger.warning("Unknown error: {}".format(msg))
            return make_response(
                json.jsonify({
                    "msg": msg,
                    "type": "error",
                    "timeout": 3
                }), 500)

    # publish just the bit movement so it matches what this player did
    bitmsg = formatBitMovementString(user, x, y)
    sse.publish(
        bitmsg,
        type="move",
        channel="puzzle:{puzzle_id}".format(puzzle_id=puzzle_id),
    )

    if karma_change < 0:
        if karma + recent_points <= 0:
            return _blockplayer()

    # end = time.perf_counter()
    # current_app.logger.debug("PuzzlePiecesMovePublishView {}".format(end - start))
    return make_response("", 204)
def move_random_piece(self, user_session):
    """Pick one movable piece and move it to a random spot for user_session.

    Fetches a single-use piece token, then issues the move.  Errors from
    either request reset the session's karma key and unban the shared
    user so the load test can keep running; a "too_active" move error
    backs off for 30 seconds instead.  On a successful (empty 204) move
    response the request duration is pushed to the testdata:pa list.
    """
    piece_id = choice(self.movable_pieces)
    # Destination kept at least 100 px inside the right/bottom edges.
    x = randint(0, self.table_width - 100)
    y = randint(0, self.table_height - 100)
    start = time.perf_counter()
    piece_token = None
    try:
        piece_token = user_session.get_data(
            "/puzzle/{puzzle_id}/piece/{piece_id}/token/?mark={mark}".
            format(
                puzzle_id=self.puzzle_id,
                piece_id=piece_id,
                mark=self.mark,
            ),
            "publish",
        )
    except Exception as err:
        # ("resetting karma for {ip}".format(ip=user_session.ip))
        karma_key = init_karma_key(redis_connection, self.puzzle,
                                   user_session.ip, current_app.config)
        redis_connection.delete(karma_key)
        redis_connection.zrem("bannedusers", user_session.shareduser)
        # current_app.logger.debug(f"get token error: {err}")
        if str(err) == "blockedplayer":
            blockedplayers_for_puzzle_key = "blockedplayers:{puzzle}".format(
                puzzle=self.puzzle)
            # current_app.logger.debug("clear out {}".format(blockedplayers_for_puzzle_key))
            redis_connection.delete(blockedplayers_for_puzzle_key)
        # NOTE(review): return placement reconstructed from mangled
        # formatting — assumed to end the except handler; confirm.
        return
    if piece_token and piece_token.get("token"):
        puzzle_pieces_move = None
        try:
            puzzle_pieces_move = user_session.patch_data(
                "/puzzle/{puzzle_id}/piece/{piece_id}/move/".format(
                    puzzle_id=self.puzzle_id, piece_id=piece_id),
                "publish",
                payload={
                    "x": x,
                    "y": y,
                    "r": 0
                },
                headers={
                    "Token": piece_token["token"],
                    "Mark": self.mark
                },
            )
        except Exception as err:
            if str(err) == "too_active":
                # Back off when the puzzle reports too much activity.
                redis_connection.incr("testdata:too_active")
                time.sleep(30)
            else:
                # current_app.logger.debug('move exception {}'.format(err))
                # current_app.logger.debug("resetting karma for {ip}".format(ip=user_session.ip))
                karma_key = init_karma_key(
                    redis_connection,
                    self.puzzle,
                    user_session.ip,
                    current_app.config,
                )
                redis_connection.delete(karma_key)
                redis_connection.zrem("bannedusers", user_session.shareduser)
            return
        if puzzle_pieces_move:
            if puzzle_pieces_move.get("msg") == "boing":
                raise Exception("boing")
            # Reset karma:puzzle:ip redis key when it gets low
            if puzzle_pieces_move["karma"] < 2:
                # print("resetting karma for {ip}".format(ip=user_session.ip))
                karma_key = init_karma_key(
                    redis_connection,
                    self.puzzle,
                    user_session.ip,
                    current_app.config,
                )
                redis_connection.delete(karma_key)
        else:
            # empty response (204) means success
            end = time.perf_counter()
            duration = end - start
            redis_connection.rpush("testdata:pa", duration)
def archive_and_clear(puzzle):
    """
    Create an archive file for all timeline data for this puzzle.
    Clear the timeline entries in the database.

    Writes the timeline rows as compact JSON into the next available file
    under PUZZLE_ARCHIVE/<puzzle>/timeline/, asks the internal API to
    delete the timeline entries, and clears the cached timeline/score
    redis keys.  Returns None; logs and bails out early when the puzzle
    or its timeline is missing, or when the API delete fails.
    """
    cur = db.cursor()
    result = cur.execute(
        fetch_query_string("select-all-from-puzzle-by-id.sql"),
        {
            "puzzle": puzzle
        },
    ).fetchall()
    if not result:
        current_app.logger.warn(
            "no puzzle details found for puzzle {}".format(puzzle))
        cur.close()
        return
    (result, col_names) = rowify(result, cur.description)
    puzzle_data = result[0]
    puzzle_id = puzzle_data["puzzle_id"]
    result = cur.execute(query_select_timeline_for_puzzle, {
        "puzzle": puzzle
    }).fetchall()
    if not result:
        # No timeline entries to archive.
        cur.close()
        return
    (result, col_names) = rowify(result, cur.description)
    puzzle_directory = os.path.join(current_app.config.get("PUZZLE_ARCHIVE"),
                                    str(puzzle))
    try:
        os.mkdir(puzzle_directory)
    except OSError:
        # directory already exists
        pass
    timeline_directory = os.path.join(puzzle_directory, "timeline")
    try:
        os.mkdir(timeline_directory)
    except OSError:
        # directory already exists
        pass
    archive_filename = get_next_file(timeline_directory)
    # Use a context manager so the file handle is closed even when
    # json.dump raises (the previous open/close pair leaked on error).
    with open(archive_filename, "w") as archive_file:
        json.dump(result, archive_file, separators=(",", ":"),
                  sort_keys=True)
    r = requests.delete(
        "http://{HOSTAPI}:{PORTAPI}/internal/puzzle/{puzzle_id}/timeline/".
        format(
            HOSTAPI=current_app.config["HOSTAPI"],
            PORTAPI=current_app.config["PORTAPI"],
            puzzle_id=puzzle_id,
        ),
    )
    if r.status_code != 200:
        current_app.logger.warning(
            "Puzzle timeline api error. Could not delete timeline entries for puzzle. Skipping {puzzle_id}"
            .format(puzzle_id=puzzle_id, ))
        cur.close()
        return
    # Drop cached timeline and score data now that the rows are gone.
    redis_connection.delete("timeline:{puzzle}".format(puzzle=puzzle))
    redis_connection.delete("score:{puzzle}".format(puzzle=puzzle))
    cur.close()
def patch(self, puzzle_id):
    """Pong. Determine the latency for this player.

    Reads the ping token stored when the ping started, computes the
    round-trip latency in milliseconds, records the player's last-ping
    timestamp in a per-puzzle sorted set, and appends the latency to the
    capped "latency" redis list.

    Returns JSON with name "error" (400), "invalid"/"ignored" (200), or
    "success" (200, with data.latency set).
    """
    response = {"message": "", "name": "", "data": {"latency": 0}}
    args = {}
    xhr_data = request.get_json()
    if xhr_data:
        args.update(xhr_data)
    if request.form:
        args.update(request.form.to_dict(flat=True))

    token = args.get("token")
    # PEP 8: compare against the None singleton with "is", not "==".
    if token is None:
        response["message"] = "No token"
        response["name"] = "error"
        return make_response(json.jsonify(response), 400)

    # Identify the player by secure cookie, falling back to ip lookup.
    user = current_app.secure_cookie.get(u"user") or user_id_from_ip(
        request.headers.get("X-Real-IP"), skip_generate=True)
    if user is None:
        response["message"] = "Player not currently logged in."
        response["name"] = "error"
        return make_response(json.jsonify(response), 400)
    user = int(user)

    cur = db.cursor()
    # Validate the puzzle_id
    result = cur.execute(
        fetch_query_string("select_viewable_puzzle_id.sql"),
        {
            "puzzle_id": puzzle_id
        },
    ).fetchall()
    if not result:
        response["message"] = "Invalid puzzle id."
        response["name"] = "error"
        cur.close()
        db.commit()
        return make_response(json.jsonify(response), 400)
    else:
        (result, col_names) = rowify(result, cur.description)
        puzzle = result[0].get("puzzle")
        status = result[0].get("status")
        if status != ACTIVE:
            response["message"] = "Puzzle not active"
            response["name"] = "invalid"
            cur.close()
            db.commit()
            return make_response(json.jsonify(response), 200)
    # Close the cursor on the success path as well (it previously leaked
    # here; the sibling ping view closes it at this point).
    cur.close()

    # Determine latency for the player and record timestamp in sorted set.
    pingtoken_key = get_pingtoken_key(puzzle, user, token)
    ping_start = redis_connection.get(pingtoken_key)
    # Token is single-use; remove it as soon as it is read.
    redis_connection.delete(pingtoken_key)
    ping_end = int(time.time() * 1000)
    if not ping_start:
        # No stored start time (expired or unknown token); not an error
        # worth surfacing to the player.
        response["message"] = "Ignoring error when determining latency."
        response["name"] = "ignored"
        return make_response(json.jsonify(response), 200)
    ping_start = int(ping_start)

    ping_key = get_ping_key(puzzle)
    redis_connection.zadd(ping_key, {user: ping_end})
    redis_connection.expire(ping_key, PING_EXPIRE)
    latency = ping_end - ping_start

    # Record the latency for the player
    redis_connection.lpush(
        "latency",
        "{user}:{timestamp}:{latency}".format(user=user,
                                              timestamp=ping_end,
                                              latency=latency),
    )
    # Keep only the last 1000 entries to latency
    redis_connection.ltrim("latency", 0, 999)
    response["message"] = "Latency"
    response["data"]["latency"] = latency
    response["name"] = "success"
    response = make_response(json.jsonify(response), 200)
    return response