Example #1: block a player on a puzzle with an escalating timeout and publish the karma change over SSE.
        def _blockplayer():
            timeouts = current_app.config["BLOCKEDPLAYER_EXPIRE_TIMEOUTS"]
            blocked_count_ip_key = f"blocked:{ip}"
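            # Each repeated block for this IP steps through the configured
            # timeouts, capping at the last (longest) one.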
            expire_index = max(0,
                               redis_connection.incr(blocked_count_ip_key) - 1)
            redis_connection.expire(blocked_count_ip_key, timeouts[-1])
            timeout = timeouts[min(expire_index, len(timeouts) - 1)]
            expires = now + timeout
            blockedplayers_for_puzzle_key = "blockedplayers:{puzzle}".format(
                puzzle=puzzle)
            # Add the player to the blocked players list for the puzzle and
            # extend the expiration of the key.
            redis_connection.zadd(blockedplayers_for_puzzle_key,
                                  {user: expires})
            redis_connection.expire(blockedplayers_for_puzzle_key,
                                    timeouts[-1])

            err_msg = get_blockedplayers_err_msg(expires, expires - now)
            sse.publish(
                "{user}:{piece}:{karma}:{karma_change}".format(
                    user=user,
                    piece=piece,
                    karma=karma + recent_points,
                    karma_change=karma_change,
                ),
                type="karma",
                channel="puzzle:{puzzle_id}".format(
                    puzzle_id=puzzle_data["puzzle_id"]),
            )
            return make_response(json.jsonify(err_msg), 429)
Example #2: a scheduled task's __call__ method, which runs the task and records its next due time in the scheduler sorted set.
    def __call__(self):
        self.do_task()
        if self.id is None:
            # The id may be None if the task is being run manually. Running
            # a task manually shouldn't change the schedule.
            return
        now = int(time())
        due = now + self.interval
        current_app.logger.debug(
            "{format_due} - task {task_name} {task_id} due date".format(
                format_due=ctime(due),
                task_name=self.task_name,
                task_id=self.id,
            ))
        # Record the next due time as this task's score in the scheduler sorted set.
        redis_connection.zadd(scheduler_key, {self.id: due})
Example #3: scheduler loop that creates every registered task, schedules them all to run now, and then runs each task as it comes due.
def all_tasks():
    """
    Cycle through all tasks in the task registry and run them at their set
    interval.
    """

    # Reset scheduler to start by removing any previous scheduled tasks
    redis_connection.delete(scheduler_key)

    now = int(time())
    tasks = {}
    for index, task_class in enumerate(task_registry):
        # Create each task with an id corresponding to its index in the registry
        tasks[index] = task_class(index)

    task_ids_scheduled_to_now = {task_id: now for task_id in tasks}

    # reset all tasks to be scheduled now
    redis_connection.zadd(scheduler_key, task_ids_scheduled_to_now)

    def cycle_over_tasks():
        "Cycle over each and call the task"
        for task_id in task_ids:
            tasks[task_id]()

    while True:
        now = int(time())
        # Get list of tasks on the schedule that are due.
        task_ids = list(
            map(int, redis_connection.zrangebyscore(scheduler_key, 0, now)))

        # Cycle over each and call the task. Any connection errors will trigger
        # a longer wait before retrying.
        try:
            for task_id in task_ids:
                tasks[task_id]()
        except requests.exceptions.ConnectionError as err:
            current_app.logger.warning(
                "Connection error. Retrying in {} seconds... \nError: {}".
                format(SCHEDULER_RETRY_INTERVAL, err))
            sleep(SCHEDULER_RETRY_INTERVAL)

        sleep(SCHEDULER_INTERVAL)
Example #4: extend a user's temporary ban, capping the total ban time at MAX_BAN_TIME.
def increase_ban_time(user, seconds):
    now = int(time.time())
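    # Extend from the existing ban expiration when there is one (never from a
    # time in the past), and cap the total ban at MAX_BAN_TIME from now.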
    current = int(redis_connection.zscore("bannedusers", user) or now)
    current = max(current, now)
    ban_timestamp = min(current + seconds, now + MAX_BAN_TIME)
    redis_connection.zadd("bannedusers", {user: ban_timestamp})
    return {
        "msg": "Temporarily banned. Ban time has increased by {} seconds".format(
            seconds),
        "type": "bannedusers",
        "user": user,
        "expires": ban_timestamp,
        "timeout": ban_timestamp - now,
    }
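
The ban written above can be checked later by reading the same sorted set. A minimal sketch of that read side, assuming the same "bannedusers" key; the connection setup and helper name here are illustrative, not from the source:

import time

import redis

redis_connection = redis.Redis(decode_responses=True)  # illustrative connection


def seconds_until_unbanned(user):
    # zscore returns the ban expiration timestamp stored by increase_ban_time,
    # or None when the user has no ban entry.
    ban_timestamp = redis_connection.zscore("bannedusers", user)
    if ban_timestamp is None:
        return 0
    return max(0, int(ban_timestamp) - int(time.time()))
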
Example #5: "pong" handler that validates the puzzle, computes the player's latency from a stored ping token, and records it.
    def patch(self, puzzle_id):
        "Pong. Determine the latency for this player."
        response = {"message": "", "name": "", "data": {"latency": 0}}

        args = {}
        xhr_data = request.get_json()
        if xhr_data:
            args.update(xhr_data)
        if request.form:
            args.update(request.form.to_dict(flat=True))

        token = args.get("token")
        if token is None:
            response["message"] = "No token"
            response["name"] = "error"
            return make_response(json.jsonify(response), 400)

        user = current_app.secure_cookie.get(u"user") or user_id_from_ip(
            request.headers.get("X-Real-IP"),
            skip_generate=True,
            validate_shared_user=False,
        )

        if user is None:
            response["message"] = "Player not currently logged in."
            response["name"] = "error"
            return make_response(json.jsonify(response), 400)

        user = int(user)

        cur = db.cursor()

        # Validate the puzzle_id
        result = cur.execute(
            fetch_query_string("select-id-status-from-puzzle-by-puzzle_id.sql"),
            {"puzzle_id": puzzle_id},
        ).fetchall()
        if not result:
            response["message"] = "Puzzle not available"
            response["name"] = "invalid"
            cur.close()
            return make_response(json.jsonify(response), 400)
        else:
            (result, col_names) = rowify(result, cur.description)
            puzzle = result[0].get("id")
            status = result[0].get("status")
            if status not in (
                ACTIVE,
                IN_QUEUE,
                COMPLETED,
                FROZEN,
                BUGGY_UNLISTED,
                NEEDS_MODERATION,
                REBUILD,
                IN_RENDER_QUEUE,
                RENDERING,
                RENDERING_FAILED,
                MAINTENANCE,
            ):
                response["message"] = "Puzzle no longer valid"
                response["name"] = "invalid"
                cur.close()
                sse.publish(
                    "Puzzle no longer valid",
                    type="invalid",
                    channel="puzzle:{puzzle_id}".format(puzzle_id=puzzle_id),
                )
                return make_response(json.jsonify(response), 200)

        cur.close()
        # Determine latency for the player and record timestamp in sorted set.
        pingtoken_key = get_pingtoken_key(puzzle, user, token)
        ping_start = redis_connection.get(pingtoken_key)
        redis_connection.delete(pingtoken_key)
        ping_end = int(time.time() * 1000)
        if not ping_start:
            response["message"] = "Ignoring error when determining latency."
            response["name"] = "ignored"
            return make_response(json.jsonify(response), 200)
        ping_start = int(ping_start)
        ping_key = get_ping_key(puzzle)
        redis_connection.zadd(ping_key, {user: ping_end})
        redis_connection.expire(ping_key, PING_EXPIRE)

        latency = ping_end - ping_start

        # Record the latency for the player
        redis_connection.lpush(
            "latency",
            "{user}:{timestamp}:{latency}".format(
                user=user, timestamp=ping_end, latency=latency
            ),
        )
        # Keep only the last 1000 entries to latency
        redis_connection.ltrim("latency", 0, 999)

        response["message"] = "Latency"
        response["data"]["latency"] = latency
        response["name"] = "success"
        response = make_response(json.jsonify(response), 200)
        return response
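
The handler above only reads and deletes the ping token; the step that writes it is not among these examples. A rough sketch of what that "ping" side might look like, reusing get_pingtoken_key, PING_EXPIRE, time, and redis_connection from the same application; the function name and the TTL choice are assumptions:

def start_ping(puzzle, user, token):
    # Store the start time in milliseconds so the patch() handler above can
    # subtract it from ping_end to compute the latency.
    pingtoken_key = get_pingtoken_key(puzzle, user, token)
    redis_connection.set(pingtoken_key, int(time.time() * 1000))
    redis_connection.expire(pingtoken_key, PING_EXPIRE)  # assumed TTL
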
Example #6: publish a piece-move event and update player points, karma, and puzzle timelines; on completion, mark the puzzle complete through the internal API.
    def publishMessage(msg, karma_change, karma, points=0, complete=False):
        # print(topic)
        # print(msg)
        if current_app.config.get("PUZZLE_PIECES_CACHE_TTL"):
            stamp = redis_connection.get(f"pzstamp:{puzzle}")
            if stamp:
                pcu_key = f"pcu:{stamp}"
                redis_connection.rpushx(pcu_key, msg)
        sse.publish(
            msg,
            type="move",
            channel="puzzle:{puzzle_id}".format(
                puzzle_id=puzzleData["puzzle_id"]),
        )

        if user != ANONYMOUS_USER_ID:
            points_key = "points:{user}".format(user=user)
            recent_points = int(redis_connection.get(points_key) or 0)
            if karma_change < 0 and karma <= 0 and recent_points > 0:
                redis_connection.decr(points_key)

        redis_connection.zadd("pcupdates", {puzzle: now})

        if user != ANONYMOUS_USER_ID:
            # bump the m_date for this player on the puzzle and timeline
            redis_connection.zadd("timeline:{puzzle}".format(puzzle=puzzle),
                                  {user: now})
            redis_connection.zadd("timeline", {user: now})

        # Update player points
        if points != 0 and user is not None and user != ANONYMOUS_USER_ID:
            redis_connection.zincrby("score:{puzzle}".format(puzzle=puzzle),
                                     amount=1,
                                     value=user)
            redis_connection.sadd("batchuser", user)
            redis_connection.sadd("batchpuzzle", puzzle)
            redis_connection.incr("batchscore:{user}".format(user=user),
                                  amount=1)
            redis_connection.incr(
                "batchpoints:{puzzle}:{user}".format(puzzle=puzzle, user=user),
                amount=points,
            )
            redis_connection.zincrby("rank", amount=1, value=user)
            pieces = int(puzzleData["pieces"])
            # Skip increasing dots if puzzle is private
            earns = get_earned_points(pieces,
                                      permission=puzzleData.get("permission"))

            # karma = int(redis_connection.get(karma_key))
            ## Max out recent points
            if (earns != 0 and karma >= current_app.config["MAX_KARMA"] and
                    recent_points < current_app.config["MAX_RECENT_POINTS"]):
                recent_points = redis_connection.incr(points_key)
            # Doing small puzzles doesn't increase recent points, just extends points expiration.
            redis_connection.expire(points_key,
                                    current_app.config["RECENT_POINTS_EXPIRE"])

            # Extend the karma points expiration since it has increased
            redis_connection.expire(karma_key,
                                    current_app.config["KARMA_POINTS_EXPIRE"])
            # Max out karma
            if karma < current_app.config["MAX_KARMA"]:
                karma = redis_connection.incr(karma_key)
            karma_change += 1

            redis_connection.incr("batchpoints:{user}".format(user=user),
                                  amount=earns)

        if complete:
            current_app.logger.info("puzzle {puzzle_id} is complete".format(
                puzzle_id=puzzleData["puzzle_id"]))

            sse.publish(
                "status:{}".format(COMPLETED),
                channel="puzzle:{puzzle_id}".format(
                    puzzle_id=puzzleData["puzzle_id"]),
            )

            r = requests.patch(
                "http://{HOSTAPI}:{PORTAPI}/internal/puzzle/{puzzle_id}/details/"
                .format(
                    HOSTAPI=current_app.config["HOSTAPI"],
                    PORTAPI=current_app.config["PORTAPI"],
                    puzzle_id=puzzleData["puzzle_id"],
                ),
                json={
                    "status": COMPLETED,
                    "m_date": time.strftime("%Y-%m-%d %H:%M:%S",
                                            time.gmtime()),
                    "queue": QUEUE_END_OF_LINE,
                },
            )
            if r.status_code != 200:
                raise Exception(
                    "Puzzle details api error when updating puzzle to be complete"
                )
            # Delaying helps avoid issues for players that are moving the last
            # piece of the puzzle as someone else completes it.
            delay = (current_app.config["MAX_PAUSE_PIECES_TIMEOUT"] +
                     current_app.config["PIECE_MOVE_TIMEOUT"] + 2)
            current_app.logger.info(
                f"Delaying puzzle transfer on completed puzzle ({puzzleData['puzzle_id']}) for {delay} seconds"
            )
            job = current_app.cleanupqueue.enqueue_in(
                timedelta(seconds=delay),
                "api.jobs.convertPiecesToDB.transfer",
                puzzle,
                result_ttl=0,
            )

            purge_route_from_nginx_cache(
                "/chill/site/front/{puzzle_id}/".format(
                    puzzle_id=puzzleData["puzzle_id"]),
                current_app.config.get("PURGEURLLIST"),
            )

        if karma_change and user != ANONYMOUS_USER_ID:
            sse.publish(
                "{user}:{piece}:{karma}:{karma_change}".format(
                    user=user,
                    piece=piece,
                    karma=karma + recent_points,
                    karma_change=karma_change,
                ),
                type="karma",
                channel="puzzle:{puzzle_id}".format(
                    puzzle_id=puzzleData["puzzle_id"]),
            )

        # end = time.perf_counter()
        # duration = end - start
        # redis_connection.rpush("testdata:translate", duration)
        # return topic and msg mostly for testing
        return (msg, karma_change)
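
The "score:{puzzle}" sorted set built by zincrby above doubles as a per-puzzle leaderboard. A minimal sketch of reading it back, assuming the same key layout; the connection setup and helper name are illustrative:

import redis

redis_connection = redis.Redis(decode_responses=True)  # illustrative connection


def top_players(puzzle, count=10):
    # Highest scoring players first; each entry is a (user, score) pair.
    return redis_connection.zrevrange(
        "score:{puzzle}".format(puzzle=puzzle), 0, count - 1, withscores=True)
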
Example #7: batch task that flushes per-puzzle batched points into each puzzle's timeline through the internal API and seeds the timeline and score sorted sets on its first run.
    def do_task(self):
        super().do_task()
        made_change = False

        cur = db.cursor()

        puzzle = redis_connection.spop("batchpuzzle")
        while puzzle:
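            # Players active on this puzzle since the last run, with the
            # timestamp of each player's most recent update.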
            last_batch = redis_connection.zrangebyscore(
                "timeline:{puzzle}".format(puzzle=puzzle),
                self.last_run,
                "+inf",
                withscores=True,
            )
            for (user, update_timestamp) in last_batch:
                current_app.logger.debug(
                    "user: {user}, {update_timestamp}".format(
                        user=user, update_timestamp=update_timestamp))
                user = int(user)
                points = int(
                    redis_connection.getset(
                        "batchpoints:{puzzle}:{user}".format(puzzle=puzzle,
                                                             user=user),
                        value=0,
                    ) or "0")
                redis_connection.expire(
                    "batchpoints:{puzzle}:{user}".format(puzzle=puzzle,
                                                         user=user), DAY)
                if points != 0:
                    result = cur.execute(
                        fetch_query_string("select-all-from-puzzle-by-id.sql"),
                        {
                            "puzzle": puzzle
                        },
                    ).fetchall()
                    if not result:
                        current_app.logger.warning(
                            "no puzzle details found for puzzle {}".format(
                                puzzle))
                        continue
                    (result, col_names) = rowify(result, cur.description)
                    puzzle_data = result[0]
                    puzzle_id = puzzle_data["puzzle_id"]

                    timestamp = strftime("%Y-%m-%d %H:%M:%S",
                                         gmtime(update_timestamp))
                    current_app.logger.debug(
                        "{timestamp} - bumping {points} points on {puzzle} ({puzzle_id}) for player: {player}"
                        .format(
                            puzzle=puzzle,
                            puzzle_id=puzzle_id,
                            player=user,
                            points=points,
                            timestamp=timestamp,
                        ))

                    r = requests.post(
                        "http://{HOSTAPI}:{PORTAPI}/internal/puzzle/{puzzle_id}/timeline/"
                        .format(
                            HOSTAPI=current_app.config["HOSTAPI"],
                            PORTAPI=current_app.config["PORTAPI"],
                            puzzle_id=puzzle_id,
                        ),
                        json={
                            "player": user,
                            "points": points,
                            "timestamp": timestamp
                        },
                    )
                    if r.status_code != 200:
                        current_app.logger.warning(
                            "Puzzle timeline api error. Could not add batchpoints. Skipping {puzzle_id}"
                            .format(puzzle_id=puzzle_id, ))
                        continue

                made_change = True
            puzzle = redis_connection.spop("batchpuzzle")

        if self.first_run:
            result = cur.execute(
                read_query_file(
                    "get_list_of_puzzles_in_timeline.sql")).fetchall()
            if result and len(result):
                puzzle_list = list(map(lambda x: x[0], result))
                for puzzle in puzzle_list:
                    result = cur.execute(
                        read_query_file(
                            "select_user_score_and_timestamp_per_puzzle.sql"),
                        {
                            "puzzle": puzzle
                        },
                    ).fetchall()
                    if result and len(result):
                        current_app.logger.info(
                            "Set puzzle ({0}) score and puzzle timeline on {1} players"
                            .format(puzzle, len(result)))
                        user_score = dict(map(lambda x: [x[0], x[1]], result))
                        user_timestamps = dict(
                            map(lambda x: [x[0], int(x[2])], result))
                        redis_connection.zadd(
                            "timeline:{puzzle}".format(puzzle=puzzle),
                            user_timestamps)
                        redis_connection.zadd(
                            "score:{puzzle}".format(puzzle=puzzle), user_score)
                made_change = True

            self.first_run = False

        self.last_run = int(time())

        if made_change:
            self.log_task()

        cur.close()
Example #8: batch task that flushes per-user batched score and points by starting internal tasks, and seeds the rank and timeline sorted sets on its first run.
    def do_task(self):
        super().do_task()
        made_change = False

        cur = db.cursor()

        user = redis_connection.spop("batchuser")
        while user:
            user = int(user)
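            # Atomically read and reset this player's batched score and points
            # so the next batch period starts from zero.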
            score = redis_connection.getset(
                "batchscore:{user}".format(user=user), value=0)
            redis_connection.expire("batchscore:{user}".format(user=user), DAY)
            points = redis_connection.getset(
                "batchpoints:{user}".format(user=user), value=0)
            redis_connection.expire("batchpoints:{user}".format(user=user),
                                    DAY)

            current_app.logger.debug(
                "update user {id} with {points} points and score of {score}".
                format(**{
                    "id": user,
                    "points": points,
                    "score": score
                }))

            r = requests.post(
                "http://{HOSTAPI}:{PORTAPI}/internal/tasks/{task_name}/start/".
                format(
                    HOSTAPI=current_app.config["HOSTAPI"],
                    PORTAPI=current_app.config["PORTAPI"],
                    task_name="update_user_points_and_m_date",
                ),
                json={
                    "player": user,
                    "points": points,
                    "score": score,
                },
            )
            if r.status_code != 200:
                current_app.logger.warning(
                    "Internal tasks api error. Could not run task update_user_points_and_m_date for player {}"
                    .format(user))

            r = requests.post(
                "http://{HOSTAPI}:{PORTAPI}/internal/tasks/{task_name}/start/".
                format(
                    HOSTAPI=current_app.config["HOSTAPI"],
                    PORTAPI=current_app.config["PORTAPI"],
                    task_name="update_bit_icon_expiration",
                ),
                json={
                    "player": user,
                },
            )
            if r.status_code != 200:
                current_app.logger.warning(
                    "Internal tasks api error. Could not run task update_bit_icon_expiration for player {}"
                    .format(user))

            user = redis_connection.spop("batchuser")
            made_change = True

        if self.first_run:
            result = cur.execute(
                read_query_file(
                    "select_user_score_and_timestamp.sql")).fetchall()
            if result and len(result):
                current_app.logger.info(
                    "Set rank and timeline on {0} players".format(len(result)))
                user_scores = dict(map(lambda x: [x[0], x[1]], result))
                user_timestamps = dict(map(lambda x: [x[0], int(x[2])],
                                           result))
                redis_connection.zadd("rank", user_scores)
                redis_connection.zadd("timeline", user_timestamps)
                made_change = True
            self.first_run = False

        if made_change:
            self.log_task()

        cur.close()
Example #9: a publishMessage variant that writes puzzle completion status directly to the database.
    def publishMessage(msg, karma_change, points=0, complete=False):
        # print(topic)
        # print(msg)
        sse.publish(
            msg,
            type="move",
            channel="puzzle:{puzzle_id}".format(
                puzzle_id=puzzleData["puzzle_id"]),
        )

        now = int(time.time())

        redis_connection.zadd("pcupdates", {puzzle: now})

        # TODO:
        # return (topic, msg)

        # bump the m_date for this player on the puzzle and timeline
        redis_connection.zadd("timeline:{puzzle}".format(puzzle=puzzle),
                              {user: now})
        redis_connection.zadd("timeline", {user: now})

        # Update player points
        if points != 0 and user is not None:
            redis_connection.zincrby("score:{puzzle}".format(puzzle=puzzle),
                                     amount=1,
                                     value=user)
            redis_connection.sadd("batchuser", user)
            redis_connection.sadd("batchpuzzle", puzzle)
            redis_connection.incr("batchscore:{user}".format(user=user),
                                  amount=1)
            redis_connection.incr(
                "batchpoints:{puzzle}:{user}".format(puzzle=puzzle, user=user),
                amount=points,
            )
            redis_connection.zincrby("rank", amount=1, value=user)
            points_key = "points:{user}".format(user=user)
            pieces = int(puzzleData["pieces"])
            # Skip increasing dots if puzzle is private
            earns = get_earned_points(pieces,
                                      permission=puzzleData.get("permission"))

            karma = int(redis_connection.get(karma_key))
            ## Max out recent points
            # if earns != 0:
            #    recent_points = int(redis_connection.get(points_key) or 0)
            #    if karma + 1 + recent_points + earns < MAX_KARMA:
            #        redis_connection.incr(points_key, amount=earns)
            # Doing small puzzles doesn't increase recent points, just extends points expiration.
            redis_connection.expire(points_key, RECENT_POINTS_EXPIRE)

            karma_change += 1
            # Extend the karma points expiration since it has increased
            redis_connection.expire(karma_key, KARMA_POINTS_EXPIRE)
            # Max out karma
            if karma < MAX_KARMA:
                redis_connection.incr(karma_key)
            else:
                # Max out points
                if earns != 0:
                    recent_points = int(redis_connection.get(points_key) or 0)
                    if recent_points + earns <= MAX_RECENT_POINTS:
                        redis_connection.incr(points_key, amount=earns)

            redis_connection.incr("batchpoints:{user}".format(user=user),
                                  amount=earns)

        # TODO: Optimize by using redis for puzzle status
        if complete:
            current_app.logger.info("puzzle {puzzle_id} is complete".format(
                puzzle_id=puzzleData["puzzle_id"]))
            cur = db.cursor()

            cur.execute(
                fetch_query_string("update_puzzle_status_for_puzzle.sql"),
                {
                    "puzzle": puzzle,
                    "status": COMPLETED
                },
            )
            cur.execute(
                fetch_query_string("update_puzzle_m_date_to_now.sql"),
                {
                    "puzzle": puzzle,
                    "modified": now
                },
            )
            cur.execute(
                fetch_query_string("update_puzzle_queue_for_puzzle.sql"),
                {
                    "puzzle": puzzle,
                    "queue": QUEUE_END_OF_LINE
                },
            )
            db.commit()
            sse.publish(
                "status:{}".format(COMPLETED),
                channel="puzzle:{puzzle_id}".format(
                    puzzle_id=puzzleData["puzzle_id"]),
            )
            job = current_app.cleanupqueue.enqueue_call(
                func="api.jobs.convertPiecesToDB.transfer",
                args=(puzzle, ),
                result_ttl=0)

            purge_route_from_nginx_cache(
                "/chill/site/front/{puzzle_id}/".format(
                    puzzle_id=puzzleData["puzzle_id"]),
                current_app.config.get("PURGEURLLIST"),
            )

            db.commit()
            cur.close()

        # return topic and msg mostly for testing
        return (msg, karma_change)
Example #10: a "pong" latency handler variant that only accepts puzzles with ACTIVE status.
    def patch(self, puzzle_id):
        "Pong. Determine the latency for this player."
        response = {"message": "", "name": "", "data": {"latency": 0}}

        args = {}
        xhr_data = request.get_json()
        if xhr_data:
            args.update(xhr_data)
        if request.form:
            args.update(request.form.to_dict(flat=True))

        token = args.get("token")
        if token is None:
            response["message"] = "No token"
            response["name"] = "error"
            return make_response(json.jsonify(response), 400)

        user = current_app.secure_cookie.get(u"user") or user_id_from_ip(
            request.headers.get("X-Real-IP"), skip_generate=True)

        if user is None:
            response["message"] = "Player not currently logged in."
            response["name"] = "error"
            return make_response(json.jsonify(response), 400)

        user = int(user)

        cur = db.cursor()

        # Validate the puzzle_id
        result = cur.execute(
            fetch_query_string("select_viewable_puzzle_id.sql"),
            {
                "puzzle_id": puzzle_id
            },
        ).fetchall()
        if not result:
            response["message"] = "Invalid puzzle id."
            response["name"] = "error"
            cur.close()
            db.commit()
            return make_response(json.jsonify(response), 400)
        else:
            (result, col_names) = rowify(result, cur.description)
            puzzle = result[0].get("puzzle")
            status = result[0].get("status")
            if status != ACTIVE:
                response["message"] = "Puzzle not active"
                response["name"] = "invalid"
                cur.close()
                db.commit()
                return make_response(json.jsonify(response), 200)

        # Determine latency for the player and record timestamp in sorted set.
        pingtoken_key = get_pingtoken_key(puzzle, user, token)
        ping_start = redis_connection.get(pingtoken_key)
        redis_connection.delete(pingtoken_key)
        ping_end = int(time.time() * 1000)
        if not ping_start:
            response["message"] = "Ignoring error when determining latency."
            response["name"] = "ignored"
            return make_response(json.jsonify(response), 200)
        ping_start = int(ping_start)
        ping_key = get_ping_key(puzzle)
        redis_connection.zadd(ping_key, {user: ping_end})
        redis_connection.expire(ping_key, PING_EXPIRE)

        latency = ping_end - ping_start

        # Record the latency for the player
        redis_connection.lpush(
            "latency",
            "{user}:{timestamp}:{latency}".format(user=user,
                                                  timestamp=ping_end,
                                                  latency=latency),
        )
        # Keep only the last 1000 entries to latency
        redis_connection.ltrim("latency", 0, 999)

        response["message"] = "Latency"
        response["data"]["latency"] = latency
        response["name"] = "success"
        response = make_response(json.jsonify(response), 200)
        return response