def _get_graph_data(rp):
    global hit_map
    # initialize _cg
    _cg = Circleguard(OSU_API_KEY)
    # get beatmap
    bm = _library.lookup_by_id(MAP, download=True, save=True)
    # generate Investigator instance
    investigator = Investigator(rp, bm, -1)
    # get hit_map
    hit_map = investigator.hit_map(0)
    # get the list of differences between consecutive hit errors
    diffs_old = [i.error for i in hit_map]
    d = [diffs_old[i + 1] - diffs_old[i] for i in range(len(diffs_old) - 1)]
    diffs = np.array(d)
    # compress the diff list down to 16 interpolated samples
    arr1_interp = interp.interp1d(np.arange(diffs.size), diffs)
    arr1_compress = arr1_interp(np.linspace(0, diffs.size - 1, 16))
    # shift so everything is positive
    offset = min(arr1_compress) * -1
    arr1_compress = [i + offset for i in arr1_compress]
    # normalize to a 0-1 scale
    scale = max(arr1_compress)
    arr1_compress = [i / scale for i in arr1_compress]
    arr1_compress = np.array(arr1_compress)
    _cg.library.close()
    return arr1_compress
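
# Usage sketch for _get_graph_data, assuming the module-level globals set up
# elsewhere in this script (_library, MAP, OSU_API_KEY). The replay path is a
# placeholder, not a real fixture. The 16 returned values are normalized to
# [0, 1], so they can be fed straight into a plot or a small model.
from circleguard import Circleguard, ReplayPath

rp = ReplayPath("./replay/example.osr")  # hypothetical path
Circleguard(OSU_API_KEY).load(rp)
print(_get_graph_data(rp))  # 16 floats in [0, 1]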
def cg(self):
    # if the user has changed their api key since we last retrieved our
    # circleguard instance, recreate circleguard with the new key.
    new_api_key = get_setting("api_key")
    if new_api_key != self.old_api_key or not self._cg:
        from circleguard import Circleguard
        cache_path = get_setting("cache_dir") + "circleguard.db"
        self._cg = Circleguard(new_api_key, cache_path)
        self.old_api_key = new_api_key
    return self._cg
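
# A minimal sketch of the lazy, key-aware property pattern used above. The
# class name is hypothetical and get_setting is assumed from the surrounding
# module; the point is that the heavy Circleguard instance is only (re)built
# on first use or when the api key changes, not on every access.
class CircleguardHolder:
    def __init__(self):
        self._cg = None
        self.old_api_key = None

    @property
    def cg(self):
        new_api_key = get_setting("api_key")
        if new_api_key != self.old_api_key or not self._cg:
            from circleguard import Circleguard
            cache_path = get_setting("cache_dir") + "circleguard.db"
            self._cg = Circleguard(new_api_key, cache_path)
            self.old_api_key = new_api_key
        return self._cg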
def url_scheme_called(self, url):
    from circleguard import ReplayMap, Circleguard, Mod
    # url is bytes, so decode back to str
    url = url.decode()
    # windows appends an extra slash even if the original url didn't have
    # it, so remove it
    url = url.strip("/")
    # all urls can have any of the following parameters:
    # * m - the map id
    # * u - the first user's id
    # * u2 - the second user's id
    # * t - the timestamp to start at
    # * m1 - the mods the first replay was played with
    # * m2 - the mods the second replay was played with
    # For example, a url might look like
    # circleguard://m=221777&u=2757689&m1=HDHR&u2=3219026&m2=HDHR
    map_id = int(re.compile(r"m=(.*?)(&|$)").search(url).group(1))
    user_id = int(re.compile(r"u=(.*?)(&|$)").search(url).group(1))
    timestamp_match = re.compile(r"t=(.*?)(&|$)").search(url)
    # start at the beginning if the timestamp isn't specified
    timestamp = int(timestamp_match.group(1)) if timestamp_match else 0
    # mods are optional; we take the user's highest play on the map if they
    # aren't specified
    mods1_match = re.compile(r"m1=(.*?)(&|$)").search(url)
    mods1 = None
    if mods1_match:
        mods1 = mods1_match.group(1)
    user_id_2_match = re.compile(r"u2=(.*?)(&|$)").search(url)
    user_id_2 = None
    if user_id_2_match:
        user_id_2 = int(user_id_2_match.group(1))
    mods2_match = re.compile(r"m2=(.*?)(&|$)").search(url)
    mods2 = None
    if mods2_match:
        mods2 = mods2_match.group(1)
    # convert the string into an actual mods object if we received it
    mods1 = Mod(mods1) if mods1 else None
    r = ReplayMap(map_id, user_id, mods1)
    cg = Circleguard(get_setting("api_key"))
    cg.load(r)
    replays = [r]
    if user_id_2:
        mods2 = Mod(mods2) if mods2 else None
        r2 = ReplayMap(map_id, user_id_2, mods2)
        cg.load(r2)
        replays.append(r2)
    # open the visualizer for the given map and user, and jump to the timestamp
    result = URLAnalysisResult(replays, timestamp)
    self.cg_classic.main_tab.url_analysis_q.put(result)
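
# A hypothetical, self-contained sketch of the same parameter parsing, kept
# free of the GUI so it can be tested on its own. parse_circleguard_url is an
# illustrative name, not part of circleguard's API.
import re

def parse_circleguard_url(url: str) -> dict:
    params = {}
    for key in ["m", "u", "u2", "t", "m1", "m2"]:
        match = re.search(rf"{key}=(.*?)(&|$)", url)
        if match:
            params[key] = match.group(1)
    return params

# example:
# parse_circleguard_url("circleguard://m=221777&u=2757689&m1=HDHR")
# -> {"m": "221777", "u": "2757689", "m1": "HDHR"}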
def _get_norm_hit_map(rp):
    mods1 = [Mod(mod_val) for mod_val in utils.bits(rp.mods)]
    flip1 = Mod.HardRock in mods1
    # initialize _cg
    _cg = Circleguard(OSU_API_KEY)
    # get beatmap
    bm = _library.lookup_by_id(MAP, download=True, save=True)
    # generate Investigator instance
    investigator = Investigator(rp, bm, -1)
    # get hit_map
    hit_map = investigator.hit_map(flip1)
    # normalize hit positions by the circle radius in osu!pixels
    circle_size = (109 - 9 * bm.circle_size)
    new_hitmap = []
    for i in hit_map:
        i.x = i.x / circle_size
        i.y = i.y / circle_size
        new_hitmap.append(i)
    _cg.library.close()
    return new_hitmap
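
# Sketch of consuming the normalized hit map, assuming each entry exposes
# .x and .y (normalized above) and .error (hit error in ms, as used in
# _get_graph_data). Computes the mean unsigned error and the mean distance
# from the circle center; purely illustrative, not part of the original code.
def _hit_map_stats(hit_map):
    mean_abs_error = sum(abs(h.error) for h in hit_map) / len(hit_map)
    mean_dist = sum((h.x ** 2 + h.y ** 2) ** 0.5 for h in hit_map) / len(hit_map)
    return mean_abs_error, mean_dist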
async def replay_check(self, replay_data_str: str) -> None:
    cg = Circleguard(glob.config.api_key)
    replay = ReplayString(replay_data_str)

    if glob.config.similarity_checks:
        # get bancho leaderboards and compare the replay against them
        # TODO: check replays on private servers
        if self.map.status >= mapStatuses.Ranked:  # has bancho lb
            _map = cg.Map(
                self.map.id,
                span="1-100",
            )  # maybe I'll increase this to the top 1000?

            for mreplay in _map:
                sim = cg.similarity(replay, mreplay)
                # suggested circlecore value, idk if this should change.
                # this can flag legit players, which is why it only flags
                # rather than restricts. please check a flagged replay
                # manually!
                if sim < 17:
                    return await self.user.flag(
                        reason=f"potential replay botting using {mreplay.username}'s bancho replay (similarity: {sim:.2f}) on {self.map.name}",
                        fr=glob.bot,
                    )

    self.ur = cg.ur(replay)  # can't use := because it's a class attribute :(
    if self.ur < 70:
        await self.user.flag(
            reason=f"potential relax (ur: {self.ur:.2f}) on {self.map.name}",
            fr=glob.bot,
        )

    # this can sometimes be a false positive, but it's detectable through a
    # visualized graph (i may send it alongside the embed at some point).
    # there is a fix in the works for detecting false positives.
    if (ft := cg.frametime(replay)) < 14:
        await self.user.restrict(
            reason=f"timewarp cheating (frametime: {ft:.2f}) on {self.map.name}",
            fr=glob.bot,
        )
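
# Minimal standalone sketch of the ur and frametime checks outside the server
# context, assuming circlecore's Circleguard/ReplayPath API and reusing the
# same heuristic thresholds as above (ur < 70, frametime < 14). The key and
# path are placeholders.
from circleguard import Circleguard, ReplayPath

def quick_check(api_key: str, osr_path: str) -> None:
    cg = Circleguard(api_key)
    replay = ReplayPath(osr_path)
    ur = cg.ur(replay)
    frametime = cg.frametime(replay)
    if ur < 70:
        print(f"potential relax (ur: {ur:.2f})")
    if frametime < 14:
        print(f"potential timewarp (frametime: {frametime:.2f})")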
@classmethod
def setUpClass(cls, use_cache=True):
    # pass use_cache=False when we need super precise coordinates for tests
    # to work
    cache_path = Path(__file__).parent / "cache.db"
    cache_path = cache_path if use_cache else None
    cls.cg = Circleguard(KEY, db_path=cache_path)
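
# Hypothetical test method using the class-level Circleguard instance set up
# above; the replay path and assertion are placeholders, and ReplayPath is
# assumed to be imported alongside Circleguard in this test module.
def test_ur_is_positive(self):
    replay = ReplayPath(Path(__file__).parent / "replays" / "example.osr")
    self.assertGreater(self.cg.ur(replay), 0)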
def run_circleguard(self, run):
    from circleguard import (Circleguard, UnknownAPIException, ReplayPath,
        NoInfoAvailableException, Loader, LoadableContainer, replay_pairs)

    class TrackerLoader(Loader, QObject):
        """
        A circleguard.Loader subclass that emits a signal when the loader is
        ratelimited. It inherits from QObject to allow us to use qt signals.
        """
        ratelimit_signal = pyqtSignal(int)  # length of the ratelimit in seconds
        check_stopped_signal = pyqtSignal()
        # how often to emit check_stopped_signal when ratelimited, in seconds
        INTERVAL = 0.250

        def __init__(self, key, cacher=None):
            Loader.__init__(self, key, cacher)
            QObject.__init__(self)

        def _ratelimit(self, length):
            # sometimes the ratelimit length can get very close to zero or
            # even negative due to network request time and rounding. As
            # displaying a zero or negative wait time is confusing, don't
            # wait for any less than 2 seconds.
            length = max(length, 2)
            self.ratelimit_signal.emit(length)
            # how many times to wait for 1/4 second (rng standing for range).
            # we do this loop in order to tell run_circleguard to check if
            # the run was canceled, or the application quit, instead of
            # hanging on a long time.sleep
            rng = math.ceil(length / self.INTERVAL)
            for _ in range(rng):
                time.sleep(self.INTERVAL)
                self.check_stopped_signal.emit()

    self.update_label_signal.emit("Loading Replays")
    self.update_run_status_signal.emit(run.run_id, "Loading Replays")
    # reset every run
    self.show_no_cheat_found = True
    event = run.event
    try:
        core_cache = get_setting("cache_dir") + "circleguard.db"
        slider_cache = get_setting("cache_dir")
        should_cache = get_setting("caching")
        cg = Circleguard(get_setting("api_key"), core_cache,
            slider_dir=slider_cache, cache=should_cache, loader=TrackerLoader)

        def _ratelimited(length):
            message = get_setting("message_ratelimited")
            ts = datetime.now()
            self.write_to_terminal_signal.emit(message.format(s=length, ts=ts))
            self.update_label_signal.emit("Ratelimited")
            self.update_run_status_signal.emit(run.run_id, "Ratelimited")

        def _check_event(event):
            """
            Checks the given event to see if it is set. If it is, the run has
            been canceled through the queue tab or by the application being
            quit, and this thread exits through sys.exit(0). If the event is
            not set, returns silently.
            """
            if event.wait(0):
                self.update_label_signal.emit("Canceled")
                self.set_progressbar_signal.emit(-1)
                # the loadables of a run may be extremely large. We keep
                # this run object around in a list so people can cancel it;
                # if we want to prevent a memory leak we need to remove a
                # run's loadables (we don't need them anymore after this run
                # finishes).
                # may seem dirty, but actually relatively clean since it only
                # affects this thread. Any cleanup we may want to do later can
                # occur here as well.
                run.loadables = None
                sys.exit(0)

        cg.loader.ratelimit_signal.connect(_ratelimited)
        cg.loader.check_stopped_signal.connect(partial(_check_event, event))

        if "Similarity" in run.enabled_investigations:
            loadables1 = []
            loadables2 = []
            for loadable in run.loadables:
                if loadable.sim_group == 1:
                    loadables1.append(loadable)
                else:
                    loadables2.append(loadable)
            lc1 = LoadableContainer(loadables1)
            lc2 = LoadableContainer(loadables2)
        else:
            lc = LoadableContainer(run.loadables)

        message_loading_info = get_setting("message_loading_info").format(ts=datetime.now())
        self.write_to_terminal_signal.emit(message_loading_info)

        if "Similarity" in run.enabled_investigations:
            cg.load_info(lc1)
            cg.load_info(lc2)
            replays1 = lc1.all_replays()
            replays2 = lc2.all_replays()
            all_replays = replays1 + replays2
        else:
            cg.load_info(lc)
            all_replays = lc.all_replays()
            replays1 = all_replays
            replays2 = []

        num_replays = len(all_replays)
        self.set_progressbar_signal.emit(num_replays)
        message_loading_replays = get_setting("message_loading_replays").format(
            ts=datetime.now(), num_replays=num_replays)
        self.write_to_terminal_signal.emit(message_loading_replays)

        def _skip_replay_with_message(replay, message):
            self.write_to_terminal_signal.emit(message)
            # the replay very likely (perhaps certainly) didn't get loaded
            # if the above exception fired. just remove it.
            # If two different loadables both contain this problematic
            # replay, we will attempt to remove it from the list twice.
            # Guard against this by checking for membership first.
            if replay in all_replays:
                all_replays.remove(replay)
            # check has already been initialized with the replay, remove it
            # here too or cg will try and load it again when the check is run
            if replay in replays1:
                replays1.remove(replay)
            if replay in replays2:
                replays2.remove(replay)

        # `[:]` implicitly copies the list, so we don't run into trouble
        # when removing elements from it while iterating
        for replay in all_replays[:]:
            _check_event(event)
            try:
                cg.load(replay)
                # circleparse sets replay_data to None if it's not a std
                # replay, which means replay.has_data() will be false, so
                # we need to do this check first to give a better error
                # message than "the replay is not available for download",
                # which is incorrect for local replays.
                if isinstance(replay, ReplayPath) and not replay.has_data():
                    _skip_replay_with_message(replay,
                        "<div style='color:#ff5252'>The replay " + str(replay) + " is "
                        "not an osu!std replay.</div> We currently only support std replays. "
                        "This replay has been skipped because of this.")
                elif not replay.has_data():
                    _skip_replay_with_message(replay,
                        "<div style='color:#ff5252'>The replay " + str(replay) + " is "
                        "not available for download.</div> This is likely because it is not in the top 1k scores of "
                        "the beatmap. This replay has been skipped because of this.")
            except NoInfoAvailableException:
                _skip_replay_with_message(replay,
                    "<div style='color:#ff5252'>The replay " + str(replay) + " does "
                    "not exist.</div>\nDouble check your map and/or user id. This replay has "
                    "been skipped because of this.")
            except UnknownAPIException as e:
                _skip_replay_with_message(replay,
                    "<div style='color:#ff5252'>The osu! api provided an invalid "
                    "response:</div> " + str(e) + ". The replay " + str(replay) +
                    " has been skipped because of this.")
            except LZMAError as e:
                _skip_replay_with_message(replay,
                    "<div style='color:#ff5252'>lzma error while parsing a replay:</div> " +
                    str(e) + ". The replay is either corrupted or has no replay data. "
                    "The replay " + str(replay) + " has been skipped because of this.")
            except Exception as e:
                # print full traceback here for more debugging info.
                # Don't do it for the previous exceptions because the
                # cause of those is well understood, but that's not
                # necessarily the case for generic exceptions here.
                # attempting to use divs/spans here to color the beginning
                # of this string made the traceback collapse down into
                # one line with none of the usual linebreaks, I'm not
                # sure why. So you win qt, no red color for this error.
                _skip_replay_with_message(replay,
                    "error while loading a replay: " + str(e) + "\n" +
                    traceback.format_exc() +
                    "The replay " + str(replay) + " has been skipped because of this.")
            finally:
                self.increment_progressbar_signal.emit(1)

        if "Similarity" in run.enabled_investigations:
            lc1.loaded = True
            lc2.loaded = True
        else:
            lc.loaded = True
        # change the progressbar into an undetermined state (animation with
        # stripes sliding horizontally) to indicate we're processing the data
        self.set_progressbar_signal.emit(0)

        setting_end_dict = {
            "Similarity": "steal",
            "Unstable Rate": "relax",
            "Snaps": "correction",
            "Frametime": "timewarp",
            "Manual Analysis": "analysis"
        }

        for investigation in run.enabled_investigations:
            setting = "message_starting_" + setting_end_dict[investigation]
            message_starting_investigation = get_setting(setting).format(ts=datetime.now())
            self.write_to_terminal_signal.emit(message_starting_investigation)

            if investigation == "Manual Analysis":
                map_ids = [r.map_id for r in all_replays]
                if len(set(map_ids)) > 1:
                    self.write_to_terminal_signal.emit(
                        "Manual analysis expected replays from a single map, "
                        f"but got replays from maps {set(map_ids)}. Please use a different Manual Analysis "
                        "Check for each map.")
                    self.update_label_signal.emit("Analysis Error (Multiple maps)")
                    self.update_run_status_signal.emit(run.run_id, "Analysis Error (Multiple maps)")
                    self.set_progressbar_signal.emit(-1)
                    run.loadables = None
                    sys.exit(0)
                # if a replay was removed from all_replays (eg if that replay
                # was not available for download), and that leaves all_replays
                # with no replays, we don't want to add a result because the
                # rest of guard expects >=1 replay, leading to confusing errors.
                if len(all_replays) != 0:
                    self.q.put(AnalysisResult(all_replays))
                continue

            self.update_label_signal.emit("Investigating Replays...")
            self.update_run_status_signal.emit(run.run_id, "Investigating Replays")

            if investigation == "Similarity":
                # core relies on the second replays list being the empty one
                # if only one of the lists is empty, but we want to allow
                # users to select whichever group they want if they're only
                # using a single group, so switch the two if this is the case.
                if replays2 and not replays1:
                    replays1, replays2 = replays2, replays1
                pairs = replay_pairs(replays1, replays2)
                for (replay1, replay2) in pairs:
                    _check_event(event)
                    sim = cg.similarity(replay1, replay2)
                    result = StealResult(sim, replay1, replay2)
                    self.q.put(result)
            if investigation == "Unstable Rate":
                for replay in all_replays:
                    _check_event(event)
                    # skip replays which have no map info
                    if replay.map_info.available():
                        try:
                            ur = cg.ur(replay)
                        # Sometimes, a beatmap will have a bugged download where
                        # it returns an empty response when we try to download it
                        # (https://github.com/ppy/osu-api/issues/171). Since peppy
                        # doesn't plan on fixing this for unranked beatmaps, we
                        # just ignore / skip the error in all cases.
                        # StopIteration is a bit of a weird exception to catch
                        # here, but because of how slider interacts with beatmaps
                        # it will attempt to call `next` on an empty generator if
                        # the beatmap is empty, which of course raises
                        # StopIteration.
                        except StopIteration:
                            if requests.get(f"https://osu.ppy.sh/osu/{replay.map_id}").content == b"":
                                self.write_to_terminal_signal.emit(
                                    "<div style='color:#ff5252'>The "
                                    "map " + str(replay.map_id) + "'s download is bugged</div>, so its ur cannot "
                                    "be calculated. The replay " + str(replay) + " has been skipped because of this, "
                                    "but please report this to the developers through discord or github so it can be "
                                    "tracked.")
                                break
                            # If we happen to catch an unrelated error with this
                            # `except`, we still want to raise that so it can be
                            # tracked and fixed.
                            else:
                                raise
                        result = RelaxResult(ur, replay)
                        self.q.put(result)
                    else:
                        self.write_to_terminal_signal.emit(
                            "<div style='color:#ff5252'>The "
                            "replay " + str(replay) + " has no map id</div>, so its ur cannot "
                            "be calculated. This replay has been skipped because of this.")
            if investigation == "Snaps":
                max_angle = get_setting("correction_max_angle")
                min_distance = get_setting("correction_min_distance")
                only_on_hitobjs = get_setting("ignore_snaps_off_hitobjs")
                for replay in all_replays:
                    _check_event(event)
                    snaps = cg.snaps(replay, max_angle, min_distance, only_on_hitobjs)
                    result = CorrectionResult(snaps, replay)
                    self.q.put(result)
            if investigation == "Frametime":
                for replay in all_replays:
                    _check_event(event)
                    frametime = cg.frametime(replay)
                    frametimes = cg.frametimes(replay)
                    result = TimewarpResult(frametime, frametimes, replay)
                    self.q.put(result)

            # flush self.q. Since the next investigation will be processed
            # so quickly afterwards, we actually need to forcibly wait
            # until the results have been printed before proceeding.
            self.print_results_event.clear()
            self.print_results_signal.emit()
            self.print_results_event.wait()

        self.set_progressbar_signal.emit(-1)  # empty progressbar
        # this event is necessary because `print_results` will set
        # `show_no_cheat_found`, and since it happens asynchronously we need
        # to wait for it to finish before checking it. So we clear it here,
        # then wait for it to get set before proceeding.
        self.print_results_event.clear()
        # 'flush' self.q so there are no more results left and
        # message_finished_investigation won't print before results from that
        # investigation, which looks strange.
        self.print_results_signal.emit()
        self.print_results_event.wait()
        if self.show_no_cheat_found:
            self.write_to_terminal_signal.emit(get_setting("message_no_cheat_found").format(ts=datetime.now()))
        self.write_to_terminal_signal.emit(get_setting("message_finished_investigation").format(ts=datetime.now()))

        # prevents an error when a user closes the application. Because
        # we're running inside a new thread, if we don't do this, cg (and
        # the library) will get gc'd in another thread. Because library's
        # ``__del__`` closes the sqlite connection, this causes:
        # ```
        # Traceback (most recent call last):
        #   File "/Users/tybug/Desktop/coding/osu/slider/slider/library.py", line 98, in __del__
        #     self.close()
        #   File "/Users/tybug/Desktop/coding/osu/slider/slider/library.py", line 94, in close
        #     self._db.close()
        # sqlite3.ProgrammingError: SQLite objects created in a thread can only be used in that same thread.
        # The object was created in thread id 123145483210752 and this is thread id 4479481280.
        # ```
        cg.library.close()
    except Exception:
        # if the error happens before we reset the progressbar it stays at
        # 100%. make sure we reset it here
        self.set_progressbar_signal.emit(-1)
        log.exception("Error while running circlecore. Please "
            "report this to the developers through discord or github.\n")

    self.update_label_signal.emit("Idle")
    self.update_run_status_signal.emit(run.run_id, "Finished")
    # necessary to prevent memory leaks, as we keep the run object around
    # after the run finishes, but don't need its loadables anymore.
    run.loadables = None
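
# A pared-down sketch of the investigation dispatch above, stripped of the Qt
# signals and cancellation plumbing, assuming circlecore's replay_pairs,
# similarity, ur, and frametime APIs as used in the full function. The
# investigate name and the tuple result format are illustrative only.
from circleguard import Circleguard, replay_pairs

def investigate(cg: Circleguard, replays1, replays2, investigations):
    results = []
    if "Similarity" in investigations:
        for r1, r2 in replay_pairs(replays1, replays2):
            results.append(("similarity", cg.similarity(r1, r2), r1, r2))
    for replay in replays1 + replays2:
        if "Unstable Rate" in investigations:
            results.append(("ur", cg.ur(replay), replay))
        if "Frametime" in investigations:
            results.append(("frametime", cg.frametime(replay), replay))
    return results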
from ossapi import ossapi
from circleguard import Circleguard, ReplayPath, ReplayMap
from slider import Library
import scipy.interpolate as interp
import numpy as np
import os
from badgeWidget import VisualizerWindow
from PyQt5.QtWidgets import QApplication

USER = "******"
MAP = "2097898"
OSU_API_KEY = ""
USE_REPLAY = False  # if the file should be used instead of downloading the replay
REPLAY_PATH = "./replay/nonexistingfile.osr"
CACHE_DIR = "./cache/"

_api = ossapi(OSU_API_KEY)
_cg = Circleguard(OSU_API_KEY)
_loader = _cg.loader
if not os.path.exists(CACHE_DIR):
    os.mkdir(CACHE_DIR)
_library = Library.create_db(CACHE_DIR)


def _get_score_info():
    print("@retrieving score info")
    return _api.get_scores({"b": MAP, "u": USER})[0]


def _get_replay(score_info):
    if USE_REPLAY:
        print("@USE_REPLAY set, using REPLAY_PATH")
        replay = ReplayPath(REPLAY_PATH)
    else:
        # the source is truncated here; an assumed completion is to fetch the
        # user's replay on the map from the api
        replay = ReplayMap(int(MAP), int(USER))
    return replay
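
# Sketch of how the helpers above are presumably wired together; the main
# guard and the VisualizerWindow constructor signature are assumptions based
# on the imports, not code from the truncated source.
if __name__ == "__main__":
    score_info = _get_score_info()
    replay = _get_replay(score_info)
    _cg.load(replay)
    app = QApplication([])
    window = VisualizerWindow(replay)  # hypothetical constructor signature
    window.show()
    app.exec_()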
def benchmark_func():
    cg = Circleguard(KEY)
    cg.ur(replay1)
def benchmark_func():
    # creating a new cg object is a hacky way to force a new slider cache
    cg = Circleguard(KEY)
    replay = ReplayPath(RES / "legit" / "legit-1.osr")
    cg.ur(replay)
# Benchmarks for circleguard. These are not strictly "tests" and will not be
# run by unittest, but it felt appropriate to place them in the tests folder.
import cProfile
from pstats import Stats

from circleguard import Circleguard, ReplayPath
# pylint has trouble with files named "utils" apparently
from utils import KEY, RES  # pylint: disable=no-name-in-module

cg = Circleguard(KEY)
replay1 = ReplayPath(RES / "legit" / "legit-1.osr")
cg.load(replay1)


def benchmark_ur(with_cache):
    """
    Parameters
    ----------
    with_cache: {"none", "replay", "beatmap", "both"}
        What caches to use when calculating the ur of the replay. If
        "replay", a replay that has already been loaded is used. If
        "beatmap", the beatmap is downloaded on the first call, and is
        cached thereafter. If "both", both of the above caches apply.
    """
    profiler = cProfile.Profile()
    if with_cache == "none":
        num_calls = 10
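
# Sketch of how such a benchmark might drive the profiler, using the
# cProfile/pstats APIs imported above. The profile helper name is
# illustrative; func stands in for either benchmark_func body defined
# elsewhere in this file.
def profile(func, num_calls):
    profiler = cProfile.Profile()
    profiler.enable()
    for _ in range(num_calls):
        func()
    profiler.disable()
    # show the 20 most expensive calls by cumulative time
    Stats(profiler).sort_stats("cumtime").print_stats(20)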
def cg(self):
    if not self._cg:
        from circleguard import Circleguard
        cache_path = get_setting("cache_dir") + "circleguard.db"
        self._cg = Circleguard(get_setting("api_key"), cache_path)
    return self._cg
@classmethod
def setUpClass(cls):
    cache_path = Path(__file__).parent / "cache.db"
    cls.cg = Circleguard(KEY, db_path=cache_path)
import re
import logging as log
from datetime import datetime

import requests

from secret import KEY
from config import (GAMEMODES, FLAIRS, OFFENSES, BLATANT, TITLE_MATCH,
    API_BASE, API_USERS, REPLY_FOOTER, LIMIT_TOP_PLAYS)
from utils import calc_acc, calc_mods, parse_play_rank
from circleguard import Circleguard, ReplayID

# yes yes, globals bad, I know. The rest of this codebase is already crappy
# though, so what's the harm in making it a little worse.
# TODO cache? reports are pretty infrequent, so probably not necessary,
# though they do get repeated sometimes.
cg = Circleguard(KEY)


def parse_title_data(title):
    """
    Returns a list containing the title data, or None if the regex failed to
    match.
    [Gamemode, player_name, [offense_name, blatant?], [flair_name, css_class]]
    """
    title_data = TITLE_MATCH.match(title)
    if not title_data:
        # regex didn't match
        return None
    gamemode = parse_gamemode(title_data.group(1))
    parts = title_data.group(2).split("|", 1)  # only split once
    # take from gamemode to first pipe, remove leading + trailing spaces
    player = parts[0].strip()
    offense = parts[-1].strip(
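
# Illustrative check of the pipe-splitting logic above on a hypothetical
# report-style title body. The exact TITLE_MATCH regex lives in config, so
# only the post-match split is exercised here.
body = "Player Name | Replay botting [Blatant]"
parts = body.split("|", 1)
assert parts[0].strip() == "Player Name"
assert parts[-1].strip() == "Replay botting [Blatant]"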
def get_frametimes(self, replay):
    cg = Circleguard(get_setting("api_key"))
    result = list(cg.timewarp_check(replay))
    return result[0].frametimes
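
# Sketch of summarizing the returned frametimes, assuming they are a sequence
# of per-frame times in milliseconds as in circleguard's timewarp check. The
# function name is illustrative; the 14ms threshold matches the one used in
# the replay_check snippet above.
import numpy as np

def summarize_frametimes(frametimes):
    frametimes = np.asarray(frametimes)
    return {
        "median": float(np.median(frametimes)),
        "mean": float(frametimes.mean()),
        "suspicious": bool(np.median(frametimes) < 14),
    }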
class VisualizeTab(QFrame):
    def __init__(self):
        super().__init__()
        self.result_frame = ResultsTab()
        self.result_frame.results.info_label.hide()
        self.map_id = None
        self.q = Queue()
        self.replays = []
        cache_path = get_setting("cache_dir") + "circleguard.db"
        self.cg = Circleguard(get_setting("api_key"), cache_path)
        self.info = QLabel(self)
        self.info.setText(
            "Visualizes Replays. Has theoretical support for an arbitrary "
            "number of replays."
        )
        self.label_map_id = QLabel(self)
        self.update_map_id_label()
        self.file_chooser = FolderChooser("Add Replays",
            folder_mode=False,
            multiple_files=True,
            file_ending="osu! Replayfile (*osr)",
            display_path=False)
        self.file_chooser.path_signal.connect(self.add_files)
        self.folder_chooser = FolderChooser("Add Folder", display_path=False)
        self.folder_chooser.path_signal.connect(self.add_folder)

        layout = QGridLayout()
        layout.addWidget(self.info)
        layout.addWidget(self.file_chooser)
        layout.addWidget(self.folder_chooser)
        layout.addWidget(self.label_map_id)
        layout.addWidget(self.result_frame)
        self.setLayout(layout)

    def start_timer(self):
        timer = QTimer(self)
        timer.timeout.connect(self.run_timer)
        timer.start(250)

    def run_timer(self):
        self.add_widget()

    def update_map_id_label(self):
        self.label_map_id.setText(f"Current beatmap_id: {self.map_id}")

    def add_files(self, paths):
        thread = threading.Thread(target=self._parse_replays, args=[paths])
        thread.start()
        self.start_timer()

    def add_folder(self, path):
        thread = threading.Thread(target=self._parse_folder, args=[path])
        thread.start()
        self.start_timer()

    def _parse_replays(self, paths):
        for path in paths:
            # guaranteed to end in .osr by our filter
            self._parse_replay(path)

    def _parse_folder(self, path):
        for f in os.listdir(path):  # os.walk seems unnecessary
            if f.endswith(".osr"):
                self._parse_replay(os.path.join(path, f))

    def _parse_replay(self, path):
        replay = ReplayPath(path)
        self.cg.load(replay)
        # store the map_id if nothing is stored yet
        if self.map_id is None or len(self.replays) == 0:
            log.info(f"Changing map_id from {self.map_id} to {replay.map_id}")
            self.map_id = replay.map_id
            self.update_map_id_label()
        # ignore replays with different map_ids
        elif replay.map_id != self.map_id:
            log.error(f"replay {replay} doesn't match with current map_id "
                      f"({replay.map_id} != {self.map_id})")
            return
        # check if the replay is already stored
        if not any(replay.replay_id == r.data.replay_id for r in self.replays):
            log.info(f"adding new replay {replay} with replay id "
                     f"{replay.replay_id} on map {replay.map_id}")
            self.q.put(replay)
        else:
            log.info(f"skipping replay {replay} with replay id "
                     f"{replay.replay_id} on map {replay.map_id} since it's "
                     "already saved")

    def add_widget(self):
        try:
            while True:
                replay = self.q.get(block=False)
                widget = EntryWidget(
                    f"{replay.username}'s play with the id {replay.replay_id}",
                    "Delete", replay)
                widget.clicked_signal.connect(self.remove_replay)
                self.replays.append(widget)
                self.result_frame.results.layout.insertWidget(0, widget)
        except Empty:
            pass

    def remove_replay(self, data):
        replay_ids = [replay.data.replay_id for replay in self.replays]
        index = replay_ids.index(data.replay_id)
        self.result_frame.results.layout.removeWidget(self.replays[index])
        self.replays[index].deleteLater()
        self.replays[index] = None
        self.replays.pop(index)
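
# VisualizeTab parses replays on a worker thread and hands them to the Qt
# main thread through a Queue drained by a QTimer, since Qt widgets must only
# be touched from the main thread. A minimal sketch of that pattern with
# hypothetical names; it requires a running QApplication event loop for the
# timer to fire.
import threading
from queue import Queue, Empty
from PyQt5.QtCore import QTimer

class Consumer:
    def __init__(self):
        self.q = Queue()
        self.timer = QTimer()
        self.timer.timeout.connect(self.drain)
        self.timer.start(250)  # poll every 250ms, as VisualizeTab does
        threading.Thread(target=self.produce).start()

    def produce(self):
        # runs off the main thread; only ever touches the queue
        for item in ["a.osr", "b.osr"]:
            self.q.put(item)

    def drain(self):
        # runs on the main thread; safe to update widgets here
        try:
            while True:
                print("got", self.q.get(block=False))
        except Empty:
            pass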