def check_word(word):
    """Record usage of a word that is absent from the global dictionary.

    Words already present in GlobalDictionaryWord are ignored.  Otherwise an
    UnknownWord entity keyed by the word itself is created on first sight, or
    its usage counter is bumped on later sightings — unless an operator has
    marked it as ignored.
    """
    # Known dictionary words need no tracking.
    if GlobalDictionaryWord.get(word) is not None:
        return
    unknown = ndb.Key(UnknownWord, word).get()
    if unknown is None:
        # First time we have ever seen this word.
        UnknownWord(word=word, id=word, times_used=1).put()
        return
    if not unknown.ignored:
        # Seen before and not blacklisted: bump the usage counter.
        unknown.times_used += 1
        unknown.put()
def post(self, *args, **kwargs):
    """Store user complaints about dictionary words.

    The request carries a JSON list of complaint dicts; each must contain
    "word" and "reason", and may contain "replace_word".  Complaints about
    unknown or deleted words are silently skipped.
    """
    complaints = json.loads(self.request.get("json"))
    for complaint in complaints:
        dictionary_entry = GlobalDictionaryWord.get(complaint["word"])
        # Skip words we do not know, and words already tagged as deleted.
        if not dictionary_entry:
            continue
        if dictionary_entry.tags.find("-deleted") >= 0:
            continue
        record = ComplainedWord(
            device=self.device_key,
            word=complaint["word"],
            reason=complaint["reason"],
        )
        if "replace_word" in complaint:
            record.replacement_word = complaint["replace_word"]
        record.put()
def update_word(self, word, word_outcome, explanation_time, rating, game_key):
    """Fold one game's result for *word* into its global statistics.

    Updates usage/outcome counters, total explanation time, the
    5-second-bucket histogram of successful explanation times, the list of
    games the word appeared in, and the TrueSkill rating (E = mu, D = sigma).
    Silently does nothing if the word is not in the global dictionary.
    """
    entity = GlobalDictionaryWord.get(word)
    if not entity:
        return
    entity.used_times += 1
    if word_outcome == 'guessed':
        entity.guessed_times += 1
    elif word_outcome == 'failed':
        entity.failed_times += 1
    seconds = explanation_time
    entity.total_explanation_time += seconds
    if word_outcome == 'guessed':
        # Histogram of explanation times in 5-second buckets; grow the
        # list with zeroes until the target bucket exists.
        bucket = seconds // 5
        histogram = entity.counts_by_expl_time
        missing = bucket + 1 - len(histogram)
        if missing > 0:
            histogram.extend([0] * missing)
        histogram[bucket] += 1
    entity.used_games.append(game_key.urlsafe())
    entity.E = rating.mu
    entity.D = rating.sigma
    entity.put()
def get(self, *args, **kwargs):
    """Render the word-statistics page.

    If a specific word is requested and found, its entity is shown;
    otherwise the page shows four aggregate lists (most dangerous, best
    rated, worst rated, random sample of used words), each cached in
    memcache for 12 hours.

    Fix: cache hits were tested with `if not x`, so a legitimately cached
    empty list (or a cached count of 0) was treated as a miss and the
    datastore was re-queried on every request.  Use `is None` so empty
    results stay cached.
    """
    word = self.request.get('word', None)
    entity, top, bottom, rand, danger_top = None, None, None, None, None
    if word:
        entity = GlobalDictionaryWord.get(word)
    if not entity:
        danger_top = memcache.get("danger_top")
        if danger_top is None:  # `is None`: an empty cached list is still a hit
            danger_top = GlobalDictionaryWord.query().order(
                -GlobalDictionaryWord.danger).fetch(limit=10)
            memcache.set("danger_top", danger_top, time=60 * 60 * 12)
        top = memcache.get("words_top")
        if top is None:
            top = GlobalDictionaryWord.query().order(
                -GlobalDictionaryWord.E).fetch(limit=10)
            memcache.set("words_top", top, time=60 * 60 * 12)
        bottom = memcache.get("words_bottom")
        if bottom is None:
            bottom = GlobalDictionaryWord.query().order(
                GlobalDictionaryWord.E).fetch(limit=10)
            memcache.set("words_bottom", bottom, time=60 * 60 * 12)
        q = GlobalDictionaryWord.query().filter(
            GlobalDictionaryWord.used_times > 0)
        c = memcache.get("used_words_count")
        if c is None:  # a cached count of 0 must not force a re-count
            c = q.count()
            memcache.set("used_words_count", c, time=60 * 60 * 12)
        if c >= 10:
            # Random 10-word window; offset keeps the window inside range.
            rand = q.fetch(limit=10, offset=randint(0, c - 10))
    self.draw_page('statistics/word_statistic',
                   word=word,
                   word_entity=entity,
                   top=top if top else [],
                   bottom=bottom if bottom else [],
                   rand=rand if rand else [],
                   danger=danger_top if danger_top else [])
def get(self, *args, **kwargs):
    """Render the word-statistics page.

    If a specific word is requested and found, its entity is shown;
    otherwise the page shows four aggregate lists (most dangerous, best
    rated, worst rated, random sample of used words), each cached in
    memcache for 12 hours.

    Fix: cache hits were tested with `if not x`, so a legitimately cached
    empty list (or a cached count of 0) was treated as a miss and the
    datastore was re-queried on every request.  Use `is None` so empty
    results stay cached.
    """
    word = self.request.get("word", None)
    entity, top, bottom, rand, danger_top = None, None, None, None, None
    if word:
        entity = GlobalDictionaryWord.get(word)
    if not entity:
        danger_top = memcache.get("danger_top")
        if danger_top is None:  # `is None`: an empty cached list is still a hit
            danger_top = GlobalDictionaryWord.query().order(-GlobalDictionaryWord.danger).fetch(limit=10)
            memcache.set("danger_top", danger_top, time=60 * 60 * 12)
        top = memcache.get("words_top")
        if top is None:
            top = GlobalDictionaryWord.query().order(-GlobalDictionaryWord.E).fetch(limit=10)
            memcache.set("words_top", top, time=60 * 60 * 12)
        bottom = memcache.get("words_bottom")
        if bottom is None:
            bottom = GlobalDictionaryWord.query().order(GlobalDictionaryWord.E).fetch(limit=10)
            memcache.set("words_bottom", bottom, time=60 * 60 * 12)
        q = GlobalDictionaryWord.query().filter(GlobalDictionaryWord.used_times > 0)
        c = memcache.get("used_words_count")
        if c is None:  # a cached count of 0 must not force a re-count
            c = q.count()
            memcache.set("used_words_count", c, time=60 * 60 * 12)
        if c >= 10:
            # Random 10-word window; offset keeps the window inside range.
            rand = q.fetch(limit=10, offset=randint(0, c - 10))
    self.draw_page(
        "statistics/word_statistic",
        word=word,
        word_entity=entity,
        top=top if top else [],
        bottom=bottom if bottom else [],
        rand=rand if rand else [],
        danger=danger_top if danger_top else [],
    )
def post(self):
    """Process a finished game's log: sanity-check it, update per-word
    TrueSkill ratings and statistics, then mark the task as handled.

    Accepts either a GameLog or a legacy GameHistory key via the
    'game_key' request parameter.  Bad games raise BadGameError and are
    marked ignored instead of being processed.
    """
    game_key = ndb.Key(urlsafe=self.request.get('game_key'))
    logging.info("Handling log of game {}".format(game_key.id()))
    # Only the two known log kinds are processable; 200 so the task queue
    # does not retry an unusable payload.
    if game_key.kind() not in ('GameLog', 'GameHistory'):
        self.abort(200)
    log_db = game_key.get()
    if log_db is None:
        logging.error("Can't find game log")
        self.abort(200)
    is_legacy = game_key.kind() == 'GameHistory'
    try:
        # NOTE(review): seen_words_time appears to map word *index* ->
        # explanation time (see the `if i in seen_words_time` loop below);
        # explained_at_once / words_outcome / explained_pair are keyed the
        # same way — confirm against parse_log/parse_history.
        words_orig, seen_words_time, words_outcome, explained_at_once, explained_pair, players_count,\
            start_timestamp, finish_timestamp = self.parse_history(log_db) if is_legacy else self.parse_log(log_db)
        if start_timestamp and finish_timestamp:
            self.update_game_len_prediction(players_count, 'game', finish_timestamp - start_timestamp)
        # Count words explained suspiciously fast (< 2 time units).
        bad_words_count = 0
        for k, v in seen_words_time.items():
            if v < 2:
                bad_words_count += 1
        # Reject games where fewer than half the dealt words were seen.
        if 2*len(seen_words_time) < len(words_orig):
            raise BadGameError('suspect_too_little_words')
        # Reject games where more than half the seen words were too quick.
        if 2*bad_words_count > len(seen_words_time):
            raise BadGameError('suspect_too_quick_explanation')
        # Track any words that are missing from the global dictionary.
        for word in words_orig:
            self.check_word(word)
        word_db = [GlobalDictionaryWord.get(word) for word in words_orig]
        # One TrueSkill rating per dealt word; None for unknown words.
        self.ratings = [TRUESKILL_ENVIRONMENT.create_rating(word.E, word.D) if word else None for word in word_db]
        # Rating pass 1 (full weight): words explained in one go, grouped
        # by the (explainer, guesser) pair; failed words sort first.
        d = defaultdict(list)
        for word in seen_words_time:
            if explained_at_once[word]:
                d[explained_pair[word]].append(word)
        for l in d.values():
            l.sort(key=lambda item: (words_outcome[item] != 'failed', -seen_words_time[item]))
            self.rate(l)
        d.clear()
        # Rating pass 2 (coef 0.3): guessed/failed words grouped by explainer.
        d = defaultdict(list)
        for word in seen_words_time:
            if words_outcome[word] in ('guessed', 'failed'):
                d[explained_pair[word][0]].append(word)
        for l in d.values():
            l.sort(key=lambda item: (words_outcome[item] != 'failed', -seen_words_time[item]))
            self.rate(l, coef=0.3)
        d.clear()
        # Rating pass 3 (coef 0.8): guessed words grouped by guesser.
        for word in seen_words_time:
            if words_outcome[word] == 'guessed':
                d[explained_pair[word][1]].append(word)
        for l in d.values():
            l.sort(key=lambda item: -seen_words_time[item])
            self.rate(l, coef=0.8)
        # Rating pass 4 (coef 0.6): all guessed/failed words in one pool.
        words = []
        for word in seen_words_time:
            if words_outcome[word] in ('guessed', 'failed'):
                words.append(word)
        words.sort(key=lambda item: (words_outcome[item] != 'failed', -seen_words_time[item]))
        self.rate(words, coef=0.6)
        # Persist the updated ratings/statistics for every word actually seen.
        for i in range(len(words_orig)):
            if i in seen_words_time:
                self.update_word(words_orig[i], words_outcome[i], seen_words_time[i], self.ratings[i], game_key)
        # Timestamps arrive in milliseconds; convert to seconds.
        if start_timestamp:
            start_timestamp //= 1000
        if finish_timestamp:
            finish_timestamp //= 1000
            # NOTE(review): assumes start_timestamp is set whenever
            # finish_timestamp is — otherwise this subtraction fails.
            duration = finish_timestamp - start_timestamp
        else:
            duration = 0
        game_date = get_date(start_timestamp)
        self.update_daily_statistics(game_date, len(seen_words_time), players_count, duration)
        self.update_total_statistics(len(seen_words_time), start_timestamp)
        if players_count:
            self.update_statistics_by_player_count(players_count)
        # Word statistics changed; drop the cached leaderboards.
        memcache.delete_multi(["danger_top", "words_top", "words_bottom", "used_words_count"])
    except BadGameError as e:
        # NOTE(review): the else branch is unreachable — this handler only
        # catches BadGameError, so isinstance is always True.  The
        # 'format-error' fallback presumably dates from a broader except
        # clause; confirm before widening or removing.
        if isinstance(e, BadGameError):
            reason = e.reason
        else:
            reason = 'format-error'
        log_db.ignored = True
        log_db.reason = reason
        log_db.put()
        logging.warning("Did not handle and marked this game as ignored: {}".format(log_db.reason))
    # Always acknowledge the task (both success and ignored paths).
    self.abort(200)
def post(self):
    """Process a finished game's log: sanity-check it, update per-word
    TrueSkill ratings and statistics, stamp the log's start time, then
    mark the task as handled.

    Accepts either a GameLog or a legacy GameHistory key via the
    'game_key' request parameter.  Bad games raise BadGameError and are
    marked ignored instead of being processed.
    """
    game_key = ndb.Key(urlsafe=self.request.get('game_key'))
    logging.info("Handling log of game {}".format(game_key.id()))
    # Only the two known log kinds are processable; 200 so the task queue
    # does not retry an unusable payload.
    if game_key.kind() not in ('GameLog', 'GameHistory'):
        self.abort(200)
    log_db = game_key.get()
    if log_db is None:
        logging.error("Can't find game log")
        self.abort(200)
    is_legacy = game_key.kind() == 'GameHistory'
    try:
        # NOTE(review): seen_words_time appears to map word *index* ->
        # explanation time (see the `if i in seen_words_time` loop below);
        # words_outcome / explained_at_once / explained_pair are keyed the
        # same way — confirm against parse_log/parse_history.
        words_orig, seen_words_time, words_outcome, explained_at_once, explained_pair, players_count, \
            start_timestamp, finish_timestamp = self.parse_history(log_db) if is_legacy else self.parse_log(log_db)
        if start_timestamp and finish_timestamp:
            self.update_game_len_prediction(
                players_count, 'game', finish_timestamp - start_timestamp)
        bad_words_count = 0
        # Reject games where fewer than half the dealt words were seen.
        if 2 * len(seen_words_time) < len(words_orig):
            raise BadGameError('suspect_too_little_words')
        # Count words explained suspiciously fast (< 2 time units).
        for k, v in seen_words_time.items():
            if v < 2:
                bad_words_count += 1
        # Reject games where more than half the seen words were too quick.
        if 2 * bad_words_count > len(seen_words_time):
            raise BadGameError('suspect_too_quick_explanation')
        # Track any words that are missing from the global dictionary.
        for word in words_orig:
            self.check_word(word)
        self.word_db = [
            GlobalDictionaryWord.get(word) for word in words_orig
        ]
        # One TrueSkill rating per dealt word; None for unknown words.
        self.ratings = [
            TRUESKILL_ENVIRONMENT.create_rating(word.E, word.D)
            if word else None for word in self.word_db
        ]
        self.langs = get_langs()
        # Rating pass 1 (full weight): words guessed in one go, grouped by
        # the (explainer, guesser) pair, fastest last.
        d = defaultdict(list)
        for word in seen_words_time:
            if explained_at_once[word] and words_outcome[word] == 'guessed':
                d[explained_pair[word]].append(word)
        for l in d.values():
            l.sort(key=lambda item: -seen_words_time[item])
            self.rate(l)
        d.clear()
        # Rating pass 2 (coef 0.3): guessed words grouped by explainer.
        for word in seen_words_time:
            if words_outcome[word] == 'guessed':
                d[explained_pair[word][0]].append(word)
        for l in d.values():
            l.sort(key=lambda item: -seen_words_time[item])
            self.rate(l, coef=0.3)
        d.clear()
        # Rating pass 3 (coef 0.8): guessed words grouped by guesser.
        for word in seen_words_time:
            if words_outcome[word] == 'guessed':
                d[explained_pair[word][1]].append(word)
        for l in d.values():
            l.sort(key=lambda item: -seen_words_time[item])
            self.rate(l, coef=0.8)
        # Rating pass 4 (coef 0.6): all guessed words in one pool.
        words = []
        for word in seen_words_time:
            if words_outcome[word] == 'guessed':
                words.append(word)
        words.sort(key=lambda item: -seen_words_time[item])
        self.rate(words, coef=0.6)
        # Persist the updated ratings/statistics for every word actually seen.
        for i in range(len(words_orig)):
            if i in seen_words_time:
                self.update_word(words_orig[i], words_outcome[i],
                                 seen_words_time[i], self.ratings[i], game_key)
        # Timestamps arrive in milliseconds; convert to seconds and stamp
        # the log entity with the game's start time.
        if start_timestamp:
            start_timestamp //= 1000
            log_db.time = datetime.datetime.fromtimestamp(start_timestamp)
            log_db.put()
        if finish_timestamp:
            finish_timestamp //= 1000
            # NOTE(review): assumes start_timestamp is set whenever
            # finish_timestamp is — otherwise this subtraction fails.
            duration = finish_timestamp - start_timestamp
        else:
            duration = 0
        game_date = get_date(start_timestamp)
        self.update_daily_statistics(game_date, len(seen_words_time),
                                     players_count, duration)
        self.update_total_statistics(len(seen_words_time), start_timestamp)
        if players_count:
            self.update_statistics_by_player_count(players_count)
        # Word statistics changed; drop the cached leaderboards.
        memcache.delete_multi([
            "danger_top", "words_top", "words_bottom", "used_words_count"
        ])
    except BadGameError as e:
        # NOTE(review): the else branch is unreachable — this handler only
        # catches BadGameError, so isinstance is always True.  The
        # 'format-error' fallback presumably dates from a broader except
        # clause; confirm before widening or removing.
        if isinstance(e, BadGameError):
            reason = e.reason
        else:
            reason = 'format-error'
        log_db.ignored = True
        log_db.reason = reason
        log_db.put()
        logging.warning(
            "Did not handle and marked this game as ignored: {}".format(
                log_db.reason))
    # Always acknowledge the task (both success and ignored paths).
    self.abort(200)