def initializeByUserList(self, file, lex, user, secs): # returns a table number # TODO gevent.sleep(0.1) (look into switching context to prevent this from blocking if using async. or use a proper queue) t1 = time.time() lineNumber = 0 alphaSet = set() for line in file: word = line.strip() if len(word) > 15 or len(word) < 2: return 0 lineNumber += 1 if lineNumber > wordwalls.settings.UPLOAD_FILE_LINE_LIMIT: return 0 alphaSet.add(alphagrammize(word)) pkList = [] failedAlphagrams = [] for alphagram in alphaSet: try: a = Alphagram.objects.get(alphagram=alphagram, lexicon=lex) pkList.append(a.pk) except: failedAlphagrams.append(alphagram) # doesn't exist here. TODO send a message saying some of your words couldn't be uploaded. random.shuffle(pkList) print 'number of uploaded alphagrams', len(pkList) print 'elapsed time', time.time() - t1 addlParams = {'timerSecs': secs} if len(failedAlphagrams) > 0: addlParams['introMessage'] = ('Could not process all your alphagrams. (Did you choose the right lexicon?) ' + 'You had ' + str(len(failedAlphagrams)) + ' unmatched alphagrams (the first of which is ' + failedAlphagrams[0] +').') wgm = self.createGameModelInstance(user, GenericTableGameModel.SINGLEPLAYER_GAME, lex, len(pkList), json.dumps(pkList), len(pkList), json.dumps(range(len(pkList))), 0, json.dumps([]), 0, json.dumps([]), **addlParams) wgm.save() wgm.inTable.add(user) wgm.playing.add(user) return wgm.pk # this is a table number id!
def get_alphas_from_words(file_contents):
    """
    Collect the unique alphagrams for every word in file_contents.

    file_contents -- an iterable of lines, one word per line.

    Raises UserListParseException on a non-word entry (longer than 15
    characters) or when the file exceeds the per-file line limit.
    Single-character and empty lines are silently skipped.

    Returns a set of alphagram strings.
    """
    limit = wordwalls.settings.UPLOAD_FILE_LINE_LIMIT
    alphagrams = set()
    for count, raw_line in enumerate(file_contents, start=1):
        entry = raw_line.strip()
        if len(entry) > 15:
            raise UserListParseException("List contains non-word elements")
        if count > limit:
            raise UserListParseException(
                "List contains more words than the current allowed per-file "
                "limit of %d" % limit)
        if len(entry) > 1:
            alphagrams.add(alphagrammize(entry))
    return alphagrams
def generate_blank_bingos_challenge(self, lex, ch_date):
    """
    Reads the previously generated blank bingo files for lex.

    lex     -- the lexicon model instance (its lexiconName is part of
               the filename).
    ch_date -- the challenge date; formatted into the filename.

    Returns a list of {'q': alphagram, 'a': [word pks]} dicts for the
    7- and 8-letter blank files found under $HOME/blanks.
    """
    start = time.time()
    bingos = []
    for length in (7, 8):
        filename = ch_date.strftime("%Y-%m-%d") + "-%s-%ss.txt" % (
            lex.lexiconName, length)
        path = os.path.join(os.getenv("HOME"), 'blanks', filename)
        # Use a context manager so the file is closed even if a word
        # lookup below raises; the old explicit close() leaked the
        # handle on any exception.
        with open(path, 'rb') as f:
            for line in f:
                qas = line.split()
                # Look up pks for words; first token is the question,
                # the rest are its answers.
                words = qas[1:]
                word_pks = [Word.objects.get(word=word, lexicon=lex).pk
                            for word in words]
                bingos.append({'q': alphagrammize(qas[0]), 'a': word_pks})
    logger.debug("Elapsed: %s" % (time.time() - start))
    return bingos
def create_common_words_list(lname, friendly_name):
    """
    Build a NamedList of common-word question pks for the OWL2 lexicon.

    Reads one word per line from COMMON_WORDS_DIR + lname, looks up each
    word's alphagram pk in redis, and saves the result as a NamedList
    called friendly_name.
    """
    # Context manager guarantees the file is closed even if read() fails.
    with open(COMMON_WORDS_DIR + lname) as f:
        words = f.read()
    words = words.split('\n')
    r = redis.Redis(host=settings.REDIS_HOST, port=settings.REDIS_PORT,
                    db=settings.REDIS_ALPHAGRAMS_DB)
    pipe = r.pipeline()
    for word in words:
        alpha = alphagrammize(word)
        pipe.get('%s:%s' % (alpha, OWL2_LEX_INDEX))
    pks = pipe.execute()
    # redis returns None for keys that don't exist (e.g. the empty
    # string produced by a trailing newline); int(None) raised a
    # TypeError before, so skip the misses instead.
    pks = [int(pk) for pk in pks if pk is not None]
    nl = NamedList(lexicon=Lexicon.objects.get(lexiconName='OWL2'),
                   numQuestions=len(pks), wordLength=0, isRange=False,
                   questions=json.dumps(pks), name=friendly_name)
    nl.save()
def generate_blank_bingos_challenge(self, lex, ch_date):
    """
    Reads the previously generated blank bingo files for lex.

    lex     -- the lexicon model instance (its lexiconName is part of
               the filename).
    ch_date -- the challenge date; formatted into the filename.

    Returns a list of {'q': alphagram, 'a': [word pks]} dicts for the
    7- and 8-letter blank files found under $HOME/blanks.
    """
    start = time.time()
    bingos = []
    for length in (7, 8):
        filename = ch_date.strftime(
            "%Y-%m-%d") + "-%s-%ss.txt" % (lex.lexiconName, length)
        path = os.path.join(os.getenv("HOME"), 'blanks', filename)
        # `with` ensures the file is closed even if a Word lookup
        # raises; the previous explicit close() leaked on exceptions.
        with open(path, 'rb') as f:
            for line in f:
                qas = line.split()
                # Look up pks for words; first token is the question,
                # the remaining tokens are its answer words.
                words = qas[1:]
                word_pks = [
                    Word.objects.get(word=word, lexicon=lex).pk
                    for word in words
                ]
                bingos.append({'q': alphagrammize(qas[0]), 'a': word_pks})
    logger.debug("Elapsed: %s" % (time.time() - start))
    return bingos
def alphagram_history_search(request):
    """ Search for the alphagram in the user's quizzes to see when they
    last saw it.

    Expects GET parameters `lexicon` and `letters`; '!' characters in
    the letters are stripped before alphagrammizing. Returns a response
    listing each non-temporary anagram word list (and index) in which
    the alphagram appears.
    """
    lexicon = request.GET.get('lexicon')
    letters = request.GET.get('letters')
    # Guard against a missing parameter; calling .replace on None would
    # raise AttributeError (an HTTP 500) instead of a clean 400.
    if letters is None:
        return response('Bad letters', StatusCode.BAD_REQUEST)
    alphagram = alphagrammize(letters.replace('!', '')).upper()
    summary = []
    try:
        lex_obj = Lexicon.objects.get(lexiconName=lexicon)
    except Lexicon.DoesNotExist:
        return response('Bad lexicon', StatusCode.BAD_REQUEST)
    # This is an expensive function.
    lists = WordList.objects.filter(
        user=request.user, lexicon=lex_obj,
        category=WordList.CATEGORY_ANAGRAM, is_temporary=False)
    for wl in lists:
        questions = json.loads(wl.origQuestions)
        for idx, q in enumerate(questions):
            if q['q'] == alphagram:
                summary.append(get_q_summary(wl, idx, alphagram))
    return response(summary)
def create_common_words_list(lname, friendly_name):
    """
    Build a NamedList of common-word probability pks for the OWL2
    lexicon.

    Reads one word per line from COMMON_WORDS_DIR + lname, looks up each
    unique alphagram in the base_alphagram table, and saves the result
    as a NamedList called friendly_name.
    """
    # Context manager guarantees the file is closed even if read() fails.
    with open(COMMON_WORDS_DIR + lname) as f:
        words = f.read().split('\n')
    alphs = list({alphagrammize(word) for word in words})
    cursor = connection.cursor()
    # Parameterize the query. The old string-formatted version was
    # injection-prone and also generated invalid SQL whenever the set
    # had exactly one element: str(tuple(...)) leaves a trailing comma,
    # e.g. "IN ('ABC',)".
    placeholders = ', '.join(['%s'] * len(alphs))
    cursor.execute(
        'SELECT probability_pk FROM base_alphagram '
        'WHERE lexicon_id = %%s AND alphagram IN (%s)' % placeholders,
        [OWL2_LEX_INDEX] + alphs)
    pks = [row[0] for row in cursor.fetchall()]
    nl = NamedList(lexicon=Lexicon.objects.get(lexiconName='OWL2'),
                   numQuestions=len(pks), wordLength=0, isRange=False,
                   questions=json.dumps(pks), name=friendly_name)
    nl.save()
def get_alphas_from_words(contents, max_words):
    """
    Get all the alphagrams from the given words. Return a list of
    Alphagram objects.

    contents  -- newline-separated words as a single string.
    max_words -- maximum number of lines allowed before rejecting.

    Raises UserListParseException for entries longer than 15 characters,
    for files over the line limit, or for words alphagrammize cannot
    handle (KeyError on an invalid character).
    """
    alpha_set = set()
    for count, raw_line in enumerate(contents.split('\n'), start=1):
        word = raw_line.strip()
        if len(word) > 15:
            raise UserListParseException(_("List contains non-word elements"))
        if count > max_words:
            raise UserListParseException(
                _("List contains more words than the current allowed per-file "
                  "limit of {}").format(max_words))
        # Skip blank and single-character lines.
        if len(word) <= 1:
            continue
        try:
            alpha_set.add(alphagrammize(word))
        except KeyError:
            raise UserListParseException(
                _('List contains invalid characters.'))
    return [Alphagram(a) for a in alpha_set]
def get_alphas_from_words(contents: str, max_words: int) -> List[str]:
    """
    Get all the alphagrams from the given words. Return a list of
    alphagrams.

    Raises UserListParseException for entries longer than 15 characters,
    for files over the max_words limit, or for words alphagrammize
    rejects with a KeyError (invalid characters).
    """
    seen = set()
    count = 0
    for raw_line in contents.split('\n'):
        candidate = raw_line.strip()
        if len(candidate) > 15:
            raise UserListParseException(_("List contains non-word elements"))
        count += 1
        if count > max_words:
            raise UserListParseException(
                _("List contains more words than the current allowed per-file "
                  "limit of {}").format(max_words))
        # Ignore blank and single-character lines.
        if len(candidate) < 2:
            continue
        try:
            seen.add(alphagrammize(candidate))
        except KeyError:
            raise UserListParseException(
                _('List contains invalid characters.'))
    return list(seen)
def get_alphas_from_word_list(word_list: list):
    """Return Alphagram objects for the unique alphagrams of word_list."""
    unique_alphas = {alphagrammize(word) for word in word_list}
    return [Alphagram(alpha) for alpha in unique_alphas]
def test_alphagrammize(self):
    """alphagrammize returns each word's letters in alphagram order."""
    cases = [
        ('BILLOWY', 'BILLOWY'),
        ('ACERVULI', 'ACEILRUV'),
        ('PRENTICE', 'CEEINPRT'),
        (u'1ARMAQUITO', u'AA1IMOQRTU'),
        (u'\u00d1O\u00d1E3IN1AS', u'A1EIN\u00d1\u00d1O3S'),
    ]
    for word, expected in cases:
        self.assertEqual(alphagrammize(word), expected)
def test_alphagrammize(self):
    """alphagrammize returns each word's letters in alphagram order."""
    expectations = {
        'BILLOWY': 'BILLOWY',
        'ACERVULI': 'ACEILRUV',
        'PRENTICE': 'CEEINPRT',
        '1ARMAQUITO': 'AA1IMOQRTU',
        'ÑOÑE3IN1AS': 'A1EINÑÑO3S',
    }
    for raw_word, alpha in expectations.items():
        self.assertEqual(alphagrammize(raw_word), alpha)