def parse(self):
    """Parse every data file in ``self.data_dir`` and index its words.

    The first non-empty line of each file is taken as the work's title
    (normalized to Title Case).  Every word of every non-empty line is
    then recorded as a ``Mention`` on the corresponding ``Word`` entity.
    """
    for file_name in os.listdir(self.data_dir):
        # Use a context manager so the file handle is always closed
        # (the original leaked it), and join the path portably instead
        # of relying on a trailing separator in ``self.data_dir``.
        with open(os.path.join(self.data_dir, file_name), "r") as data_file:
            # Skip leading blank lines; the first non-empty line is the title.
            title = ""
            while not title:
                title = data_file.readline().strip()
            title = " ".join(word[0].upper() + word[1:].lower()
                             for word in title.split())
            data_file.seek(0)
            for line in data_file:
                line = line.strip()
                if not line:
                    continue
                # Deduplicate words within a line before recording mentions.
                words = set(map(string.lower,
                                re.sub(r"[^\w]", " ", line).split()))
                for word in words:
                    word_data = Word.get_by_id(word, parent=self.parent)
                    if not word_data:
                        word_data = Word(parent=self.parent, id=word, name=word)
                    new_mention = Mention(line=line, work=title)
                    if word_data.mentions:
                        word_data.mentions.append(new_mention)
                    else:
                        word_data.mentions = [new_mention]
                    word_data.put()
def normalize(cls, word: Word) -> str: res = word.source.strip().upper() # Remove yer before linebreak unless tagged otherwise if word.tagset.note is not None and not ("+ъ" in word.tagset.note or "+ь" in word.tagset.note): res = cls._replace_yer_before_linebreak(res) # Remove milestones res = re.sub(Milestone.REGEX, "", res) if word.is_cardinal_number(): return word.tagset.pos # Non-spelled out numerals if word.is_ordinal_number(): return str( Number(res.replace("(", "").replace(")", "").replace( " ", ""))) # Spelled-out numerals res = replace_chars( res, characters.latin_special_characters, characters.cyrillic_special_characters, ) for idx in [idx for idx, char in enumerate(res) if char == "V"]: # Izhitsa positional replacement res = res[:idx] + cls._replace_izhitsa(res, idx) + res[idx + 1:] # Orthography normalization res = modif(res, word.tagset.pos if word.tagset is not None else "") return res.replace("#", "").replace("(", "").replace(")", "")
def get(self, word):
    """Render the page for *word*.

    ``word == "END"`` renders the end-of-session page.  When a
    ``back_to`` query argument is present and differs from *word*, the
    rendered page keeps a link back to that word.  Missing words render
    an error message instead of raising.
    """
    if word == "END":
        self.render("end.html")
        return

    back_to_word = self.get_argument("back_to", None)
    if back_to_word and back_to_word != word:
        try:
            _word = Word.objects(word=word).get()
        except DoesNotExist as e:
            logging.error(e)
            self.render_word(error="No word found, sorry")
            return
        self.render_word(word_content=_word.format_response(),
                         back_to_word=back_to_word)
        return

    try:
        _word = Word.objects(word=word).get()
    except DoesNotExist:
        self.render_word(error="word '{}' not found, sorry".format(word))
        return
    logging.info(_word)
    # ``.get()`` either returns a document or raises DoesNotExist, so
    # ``_word`` is normally truthy here; the else branch is a defensive
    # fallback only.
    if _word:
        self.render_word(word_content=_word.format_response())
    else:
        self.render_word(error="word {} not found, sorry".format(word))
    return
def get(self, word): """ 完成一个单词 :param word: :return: """ back_to_word = self.get_argument("back_to", None) if back_to_word and back_to_word != word: try: _word = Word.objects(word=word).get() except DoesNotExist as e: logging.error(e) self.render_word(word_content=None, error="No word found, sorry") return self.render_word(word_content=_word.format_response(), back_to_word=back_to_word) return record = self.get_current_record() for _word in record.words: if _word['word'] == word: _word['status'] = WORD_FINISHED record.save() break next_word = record.next_word if next_word == "END": self.render("end.html") else: _word = Word.objects(word=record.next_word).get() self.render_word(word_content=_word.format_response())
def test_clear_datastore(self): """Tests if the database is being cleared and considers non-empty lists of instances for all the models used by the application. """ self.assertNotEquals(Word.query().fetch(), []) self.assertNotEquals(Work.query().fetch(), []) self.assertNotEquals(Character.query().fetch(), []) self.assertNotEquals(list(FileMetadata.all().run()), []) self.testapp.get('/') self.assertEquals(Word.query().fetch(), []) self.assertEquals(Work.query().fetch(), []) self.assertEquals(Character.query().fetch(), []) self.assertEquals(list(FileMetadata.all().run()), [])
def get(self, word_str):
    """Render the per-word scoreboard page for *word_str*."""
    params = {}
    user = User.checkUser()
    if user:
        params['user'] = user.email
        params['logoutUrl'] = user.logoutUrl()
    params['word_str'] = word_str
    params['found'] = True

    word = Word.wordByString(word_str)
    if word is None:
        params['found'] = False
    else:
        params['scores'] = []
        for score in WordScore.scoresForWord(word):
            scorer = score.user.get()
            if scorer is None:
                continue
            # Mask the first four characters of the scorer's e-mail.
            params['scores'].append({
                "email": "****" + scorer.email[4:],
                "attempts": score.attempts,
            })

    html = template.render("web/templates/wordboard.html", params)
    self.response.write(html)
def post(self):
    """Handle one guess for today's word and reply with a JSON verdict."""
    user = User.checkUser()
    if not user:
        self.response.set_status(401)
        self.response.write('Need active user to proceed')
        return

    guess = self.request.get('word')
    if not guess:
        self.response.set_status(400)
        self.response.write('Can not process an empty guess')
        return

    word = Word.todaysWord()
    score = WordScore.getScore(user, word)
    score.incAttempts()

    solved = guess == word.word
    reply = {'solved': solved, 'attempts': score.attempts}
    if solved:
        score.setSolved()
    # Reveal the word length as a clue after enough attempts, or on success.
    if score.attempts >= config.ATTEMPTS_FOR_CLUE or solved:
        reply['wordLen'] = word.len

    self.response.write(json.dumps(reply))
def get_suggestion(word):
    '''Gets the most used suggestion for a misspelled word.

    The suggestion must exist in the database and must have distance 1
    to the input word. If there is more than one suggestion at distance
    1 that exists in the database, it chooses the one that appears most
    often in the database.

    Args:
        word: misspelled word.

    Returns:
        A suggestion of this word for the user or None if it doesn't
        find any.
    '''
    # A word already present in the database is not misspelled.
    if Word.get_by_id(word):
        return None
    candidates = _select_valid_words(_words_edit_distance_one(word))
    # Highest usage count wins; ties keep the first candidate, matching
    # the original strictly-greater scan.
    suggestion = max(candidates, key=lambda candidate: candidate.count,
                     default=None)
    # A zero (or negative) count never qualified as a suggestion.
    if suggestion is not None and suggestion.count > 0:
        return suggestion.name
    return None
def post(self): url = self.get_argument('url') # Fetch word dictionary with frequency word_dict = build_word_dict(url) # Get public key public_key = get_keys("public_key") new_dict_array = [] with self.make_session() as session: # Iterate through all the word in the word_dict , create the word hash and # check if a word hash exists in the database for word, quantity in word_dict: new_dict = {'text': word, 'size': quantity} new_dict_array.append(new_dict) salted_hash = get_salted_hash(word) word_element = yield as_future( session.query(Word).filter( Word.word_hash == salted_hash).first) if not word_element: # Create a word object and add to session encrypted_word = encrypt_data(word, public_key) session.add( Word(word_hash=salted_hash, word=encrypted_word, count=quantity)) else: # update the word object with the frequency word_element.count += quantity session.commit() self.render("base.html", word_array=json_encode(new_dict_array))
def index():
    """Render the wall index, defaulting to wall 1 when no id is given."""
    user = current_user()
    # -1 is the "no wall selected" sentinel: fall back to wall 1.
    # (Also renames the local away from the builtin ``id`` and removes
    # the duplicated branch.)
    wall_id = int(request.args.get('id', -1))
    if wall_id == -1:
        wall_id = 1
    ms = Word.all(wall_id=wall_id)
    wall = Wall.one(id=wall_id)
    ws = Wall.all()
    return render_template("wall/index.html",
                           ms=ms,
                           ws=ws,
                           wall=wall,
                           login_user=user)
def _get_word_mentions_by_char(word_name, work_title, char_name): """Get the words that a said by a character of a certain work Args: word_name: the string of the word being searched (lowercase). work_title: the title of the work in which the character appears (titlecase). char_name: the name of the character (titlecase). Returns: A dictionary indexed by the work and the characters. This redundant data is created in order to comply with the data pattern. """ word = Word.get_by_id(word_name) if not word: return {}, 0 work = Work.get_by_id(work_title, parent=word.key) if not work: return {}, 0 char = Character.get_by_id(char_name, parent=work.key) if not char: return {}, 0 mentions = char.get_string_mentions() bold_mentions = _bold_mentions(word_name, mentions) mentions_dict = {work_title: {char_name: bold_mentions}} return mentions_dict, char.count
def _get_word_mentions_in_work(word_name, work_title):
    """Collect every character's mentions of *word_name* in one work.

    Args:
        word_name: lowercase string of the searched word.
        work_title: titlecase title of the work.

    Returns:
        A ``({work: {char: bold_mentions}}, count)`` pair; the work level
        is kept to comply with the data pattern.  ``({}, 0)`` when the
        word or the work is unknown.
    """
    word_entity = Word.get_by_id(word_name)
    if not word_entity:
        return {}, 0
    work_entity = Work.get_by_id(work_title, parent=word_entity.key)
    if not work_entity:
        return {}, 0
    result = {work_title: {}}
    for char_entity in Character.query(ancestor=work_entity.key).fetch():
        raw_mentions = char_entity.get_string_mentions()
        result[work_title][char_entity.name] = _bold_mentions(word_name,
                                                              raw_mentions)
    return result, work_entity.count
def get(self):
    """Render the word list page with every stored word."""
    context = {
        'title': 'Words | 单词',
        'words': Word.all_word(),
    }
    self.render('purecss_ui/word.html', **context)
def get(self): """Clears the datastore.""" ndb.delete_multi(Word.query().fetch(keys_only=True)) ndb.delete_multi(Work.query().fetch(keys_only=True)) ndb.delete_multi(Character.query().fetch(keys_only=True)) ndb.delete_multi(Line.query().fetch(keys_only=True)) db.delete(FileMetadata.all(keys_only=True).run()) self.redirect('/admin')
def new_game(request):
    """Create a new game for the requesting user with a random word.

    Raises:
        endpoints.NotFoundException: the user does not exist.
        endpoints.BadRequestException: no random word could be selected.
    """
    user = User.query(User.name == request.user_name).get()
    # Validate the user before doing any word-selection work.
    if not user:
        raise endpoints.NotFoundException(
            'A User with that name does not exist!')
    total_words = Word.query().count()
    # Guard the empty word table: randint(1, 0) would raise ValueError
    # instead of a meaningful API error.
    if not total_words:
        raise endpoints.BadRequestException('Random word not found')
    rand_num = random.randint(1, total_words)
    word = Word.query(Word.word_id == rand_num).get()
    try:
        game = Game.new_game(user.key, word.key)
    except AttributeError:
        # ``word`` is None when no entity matched the random id.
        raise endpoints.BadRequestException('Random word not found')
    # Use a task queue to update the average attempts remaining.
    # This operation is not needed to complete the creation of a new game
    # so it is performed out of sequence.
    taskqueue.add(url='/tasks/cache_average_attempts')
    return game
def addWord():
    """Add a new word from the submitted form and cross-link related words.

    Returns:
        'no word' for a blank submission, 'already in db' for a
        duplicate, otherwise the JSON of the newly created word.
    """
    word = request.form['word'].lower()
    if not word.strip():
        return 'no word'
    if Word.objects(word=word):
        # do not add, already in Word
        return 'already in db'
    newWord = Word(word=word,
                   definition=request.form['definition'],
                   mnemonic=request.form['mnemonic'],
                   partOfSpeech=request.form['partOfSpeech'],
                   relatedWords=request.form.getlist('relatedWords'))
    newWord.save()
    # make sure each of the relatedWords has this word
    for relatedWord in newWord.relatedWords:
        # ``first()`` instead of ``get()``: a related word missing from the
        # database no longer raises DoesNotExist; it is simply skipped.
        relatedWordObj = Word.objects(word=relatedWord).first()
        # Bug fix: the original tested ``relatedWord not in ...`` but
        # appended ``word`` — check for the word actually being appended.
        if relatedWordObj and word not in relatedWordObj.relatedWords:
            relatedWordObj.relatedWords.append(word)
            relatedWordObj.save()
    return newWord.json()
def test(word_id=None, page=1):
    """Render a multiple-choice listening test question.

    Picks a random already-studied word as the answer and two random
    distractors from the full word list.
    """
    if 'user_id' not in session:
        return redirect("/")
    total = 11
    if session['url'] >= total:
        session['url'] = 1
        return redirect('/result')

    word_list = Word.get_words()
    user_words = UserActivity.user_words(session['user_id'])
    # The test needs a minimum pool of studied words.
    if len(user_words) < 10:
        return redirect("/message")

    # Pick the question/answer from the user's studied words.
    # (Leftover debug print removed; repeated indexing hoisted.)
    question = user_words[random.randint(0, len(user_words) - 1)]
    test_question_id = question[4]
    test_answer = question[6]
    audio_url = Audio.find_by_word(test_question_id)[0].content_url

    test_choices = [test_answer]
    # Draw two distractors, rerolling while they collide with each other
    # or with the correct answer.
    test_choice1 = random.randint(0, len(word_list) - 1)
    test_choice2 = random.randint(0, len(word_list) - 1)
    while (word_list[test_choice1].name == word_list[test_choice2].name
           or word_list[test_choice1].name == test_answer
           or word_list[test_choice2].name == test_answer):
        test_choice1 = random.randint(0, len(word_list) - 1)
        test_choice2 = random.randint(0, len(word_list) - 1)
    test_choices.append(word_list[test_choice1].name)
    test_choices.append(word_list[test_choice2].name)
    random.shuffle(test_choices)

    return render_template("test.html",
                           page=page,
                           url=session['url'],
                           next_word=page < total,
                           test_answer=test_answer,
                           test_choices=test_choices,
                           total=total,
                           audio_url=audio_url,
                           status="home")
def index_reduce(key, values): """Index reduce function. Args: key: a string in the format <word>_SEP<work>_SEP<character> values: the lines in which <word> appears in <work> in a speak of <character> The word is either added to the database or updated with its new occurence, adding info about the work in which it was found, which character pronounced it (if applicable), a count of occurrences and a reference to the line in which it was found. """ keys = key.split(_SEP) word_value, work_value, char_value = keys word = Word.get_by_id(word_value) work_titlecase = titlecase(work_value) if not word: word = Word(id=word_value, name=word_value, count=len(values)) work = Work(parent=word.key, id=work_titlecase, title=work_titlecase, count=len(values)) else: word.count += len(values) work = Work.get_by_id(work_titlecase, parent=word.key) if work: work.count += len(values) else: work = Work(parent=word.key, id=work_titlecase, title=work_titlecase, count=len(values)) character_titlecase = titlecase(char_value) char = Character(parent=work.key, id=character_titlecase, name=character_titlecase, count= len(values)) for line in set(values): char.mentions.append(pickle.loads(line)) word.put() work.put() char.put()
def test_filter_entities_using_query_works(self):
    '''We can search for all the entities starting from a word.'''
    # Word level.
    found_word = Word.get_by_id("death")
    self.assertEqual('death', found_word.name)
    self.assertEqual(2, found_word.count)
    # Work level: exactly one work hangs off the word.
    found_works = Work.query(ancestor=self.word.key).fetch()
    self.assertEqual(len(found_works), 1)
    # Character level: exactly one character with one mention.
    found_characters = Character.query(ancestor=found_works[0].key).fetch()
    self.assertEqual(len(found_characters), 1)
    speaker = found_characters[0]
    self.assertEqual(1, len(speaker.mentions))
    self.assertEqual("Though yet of Hamlet our dear brother's death",
                     speaker.mentions[0].get().line)
def _get_word_works(word_name): """Retrieves all the works in which a word occurs. Args: word_name: the word (lowercase). Returns: A list with the titles of the works. """ word_db = Word.get_by_id(word_name) if not word_db: return [] work_titles = [work_db.title for work_db in Work.query(ancestor=word_db.key).fetch()] return work_titles
def lesson_words(lesson_id, word_id=None):
    """Show the study page for one word of a lesson.

    Falls back to the user's latest word when *word_id* is not given and
    wraps around to the first word after the last one.
    """
    words = Word.find_by_lesson_id(lesson_id)
    if word_id is None:
        word_id = UserActivity.get_latest_word_id(session['user_id'])
    if word_id == words[-1].word_id:
        # Wrap around to the first word.  Bug fix: the original assigned
        # the Word object itself to ``word_id``, which never matched in
        # the search below and only landed on index 0 by accident.
        word_id = words[0].word_id
    word_index = 0
    for i, candidate in enumerate(words):
        if candidate.word_id == word_id:
            word_index = i
            break
    word = words[word_index]

    # Record the activity; repeat visits skip straight to the video.
    show_video = False
    user_activity = UserActivity(word.word_id, session['user_id'])
    if user_activity.is_duplicate() is False:
        user_activity.save_to_db()
    else:
        user_activity.update_timestamp()
        show_video = True

    audio = Audio.find_by_word(word.word_id)
    audio_url = audio[0].content_url
    video = Video.find_by_word(word.word_id)
    video_url = video[0].content_url

    next_word = words[word_index + 1] if word_index < len(words) - 1 else None
    previous_word = words[word_index - 1] if word_index != 0 else None

    return render_template("video.html",
                           lesson_id=lesson_id,
                           show_video=show_video,
                           audio_url=audio_url,
                           video_url=video_url,
                           word=word,
                           next_word=next_word,
                           previous_word=previous_word,
                           status="home")
def create_word(text: str): """ Creates a new word in the word database if it doesn't exist yet (case insensitive) :param text: The text for the new word :returns: The created word if successful """ lower_text = text.lower() existing_word = Word.query.filter( func.lower(Word.text) == lower_text).first() if existing_word is not None: return existing_word word = Word(text=text) db.session.add(word) db.session.commit() return word
def make_move(request):
    """Apply one letter guess to a hangman game; return (game, message).

    Every call increments the attempt counter, even for invalid or
    duplicate guesses; wrong letters also decrement the remaining-guess
    budget.  History entities record each guess so repeats can be
    rejected and successful guesses counted toward the win condition.
    """
    game = get_by_urlsafe(request.urlsafe_game_key, Game)
    # Counts even invalid/duplicate guesses.
    game.number_of_guess += 1
    word = Word.query(Word.key == game.word).get()
    split_word = list(word.word)
    guess = request.guess
    game_history = History.query(History.game == game.key)
    letters_guessed_so_far = []
    for history in game_history.iter():
        letters_guessed_so_far.append(history.guess)
    if game.game_over is True:
        msg = 'Game already over!'
    elif guess == '' or guess.isdigit() or len(guess) > 1:
        msg = "Please enter a single alpha character only"
    elif guess in letters_guessed_so_far:
        msg = "You have already used that letter"
    elif guess not in split_word:
        msg = "letter isn't in word"
        game.guesses_remaining -= 1
        save_history(game.key, game.number_of_guess, guess, False)
    else:
        msg = "Letter is in word!"
        save_history(game.key, game.number_of_guess, guess, True)
    # Added in a sleep because of latency writing to the datastore.
    # http://stackoverflow.com/questions/9137214/writing-then-reading-entity-does-not-fetch-entity-from-datastore
    time.sleep(0.1)
    # Win when the number of distinct successful guesses matches the word
    # length.  NOTE(review): compares against len(word.word); words with
    # repeated letters presumably need matching save_history semantics —
    # confirm.
    count_of_success = History.query(History.status == True).filter(
        History.game == game.key).count()
    if len(word.word) == count_of_success:
        msg = "You've won! The word was {}".format(word.word)
        game.end_game(True)
    elif game.guesses_remaining == 0:
        msg = 'You have run out of guesses! The word was {}'.format(word.word)
        game.end_game()
    taskqueue.add(url='/tasks/cache_average_attempts')
    game.put()
    return game, msg
def setUp(self):
    '''Creates an instance of Testbed class and initializes it with the
    datastore stub. Also creates the entities and stores them in the
    database.'''
    self.testbed = testbed.Testbed()
    self.testbed.activate()
    self.testbed.init_datastore_v3_stub()

    # Build the word -> work -> character ancestor chain.
    self.word = Word(id="death", name="death", count=2)
    self.work = Work(parent=self.word.key, id="Hamlet", title="Hamlet",
                     count=1)
    self.character = Character(parent=self.work.key, id="Claudius",
                               name="Claudius", count=1)
    mention_key = Line(
        line="Though yet of Hamlet our dear brother's death").put()
    self.character.mentions = [mention_key]

    self.word_key = self.word.put()
    self.work_key = self.work.put()
    self.character_key = self.character.put()
def _get_work_characters(word_name, work_title): """Retrieves all the characters that mentions a word in a given work. Args: word_name: the string of the word which the characters mention (lowercase). work_title: the title of the work of interest (titlecase). Returns: A list with the names of the characters. """ word_db = Word.get_by_id(word_name) if not word_db: return [] work_db = Work.get_by_id(work_title, parent=word_db.key) if not work_db: return [] char_names = [char_db.name for char_db in Character.query(ancestor=work_db.key).fetch()] return char_names
def get(self): """Retrieves formatted information to the treemap visualization. It expects a list of elements, and each element is a list of the following type: [name, parent's name, value, color value] In which name and parent's name are strings, value is an integer proportional to the size of the resulting rectangle on the treemap and color value is the value to be used as color acording to the color range. It is called the function get_all_word_mentions to obtain a dictionary that maps from work and character to mentions. """ searched_value = cgi.escape(self.request.get('searched_word').lower()) if not searched_value: return all_mentions, count = _get_all_word_mentions(searched_value) if not count: return treemap_data = [['Location', 'Parent', 'Word Occurrences'], ['Shakespeare\'s Corpus', None, count]] word_db = Word.get_by_id(searched_value) for work in all_mentions: work_db = Work.get_by_id(work, parent=word_db.key) treemap_data.append([work, 'Shakespeare\'s Corpus', work_db.count]) for char in all_mentions[work]: if not char: continue char_db = Character.get_by_id(char, parent=work_db.key) treemap_data.append([{'v': work + '+' + char, 'f': char}, work, char_db.count]) self.response.headers['Content-Type'] = 'text/json' self.response.out.write(json.encode({"array": treemap_data}))
def get(self):
    """Render the scoreboard of past words (today's word excluded)."""
    params = {}
    user = User.checkUser()
    if user:
        params['user'] = user.email
        params['logoutUrl'] = user.logoutUrl()

    params['words'] = []
    for entry in Word.allWords():
        # we don't want to show today's word in the scoreboard!
        if entry.day == datetime.date.today():
            continue
        params['words'].append({"word": entry.word, "date": entry.day})

    html = template.render("web/templates/scoreboard.html", params)
    self.response.write(html)
def __get_upos(word: Word) -> str: if word.pos == "сущ": return "PROPN" if word.is_proper else "NOUN" if word.pos in ("прил", "прил/ср", "прил/н", "числ/п"): return "ADJ" if word.pos == "числ" or word.is_cardinal_number(): return "NUM" if word.pos == "мест": return "PRON" # TODO Distinguish DET if word.pos in ("гл", "гл/в", "прич", "прич/в", "инф", "инф/в", "суп"): return "AUX" if word.tagset.role == "св" else "VERB" if word.pos == "нар": return "ADV" if word.pos in ("пред", "посл"): return "ADP" if word.pos == "союз": return "CCONJ" # TODO Distinguish SCONJ if word.pos == "част": return "PART" if word.pos == "межд": return "INTJ" return "X"
def get(self):
    """Render the search page listing mentions of the searched word,
    grouped by work, with the searched word bolded in each line.
    """
    searched_value = self.request.get('searched_word')
    value = searched_value if searched_value else ''
    work_mentions = []
    number_results = 0
    # Bug fix: start/end were previously undefined when no word was
    # searched, so the 'time' template value raised NameError.
    start = end = 0
    if value:
        start = time.time()
        word = Word.get_from_shakespeare_index(cgi.escape(value))
        end = time.time()
        if word:
            # Grouping mentions by work for UI display.
            work_mentions = {}
            # Bug fix: the raw user input was used as a regex pattern, so
            # metacharacters (e.g. '(') broke the highlighting; escape it.
            pattern = re.escape(value)
            for mention in word.mentions:
                number_results += 1
                # Making the words stay bold
                line = re.sub(pattern, "<b>%s</b>" % value, mention.line)
                if mention.work not in work_mentions:
                    work_mentions[mention.work] = []
                work_mentions[mention.work].append(line)
    # (Leftover debug prints removed.)
    template_values = {
        'searched_word': value,
        'work_mentions': work_mentions,
        'number_results': number_results,
        'time': round(end - start, 4)
    }
    self.response.headers['Content-Type'] = 'text/html'
    self.response.out.write(
        template.render('templates/index.html', template_values))
def get(self):
    """Render today's game page; anonymous users are sent to the root."""
    user = User.checkUser()
    if not user:
        self.redirect('/')
        return

    word = Word.todaysWord()
    score = WordScore.getScore(user, word)
    logging.info("--------- Wrd score: Word: {}, Solved: {}".format(
        word.word, score.solved))

    params = {
        'user': user.email,
        'logoutUrl': user.logoutUrl(),
        'definition': word.definition,
        'solved': score.solved,
        'attempts': score.attempts,
    }
    # Reveal the word length once the user has burned enough attempts.
    if score.attempts >= config.ATTEMPTS_FOR_CLUE:
        params['wordLen'] = word.len

    self.response.write(
        template.render("web/templates/game.html", params))
def _get_all_word_mentions(word_name): """Get all the mentions of a certain word string representation accessed first by work and then by character. Args: word_name: the string representation of the word. Returns: A dictionary of dictionaries, being the first key the work title and the second, the character name. """ all_mentions = {} word = Word.get_by_id(word_name) if not word: return {}, 0 works = Work.query(ancestor=word.key) for work in works: work_chars = Character.query(ancestor=work.key) all_mentions[work.title] = {} for char in work_chars: mentions = char.get_string_mentions() bold_mentions = _bold_mentions(word.name, mentions) all_mentions[work.title][char.name] = bold_mentions return all_mentions, word.count
def get_words(self):
    """Return all Word entries belonging to this lesson.

    Delegates to ``Word.find_by_lesson_id`` with this instance's
    ``lesson_id``.
    """
    return Word.find_by_lesson_id(self.lesson_id)
class DatastoreTest(unittest.TestCase):
    """Datastore tests for the word -> work -> character entity chain."""

    def setUp(self):
        '''Creates an instance of Testbed class and initializes it with the
        datastore stub. Also creates the entities and stores them in the
        database.'''
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_datastore_v3_stub()
        self.word = Word(id="death", name="death", count=2)
        self.work = Work(
            parent=self.word.key, id="Hamlet", title="Hamlet", count=1)
        self.character = Character(
            parent=self.work.key, id="Claudius", name="Claudius", count=1)
        line = Line(line='Though yet of Hamlet our dear brother\'s death').put()
        self.character.mentions = [line]
        self.word_key = self.word.put()
        self.work_key = self.work.put()
        self.character_key = self.character.put()

    def tearDown(self):
        '''Deactivate the testbed. This restores the original stubs so that
        tests do not interfere with each other.'''
        self.word_key.delete()
        self.work_key.delete()
        self.character_key.delete()
        self.testbed.deactivate()

    def test_insert_entities(self):
        '''Ensures that the entities are saved in the database.

        If we can retrieve them, they are correctly stored.'''
        retrieved_word = self.word_key.get()
        # Fix: this assertion was duplicated verbatim; check the name too.
        self.assertEqual('death', retrieved_word.name)
        self.assertEqual(2, retrieved_word.count)
        retrieved_work = self.work_key.get()
        self.assertEqual('Hamlet', retrieved_work.title)
        retrieved_character = self.character_key.get()
        self.assertEqual('Claudius', retrieved_character.name)
        self.assertEqual(1, len(retrieved_character.mentions))
        self.assertEqual('Though yet of Hamlet our dear brother\'s death',
                         retrieved_character.mentions[0].get().line)

    def test_searching_a_non_existing_word(self):
        '''Ensure nothing fails if we search a word that doesn't exist.'''
        retrieved_word = Word.get_by_id("sdfgfdgdgf")
        self.assertEqual(retrieved_word, None)

    def test_filter_entities_using_query_works(self):
        '''We can search for all the entities starting from a word.'''
        retrieved_word = Word.get_by_id("death")
        self.assertEqual('death', retrieved_word.name)
        self.assertEqual(2, retrieved_word.count)
        retrieved_works = Work.query(ancestor=self.word.key).fetch()
        self.assertEqual(len(retrieved_works), 1)
        work = retrieved_works[0]
        retrieved_character = Character.query(ancestor=work.key).fetch()
        self.assertEqual(len(retrieved_character), 1)
        char = retrieved_character[0]
        self.assertEqual(1, len(char.mentions))
        self.assertEqual("Though yet of Hamlet our dear brother's death",
                         char.mentions[0].get().line)
def get(self): """ 生成背诵列表 使用Cookie来记录用户的Record :return: """ user = self.get_current_user_mongo() # 获取上次未完成的背诵 record = self.get_current_record() if record: # self.write_response(record.format_response()) next_word = record.next_word self.set_secure_cookie("next_word", next_word) self.render("recite.html", record=record.format_response(), next_word=next_word) return # 已背的单词们 # Pipeline pipeline = [ # Stage 1 { "$match": { "user": user.pk, } }, # Stage { "$unwind": "$words" }, # Stage { "$match": { "words.status": WORD_FINISHED } }, # Stage { "$group": { "_id": "$user", "words_recited": {"$addToSet": "$words.word"} } }, # Stage { "$project": { "_id": 0, "words_recited": 1 } } ] logging.info(user.pk) recited_words = Record.objects(user=user, words__status=WORD_FINISHED).aggregate(*pipeline) # 如果有值, 会返回只拥有一个元素的列表 recited_words = list(recited_words) query = [Q(scope__in=[user.scope])] if recited_words: query.append(Q(word__nin=recited_words[0]['words_recited'])) query = reduce(lambda x, y: x & y, query) # 获取新的待背诵单词 wait_words = Word.objects(query).limit(user.quota) # 记录新的背诵记录 new_record = Record( user=user, words=[{"word": word.word, "status": WORD_UNDONE} for word in wait_words] ).save() self.set_secure_cookie("record_id", str(new_record.id)) next_word = new_record.words[0]['word'] # self.set_secure_cookie("next_word", next_word) self.render("recite.html", record=new_record.format_response(), next_word=next_word) return
def add_word(request):
    """Store the submitted word under a freshly allocated numeric id and
    return its text."""
    allocated_id = Word.allocate_ids(1)[0]
    entity = Word(word=request.new_word, word_id=allocated_id)
    entity.put()
    return entity.word
def get(self):
    """Render the admin word list."""
    context = {
        'title': 'words',
        'words': Word.all_word(),
    }
    self.render('admin/admin_word.html', **context)
def post(self):
    """Create a word/translation pair from the form and report success."""
    submitted_word = self.get_argument('word', '')
    submitted_translation = self.get_argument('translation', '')
    Word.create_word(submitted_word, submitted_translation)
    self.return_status(200, '添加成功')
def add():
    """Create a Word from the posted form and redirect back to its wall."""
    payload = request.form.to_dict()
    Word.new(payload)
    log('add form', payload)
    return redirect(url_for('.index', id=payload['wall_id']))
def create_word(self, word):
    """Create and store a new ``Word`` instance for *word*.

    First refreshes ``self.word_dict`` via ``create_word_dict``, then
    instantiates ``Word`` from it and keeps the result on ``self.word``.

    :param word: the raw word to build the dictionary from.
    """
    self.create_word_dict(word)
    # NOTE(review): debug output to stdout; consider the logging module.
    print("--Instantiating new word--")
    self.word = Word(self.word_dict)
def get_words(self, request): """Return all words in the words to be found list""" return WordForms(words=[word.to_form() for word in Word.query()])
def get(self):
    """Render the word page with every stored word."""
    self.render('purecss_ui/word.html',
                title='Words | 单词',
                words=Word.all_word())
# Look up the requested song by title in storage; prompt for its metadata
# when it does not exist yet.
db_songs = {}
for song in storage.all(Song).values():
    db_songs[song.title] = song
if db_songs.get(input_song) is None:
    input_genre = input('Genre: ')
    image = input('image_url: ')
    lyrics = input('lyrics: ')
    song = Song()
    song.artist = input_artist
    song.title = input_song
    song.lyrics = lyrics
    song.genre = input_genre
    song.image_url = image
else:
    song = db_songs[input_song]
# Parse a "[w1,w2,...]"-style input into individual word strings.
# NOTE(review): items keep any surrounding whitespace after split(',') —
# confirm that is intended.
words = input('words: ')
word_list = map(str, words.strip('[]').split(','))
# Index existing words by text to avoid creating duplicates.
db_words = {}
for word in storage.all(Word).values():
    db_words[word.text] = word
for item in word_list:
    if db_words.get(item) is None:
        word = Word()
        word.text = item
        word.save()
        song.words.append(word)
    else:
        song.words.append(db_words[item])
# NOTE(review): the script mixes ``storage`` and ``models.storage`` —
# presumably the same object; confirm.
models.storage.new(song)
song.save()
def get(self): """ 生成背诵列表 使用Cookie来记录用户的Record :return: """ user = self.get_current_user_mongo() # 获取上次未完成的背诵 record = self.get_current_record() if record: # self.write_response(record.format_response()) next_word = record.next_word self.set_secure_cookie("next_word", next_word) self.render("recite.html", record=record.format_response(), next_word=next_word) return # 已背的单词们 # Pipeline pipeline = [ # Stage 1 { "$match": { "user": user.pk, } }, # Stage { "$unwind": "$words" }, # Stage { "$match": { "words.status": WORD_FINISHED } }, # Stage { "$group": { "_id": "$user", "words_recited": { "$addToSet": "$words.word" } } }, # Stage { "$project": { "_id": 0, "words_recited": 1 } } ] logging.info(user.pk) recited_words = Record.objects( user=user, words__status=WORD_FINISHED).aggregate(*pipeline) # 如果有值, 会返回只拥有一个元素的列表 recited_words = list(recited_words) query = [Q(scope__in=[user.scope])] if recited_words: query.append(Q(word__nin=recited_words[0]['words_recited'])) query = reduce(lambda x, y: x & y, query) # 获取新的待背诵单词 wait_words = Word.objects(query).limit(user.quota) # 记录新的背诵记录 new_record = Record(user=user, words=[{ "word": word.word, "status": WORD_UNDONE } for word in wait_words]).save() self.set_secure_cookie("record_id", str(new_record.id)) next_word = new_record.words[0]['word'] # self.set_secure_cookie("next_word", next_word) self.render("recite.html", record=new_record.format_response(), next_word=next_word) return
def test_searching_a_non_existing_word(self):
    '''Ensure nothing fails if we search a word that doesn't exist.'''
    missing = Word.get_by_id("sdfgfdgdgf")
    self.assertIsNone(missing)