import sys

def resolveDuplicates(word, progressCount):
    global originalTotalCount
    global originalDupeCount
    global originalRelationshipCount
    # Words that already belong to an owner were resolved earlier.
    if word.owner_id is not None:
        return
    dupes = word.getPotentialDuplicates()
    while dupes:
        print(str(progressCount) + " of " + str(Word.totalCount()))
        print(str(Word.totalCount() - originalTotalCount) + " new words, " +
              str(Word.dupeCount() - originalDupeCount) + " new dupes, " +
              str(Word.relationshipCount() - originalRelationshipCount) +
              " new relationships")
        for i, item in enumerate(dupes, start=1):
            print(str(i) + "\t" + str(item))
        print("Item:\t" + str(word))
        print('Select item this is (d)uplicate or (o)wner of, '
              '(m)ake a new item combining two, (c)ontinue, or (q)uit')
        command = input('>').strip()
        if command[0] == 'd':
            word.dupeOf(dupes[int(command[1:]) - 1])
            word = dupes[int(command[1:]) - 1]
        elif command[0] == 'o':
            word.ownerOf(dupes[int(command[1:]) - 1])
        elif command[0] == 'm':
            word = makeComboItem(word, dupes[int(command[1:]) - 1])
        elif command[0] == 'c':
            # Mark every remaining candidate as resolved and move on.
            for dupe in dupes:
                word.resolve(dupe)
        elif command[0] == 'q':
            sys.exit()
        dupes = word.getPotentialDuplicates()
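# A minimal driver sketch (an assumption, not in the original source): walk
# every stored word through resolveDuplicates, assuming Word.getAll() exists
# as used by writeToFile in the sync script further below.
def resolveAllDuplicates():
    progress = 1
    for word in Word.getAll():
        resolveDuplicates(word, progress)
        progress += 1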
def add_word(self, request):
    """Add a word to the list of words.

    Args:
        request: a WordForm object which includes a 'word'.
    Returns:
        StringMessage: confirms the 'word' has been added as an entity
            to the Word model.
    Raises:
        endpoints.ConflictException: if the word is already in the list.
        endpoints.BadRequestException: if the input is not a single word,
            or contains special characters or numbers.
    """
    if Word.query(Word.word == request.word).get():
        raise endpoints.ConflictException('That word is in the list!')
    word_list = []
    temp = request.word.upper()
    for i in temp:
        # Anything outside A-Z (including spaces) is rejected.
        if i == " " or i < 'A' or i > 'Z':
            raise endpoints.BadRequestException('Please Enter One Word!')
        word_list.append(i)
    w = Word(word=request.word, word_list=word_list)
    w.put()
    return StringMessage(message='Added %s to the list!' % request.word)
def add_word(self, widget):
    # show dialog and get button clicked code
    dialog = Dialog(self)
    response = dialog.run()
    dialog.destroy()
    if response != Gtk.ResponseType.OK:
        return

    # get entered word & translation
    word = dialog.word.strip()
    translation = dialog.translation.strip()
    if word == '' or translation == '':
        return

    # insert new word entered in database
    record = Word(word=word, translation=translation, date=datetime.now())
    pk = Word.insert(record)
    record = Word.retrieve_by_id(pk)

    # add inserted word to list view
    store = self.list.get_model()
    store.append([
        record.id,
        record.word,
        record.translation,
        record.date.strftime("%Y-%m-%d %H:%M:%S")
    ])
def edit_word(self, widget):
    # get selected row
    store, iter_list = self.list.get_selection().get_selected()
    pk = store[iter_list][0]
    record = Word.retrieve_by_id(pk)

    # show dialog and get button clicked code
    dialog = Dialog(self, record.word, record.translation)
    response = dialog.run()
    dialog.destroy()
    if response != Gtk.ResponseType.OK:
        return

    # get entered word & translation
    word = dialog.word.strip()
    translation = dialog.translation.strip()
    if word == '' or translation == '':
        return

    # update database field according to given column
    Word.update_by_id(pk, word, translation)

    # update edited list row
    store[iter_list][1] = word
    store[iter_list][2] = translation
def initialiseDB():
    try:
        loc = ("Mywords.xlsx")
        wb = xlrd.open_workbook(loc)
        sheet = wb.sheet_by_index(0)
        meanings = []
        # As we are generating options using meanings of other words, we
        # first get all the meanings and store them (starting at row 1, so
        # the header row is skipped just as in the insert loop below)
        for i in range(1, sheet.nrows):
            meanings.append(sheet.cell_value(i, 1))
        # We insert the words one by one into the database
        for i in range(1, sheet.nrows):
            word = sheet.cell_value(i, 0).lower()
            meaning = sheet.cell_value(i, 1).lower()
            hint = sheet.cell_value(i, 2).lower()
            completed = sheet.cell_value(i, 3)
            # Build four distinct answer options drawn from the pool.
            options = []
            while len(options) <= 3:
                newChoice = random.choice(meanings)
                if newChoice not in options:
                    options.append(newChoice)
            newWord = Word(word=word, meaning=meaning, hint=hint,
                           options=options, completed=completed)
            newWord.insert()
        return jsonify({"success": True})
    except BaseException:
        rollback()
        print("Word formatting error")
        abort(400)
def makeAnswer(wordText, question, POSText, index):
    w = Word(question=question, word=wordText,
             part_of_speech=POSText, index=index)
    w.save()
    return w.pk
def remove_word(self, widget):
    # remove word from database
    store, iter_list = self.list.get_selection().get_selected()
    pk = store[iter_list][0]
    Word.delete_by_id(pk)

    # remove row from list view
    store.remove(iter_list)
def save_word(word):
    word_objects = Word.objects.filter(title=word)
    if not word_objects:
        word_object = Word(title=word)
        word_object.save()
    else:
        word_object = word_objects[0]
    return word_object
def optword(self):
    # For every word, keep the first stored copy and flag the rest
    # as duplicates by setting re2 to 'd'.
    query = Word.select()
    for i in query:
        name = i.word
        words = Word.select().where(Word.word == name)
        if len(words) > 1:
            for j in words[1:]:
                j.re2 = 'd'
                j.save()
def makeWord(word, question, index):
    part_of_speech = getPartOfSpeech(word)
    word = getWord(word)
    w = Word(question=question, word=word,
             part_of_speech=part_of_speech, index=index)
    w.save()
    return w.pk
def create(word: str, description: str) -> dict:
    """Creates a word."""
    result: dict = {}
    try:
        # Use a distinct name so the 'word' parameter isn't shadowed
        # by the model instance.
        record = Word(word=word, description=description)
        record.save()
    except IntegrityError:
        Word.rollback()
        # raise Exception
    return result
def news_with_one_company():
    positive_words = Word.select().where(Word.is_positive == True)
    negative_words = Word.select().where(Word.is_positive == False)
    df = pd.read_csv("./../../data/news/news_with_one_company.csv")
    df[["sent_score", "word_count", "words", "parsed_sentence"]] = df.apply(
        sentence_info_pd, axis=1,
        args=(list(positive_words), list(negative_words)))
    df.to_csv(
        "./../../data/news/news_with_one_company_and_sentiment_analysis.csv",
        index=False)
def remove_word(c):
    word_id = c.data.split(" ")[1]
    word = Word.get(Word.id == word_id)
    word.delete_instance()
    words = Word.select(Word)
    keyboard = types.InlineKeyboardMarkup()
    if len(words) == 0:
        bot.send_message(cid(c), s.empty_words)
    for w in words:
        callback_button = types.InlineKeyboardButton(
            text=w.word, callback_data=str("remove {}".format(w.id)))
        keyboard.add(callback_button)
    bot.edit_message_reply_markup(chat_id=cid(c),
                                  message_id=c.message.message_id,
                                  reply_markup=keyboard)
def add_word(word):
    try:
        existing_word = db.session.query(Word).filter_by(title=word).one()
        return jsonify({'error': 'Word already exists'})
    except NoResultFound:
        try:
            new_word = Word()
            new_word.title = word
            db.session.add(new_word)
            db.session.commit()
            return jsonify({'id': new_word.id})
        except ValueError as ex:
            return jsonify({'error': str(ex)})
def edit_field(self, widget, path, text, column):
    # get edited row & its old values
    store = self.list.get_model()
    row = store[path]
    pk, word, translation = row[0], row[1], row[2]

    # update database field according to given column
    word = text if column == 1 else word
    translation = text if column == 2 else translation
    Word.update_by_id(pk, word, translation)

    # update model with edited text
    row[column] = text
def word_list():
    print(flask.request.method)
    if flask.request.method == 'PUT':
        word_list = flask.request.form.get(
            'word_list') or flask.request.get_json(force=True).get('word_list')
        # Remove all words
        if len(word_list) > 0:
            Word.objects().delete()
        # Create the new word list
        for word in word_list:
            Word(word=word).save()
    return flask.jsonify(
        [word.word for word in Word.objects.all().order_by('_id')])
def get_word(title):
    if not is_emoji(title):
        return jsonify({'error': 'Title must be a combination of up to 3 emoji'})
    try:
        word = db.session.query(Word).\
            options(joinedload('definitions')).\
            filter_by(title=title).one()
    except NoResultFound:
        word = Word()
        word.title = title
        db.session.add(word)
        db.session.commit()
    res_word = word.as_dict()

    def format_definition(definition):
        formatted = definition.as_dict()
        user = definition.user.as_dict()
        # Never expose OAuth credentials in API responses.
        del user['oauth_token']
        del user['oauth_token_secret']
        formatted['user'] = user
        votes = definition.votes
        upvotes = 0
        downvotes = 0
        for vote in votes:
            if vote.vote == 1:  # '==' rather than 'is': identity checks on ints are unreliable
                upvotes += 1
            else:
                downvotes += 1
        formatted['upvotes'] = upvotes
        formatted['downvotes'] = downvotes
        return formatted

    definitions = []
    for definition in word.definitions:
        formatted = format_definition(definition)
        definitions.append(formatted)

    def def_comp(definition):
        return definition['upvotes'] - definition['downvotes']

    # Highest-scoring definitions first.
    sorted_definitions = sorted(definitions, key=def_comp, reverse=True)
    res_word['definitions'] = sorted_definitions
    return jsonify({
        'word': res_word
    })
def new_game(self, request):
    """Creates new game"""
    user = User.query(User.name == request.user_name).get()
    if not user:
        raise endpoints.NotFoundException(
            'A User with that name does not exist!')
    word = Word.query(Word.word_to_guess == request.word).get()
    if not word:
        word = Word(word_to_guess=request.word)
        word.put()
    try:
        game = Game.new_game(user.key, word.key.urlsafe())
        return game.to_form('Good luck playing Hangman!')
    except:
        raise
def main():
    # Parse command line
    import argparse
    parser = argparse.ArgumentParser(
        description='Add words to the SketchWithUs database')
    parser.add_argument('input', help='file containing one word per line')
    args = parser.parse_args()

    # Connect to the database
    db.connect()

    # Insert all the words, in batches of 1000
    with db.transaction():
        for chunk in _grouper(1000, _file_iter(args.input)):
            print(chunk)
            Word.insert_many({'text': x, 'plays': 0, 'wins': 0}
                             for x in chunk).execute()
def crawl(pixiv_id, password, cron):
    words = [w for w in Word.select().order_by(Word.id)]
    crawler = PixivCrawler()
    crawler.login(pixiv_id, password)
    for word in words:
        data = {"word": word, "stored_at": date.today()}
        try:
            # Skip words already crawled today.
            r = SearchResult.get(word=data["word"], stored_at=data["stored_at"])
            if not cron:
                click.echo(
                    f"[already stored] {word.text} - safe: {r.num_of_safe} hits / r18: {r.num_of_r18} hits"
                )
        except SearchResult.DoesNotExist:
            for mode in ("safe", "r18"):
                crawler.search(word.text, mode)
                if mode == "safe":
                    data["safe"] = crawler.get_search_count()
                else:
                    data["r18"] = crawler.get_search_count()
                time.sleep(3)
            SearchResult.create(
                word=data["word"],
                stored_at=data["stored_at"],
                num_of_safe=data["safe"],
                num_of_r18=data["r18"],
            )
            if not cron:
                click.echo(
                    f"{word.text} - safe: {data['safe']} hits / r18: {data['r18']} hits"
                )
def word_won(word):
    try:
        query = Word.update(wins=Word.wins + 1).where(Word.text == word)
        query.execute()
    except:
        db.rollback()
        raise
def calc_sim(self, document, query_document):
    # Cosine similarity between the document and the query, using per-word
    # weights: the dot product divided by the product of the two vector
    # norms, plus a small epsilon to avoid division by zero.
    sum_weight_word_in_document_multiply_word_in_query = 0
    sum_weight_word_in_document_pow2 = 0
    sum_weight_word_in_query_pow2 = 0
    for word in query_document.words:
        query_word = Word.objects(text=word).first()
        if not query_word:
            continue
        weight_word_in_document = self.calc_weight(query_word, document)
        weight_word_in_query = self.calc_weight(query_word, query_document)
        sum_weight_word_in_document_multiply_word_in_query += \
            weight_word_in_document * weight_word_in_query
        sum_weight_word_in_document_pow2 += weight_word_in_document**2
        sum_weight_word_in_query_pow2 += weight_word_in_query**2
    sim = sum_weight_word_in_document_multiply_word_in_query / (
        (math.sqrt(sum_weight_word_in_document_pow2) *
         math.sqrt(sum_weight_word_in_query_pow2)) + 0.0001)
    return sim
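# Worked example of the cosine formula used by calc_sim, with plain floats and
# no database: weights (3, 4) against (4, 3) give 24 / (5 * 5 + 0.0001).
import math

def cosine(ws_doc, ws_query, eps=0.0001):
    dot = sum(d * q for d, q in zip(ws_doc, ws_query))
    norm = (math.sqrt(sum(d * d for d in ws_doc)) *
            math.sqrt(sum(q * q for q in ws_query)))
    return dot / (norm + eps)

assert abs(cosine([3.0, 4.0], [4.0, 3.0]) - 24 / 25.0001) < 1e-9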
def save():
    """
    View that saves a word: if the word already exists, report that;
    otherwise store it.
    :param: the submitted word (from the form data)
    :return: a status message
    """
    word_name = request.form.get("word")
    word = Word.query.filter(Word.word == word_name).first()
    if word:
        # Use word_name here: concatenating the Word object itself would fail.
        return word_name + " already exists"
    else:
        try:
            translation = request.form.get("translation")
            introduction = request.form.get("introduction")
            star = request.form.get("star")
            example = request.form.get("words")
            group_id = Group.query.filter(
                Group.name == request.form.get("group_name")).first()
            word_O = Word(word=word_name, translation=translation,
                          introduction=introduction, star=star,
                          group_id=group_id.id, example=example)
            db.session.add(word_O)
            db.session.commit()
            return word_name + " added"
        except Exception as e:
            db.session.rollback()
            print(e)
            return word_name + " could not be saved"
def show_words(self):
    # load words from database
    words = Word.retrieve_all()

    # fill list model with words from database
    store = Gtk.ListStore(int, str, str, str)
    for word in words:
        store.append([
            word.id,
            word.word,
            word.translation,
            word.date.strftime("%Y-%m-%d %H:%M:%S")
        ])

    # editable cells for each column
    cell = Gtk.CellRendererText(editable=False)
    cell_word = Gtk.CellRendererText(editable=True)
    cell_translation = Gtk.CellRendererText(editable=True)

    # add list view to window
    self.list = Gtk.TreeView(store)
    column_id = Gtk.TreeViewColumn('Id', cell, text=0)
    column_word = Gtk.TreeViewColumn('Word', cell_word, text=1)
    column_translation = Gtk.TreeViewColumn('Translation', cell_translation, text=2)
    column_date = Gtk.TreeViewColumn('Date', cell, text=3)
    self.list.append_column(column_id)
    self.list.append_column(column_word)
    self.list.append_column(column_translation)
    self.list.append_column(column_date)
    self.add(self.list)

    # connect text cell edited signals
    cell_word.connect('edited', self.edit_field, 1)
    cell_translation.connect('edited', self.edit_field, 2)
def get_words(expn, parent, lmk=None, rel=None):
    words = []
    probs = []
    entropy = []

    for n in expn.split():
        if n in NONTERMINALS:
            if n == parent == 'LANDMARK-PHRASE':
                # we need to move to the parent landmark
                lmk = parent_landmark(lmk)

            # we need to keep expanding
            expansion, exp_prob, exp_ent = get_expansion(n, parent, lmk, rel)
            w, w_prob, w_ent = get_words(expansion, n, lmk, rel)
            words.append(w)
            probs.append(exp_prob * w_prob)
            entropy.append(exp_ent + w_ent)
        else:
            # get word for POS
            w_db = Word.get_words(pos=n, lmk=lmk_id(lmk), rel=rel_type(rel))
            counter = collections.Counter(w_db)
            keys, counts = zip(*counter.items())
            counts = np.array(counts, dtype=float)  # float, so the normalization below works
            counts /= counts.sum()
            w, w_prob, w_entropy = categorical_sample(keys, counts)
            words.append(w.word)
            probs.append(w.prob)
            entropy.append(w_entropy)

    p, H = np.prod(probs), np.sum(entropy)
    print('expanding %s to %s (p: %f, H: %f)' % (expn, words, p, H))
    return words, p, H
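# Hedged sketch of the categorical_sample helper assumed above (its real
# implementation is not shown here): draw one key according to the normalized
# counts and return it with its probability and the distribution's entropy.
import numpy as np

def categorical_sample(keys, probs):
    index = np.random.choice(len(keys), p=probs)
    entropy = -np.sum(probs * np.log(probs))
    return keys[index], probs[index], entropy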
def test_insert_entity(self):
    Word(englishWord='Hi', imagePath='hello.jpg', languageName='Italian',
         translatedWord='Ciao', difficulty=1).put()
    self.assertEqual(1, len(Word.query().fetch(2)))
def delete_word(limit, terminals, words, lmk=None, rel=None):
    num_deleted = []
    for term, word in zip(terminals, words):
        # get word for POS
        num_deleted.append(
            Word.delete_words(limit, pos=term, word=word,
                              lmk=lmk_id(lmk), rel=rel_type(rel))
        )
    return num_deleted
def statistic(self):
    query_l = Lrc.select()
    print(len(query_l))
    query_w = Word.select().where(Word.word != 'd')
    print(len(query_w))
    query_r = Rhyme.select()
    print(len(query_r))
def words():
    if flask.request.method == 'POST':
        word = flask.request.form.get('word') or flask.request.get_json(
            force=True).get('word')
        Word(word=word).save()
    return flask.jsonify(
        [word.word for word in Word.objects.all().order_by('_id')])
def _get_or_create_word(self, word):
    word_query = Word.query.filter(Word.word == word)
    if word_query.count() == 0:
        word = Word(word)
        db.session.add(word)
        db.session.commit()
    else:
        word = word_query[0]
    return word
def makeComboItem(word1, word2):
    word = Word()
    # Loop until a combined entry has actually been saved (and so has an id).
    while word.id is None:
        print("Make an entry to combine the following words:")
        print(word1)
        print(word2)
        kana = input('kana>')
        kanji = input('kanji>')
        definition = input('definition>')
        word = Word(kana=kana, kanji=kanji, definition=definition)
        print(word)
        if 'y' == input("Is this correct? (y/n) "):
            word.save()
    word1.dupeOf(word)
    word2.dupeOf(word)
    return word
def get_next_word(used=None):
    try:
        # Fetch a random word that hasn't been used much
        subquery = Word.select(fn.Avg(Word.plays))
        result = (Word.select()
                  .order_by(fn.Random())
                  .where(Word.plays <= subquery))
        if used:
            result = result.where((Word.text << used) == False)
        result = result[0]

        # Update its play count
        query = Word.update(plays=Word.plays + 1).where(Word.id == result.id)
        query.execute()

        return result
    except:
        db.rollback()
        raise
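# Example use (an illustration, not from the original source): draw three
# distinct words for a round, excluding any already played this session.
used = []
for _ in range(3):
    next_word = get_next_word(used)
    used.append(next_word.text)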
def add_words(self, pair_word_and_count):
    words = pair_word_and_count
    res = []
    for word in words:
        if not Word.objects.filter(string=word).exists():
            res.append(Word(string=word))
    Word.objects.bulk_create(res)
def post(self):
    if request.form.get('add_index', None):
        list_title = request.form['list_name']
        order = (WordList.query().count() + 1) * 10
        lst = WordList(order=order, title=str(list_title))
        lst.put()
    elif request.form.get('add_word', None):
        key = ndb.Key(WordList, int(request.form['index_key']))
        word_name = str(request.form['word_name'])
        words_count = Word.query().filter(Word.list == key).count() + 1
        w = Word(list=key, word=word_name, order=words_count * 2)
        w.put()
    return self.get()
def get(self):
    word_index_list = WordList.query().order(WordList.order).fetch()
    word_list = Word.query().fetch()
    return render_template(
        'admin/form.html', **{
            'word_index_list': word_index_list,
            'word_list': word_list
        })
def load_words(session, directory):
    folder = os.path.join(directory, 'words')
    for filename in os.listdir(folder):
        if filename.endswith('.csv'):
            with open(os.path.join(folder, filename), newline='') as csv_file:
                reader = csv.DictReader(csv_file, delimiter=',', quotechar='"')
                for word_csv in reader:
                    word = Word.create(session, **word_csv)
                    session.add(word)
    session.commit()
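# Illustrative only: DictReader hands each row to Word.create as keyword
# arguments, so the CSV headers must match the Word model's field names.
# The header and values below are assumptions for demonstration.
import csv
import io

sample = io.StringIO('"word","translation"\n"hello","hola"\n')
for row in csv.DictReader(sample, delimiter=',', quotechar='"'):
    print(row)  # e.g. {'word': 'hello', 'translation': 'hola'}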
def check_search_word(self, word):
    word = Word(word)
    if word.is_valid:
        if self.previous != word.value:
            self.previous = word.value
        else:
            print(same_word_hint)
        self.search_word(word)
    else:
        self.clear(word, 'Invalid word.')
def load_vn_dict():
    # setting DB
    engine = create_engine(get_db_connect_string())
    conn = engine.connect()
    vn_dict = VNDict.get_instance()

    # load verbs
    verb_query_string_statement = text(
        "SELECT PosScore, NegScore, SynsetTerm FROM verb")
    verbs = conn.execute(verb_query_string_statement).fetchall()
    for v in verbs:
        if v.PosScore > v.NegScore:
            word_score = v.PosScore
            word_kind = WordKindEnum.POS
        else:
            word_score = v.NegScore
            word_kind = WordKindEnum.NEG
        word_type = WordTypeEnum.VERB
        word_text = v.SynsetTerm.lower().strip(Setting.NONWORD_CHARACTERS)
        verb_w = Word(word_text, word_score, word_kind, word_type)
        vn_dict.add(verb_w)

    # load adjectives
    adj_query_string_statement = text(
        "SELECT PosScore, NegScore, Adj_Key FROM adj")
    adjectives = conn.execute(adj_query_string_statement).fetchall()
    for adj in adjectives:
        if adj.PosScore > adj.NegScore:
            word_score = adj.PosScore
            word_kind = WordKindEnum.POS
        else:
            word_score = adj.NegScore
            word_kind = WordKindEnum.NEG
        word_type = WordTypeEnum.ADJ
        word_text = adj.Adj_Key.lower().strip(Setting.NONWORD_CHARACTERS)
        adj_w = Word(word_text, word_score, word_kind, word_type)
        vn_dict.add(adj_w)
def _get_words(self, integ):
    query = Word.select().where(Word.rhyme == integ)
    if len(query) < 2:
        return
    r = []
    for i in query:
        r.append((i.word, i.re3 + 1))
    return sorted(r, key=lambda x: x[1], reverse=True)
def save_tree(tree, loc, rel, lmk, parent=None):
    if len(tree.productions()) == 1:
        # if this tree only has one production
        # it means that its child is a terminal (word)
        word = Word()
        word.word = tree[0]
        word.pos = tree.node
        word.parent = parent
        word.location = loc
    else:
        prod = Production()
        prod.lhs = tree.node
        prod.rhs = ' '.join(n.node for n in tree)
        prod.parent = parent
        prod.location = loc

        # some productions are related to semantic representation
        if prod.lhs == 'RELATION':
            prod.relation = rel_type(rel)
            if hasattr(rel, 'measurement'):
                prod.relation_distance_class = rel.measurement.best_distance_class
                prod.relation_degree_class = rel.measurement.best_degree_class
        elif prod.lhs == 'LANDMARK-PHRASE':
            prod.landmark = lmk_id(lmk)
            prod.landmark_class = lmk.object_class
            prod.landmark_orientation_relations = get_lmk_ori_rels_str(lmk)
            prod.landmark_color = lmk.color
            # next landmark phrase will need the parent landmark
            lmk = parent_landmark(lmk)
        elif prod.lhs == 'LANDMARK':
            # LANDMARK has the same landmark as its parent LANDMARK-PHRASE
            prod.landmark = parent.landmark
            prod.landmark_class = parent.landmark_class
            prod.landmark_orientation_relations = parent.landmark_orientation_relations
            prod.landmark_color = parent.landmark_color

        # save subtrees, keeping track of parent
        for subtree in tree:
            save_tree(subtree, loc, rel, lmk, prod)
def token_value(self, token):
    energy = 10
    word = None
    if token not in self.words:
        try:
            word = Word.get(word=token)
        except Word.DoesNotExist:
            word = Word.create(word=token, last_used=datetime.now(), times=1)
    else:
        scale = self.energy_scale(self.words[token].last_used, datetime.now())
        energy *= scale
        self.words[token].times += 1
        word = self.words[token]  # without this, word stays None at save() below
        logger.debug("[{}] Scale: {} | Energy: {}".format(token, scale, energy))

    # After all that, save the word.
    word.save()

    if energy < 1:
        energy = 1
    return energy
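# Hedged sketch of the energy_scale helper used above (an assumption: its real
# implementation is not shown). This hypothetical version scales recently seen
# words toward 0 and long-unseen words toward 1 with an exponential curve.
def energy_scale(last_used, now, half_life_seconds=3600.0):
    age = (now - last_used).total_seconds()
    return 1.0 - 0.5 ** (age / half_life_seconds)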
def parse_tldr(f):
    """
    Generative TLDR iterable parser (it works on lists too).

    >>> for word in parse_tldr(["test|testing|he tests|easy"]):
    ...     print(word.word)
    test
    """
    for i, line in enumerate(f):
        line = line.strip()
        # Check for emptiness first, or line[0] below raises IndexError.
        if not line:
            continue
        if line[0] == "#":
            continue
        try:
            yield Word.deserialize(line)
        except Exception as e:
            raise ParserError(i + 1, e)
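# Hedged sketch of the serialization format implied by the doctest above: four
# pipe-separated fields per line. The field names here are guesses for
# illustration only; Word.deserialize itself is not shown in the source.
def deserialize_line(line):
    word, translation, example, difficulty = line.split("|")
    return {"word": word, "translation": translation,
            "example": example, "difficulty": difficulty}

assert deserialize_line("test|testing|he tests|easy")["word"] == "test"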
def test_static_class_method_get_count(self):
    """The static class method Word.get_count() makes it possible to
    conveniently ask the model for a count."""
    # defaults to 0 when it doesn't exist
    self.assertEqual(Word.get_count("foo"), 0)
    self.assertEqual(Word.get_count("foo", language="es"), 0)
    views.incr_word("foo")
    self.assertEqual(Word.get_count("foo"), 1)
    self.assertEqual(Word.get_count("foo", language="es"), 0)
    views.incr_word("foo", language="es")
    self.assertEqual(Word.get_count("foo"), 1)
    self.assertEqual(Word.get_count("foo", language="es"), 1)
def test_filter_by_language(self):
    # Add two words, one French and one Italian
    Word(englishWord='Hi', imagePath='hello.jpg', languageName='Italian',
         translatedWord='Ciao', difficulty=1).put()
    Word(englishWord='Hi', imagePath='hello.jpg', languageName='French',
         translatedWord='Salut', difficulty=1).put()
    # Get French words
    query = Word.query(Word.languageName == 'French')
    results = query.fetch()
    # Check to see if only 1 result was returned
    self.assertEqual(1, len(results))
    # Check if the word is French language
    self.assertEqual('French', results[0].languageName)
def get_definitions(request):
    word = request.GET.get('word')
    definitions = definition(word)
    response_data = {}
    new_word = Word(text=word)
    if len(definitions) >= 1:
        new_word.definition1 = definitions[0]
    if len(definitions) >= 2:
        new_word.definition2 = definitions[1]
    if len(definitions) >= 3:
        new_word.definition3 = definitions[2]
    new_word.save()
    for i in range(len(definitions)):
        response_data[i] = definitions[i]
    return HttpResponse(json.dumps(response_data),
                        content_type='application/json')
def add_comments():
    """Create db entries for comments."""
    comments = get_articles_comments(get_hot_article_ids(), get_ids=True)
    for index, comment in enumerate(comments):
        logger.info("[VIEW COMMENT {}]".format(index))
        comment_id, comment_text = comment
        print("[VIEW] Comment {}".format(index))
        comment_obj, created = Comment.add(comment_id, comment_text)
        # Skip comments we already have with unchanged text.
        if not created and comment_obj.text == comment_text:
            continue
        counted_words = get_words_count(comment_text)
        # delete old word counts that no longer appear in the comment
        if not created:
            words_to_delete = (
                db.session.query(WordsCount)
                .filter(
                    WordsCount.comment == comment_obj,
                    ~WordsCount.word.has(
                        Word.text.in_([s[0] for s in counted_words])),
                )
                .all()
            )
            if words_to_delete:
                for word in words_to_delete:
                    db.session.delete(word)
        for word, count in counted_words:
            word_obj, created = Word.add(word)
            WordsCount.add(comment_obj, word_obj, count)
        db.session.commit()
    return render_template("stats.html", data=comments)
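# Hedged sketch of the get_words_count helper assumed above (its real
# implementation is not shown): built on collections.Counter, it returns
# (word, count) pairs in the shape the loop above expects.
import re
from collections import Counter

def get_words_count(text):
    words = re.findall(r"\w+", text.lower())
    return Counter(words).most_common()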
def add_synonyms(self, kSynonyms, sSpace='global'):
    try:
        oSpace = Space.objects.get(name=sSpace)
    except Space.DoesNotExist:
        # Create the missing space under the requested name.
        oSpace = Space(name=sSpace)
        oSpace.save()
    for sWord, kSyns in kSynonyms.items():
        try:
            oWord = Word.objects.get(name=sWord, space=oSpace)
        except Word.DoesNotExist:
            oWord = Word(name=sWord, space=oSpace)
            oWord.save()
        for sSyn, kProps in kSyns.items():
            try:
                oSyn = Word.objects.get(name=sSyn, space=oSpace)
            except Word.DoesNotExist:
                oSyn = Word(name=sSyn, space=oSpace)
                oSyn.save()
            # Reinforce the relation in either direction, or create it.
            try:
                oRelation = Relation.objects.get(rel_one=oWord, rel_two=oSyn)
                oRelation.karma += kProps['weight']
                oRelation.save()
            except Relation.DoesNotExist:
                try:
                    oRelation = Relation.objects.get(rel_two=oWord, rel_one=oSyn)
                    oRelation.karma += kProps['weight']
                    oRelation.save()
                except Relation.DoesNotExist:
                    oRelation = Relation(rel_one=oWord, rel_two=oSyn,
                                         karma=kProps['weight'], space=oSpace)
                    oRelation.save()
def save(self):
    for word in self._trained_words:
        Word.set_word(word, self.nwords[word], language=self.language)
def __init__(self):
    # initialize word bank
    key = Word.query().get(keys_only=True)
    if key is None:
        # import word bank from file
        Word.import_words()
def populate(filepath, folder, chapter, section):
    with open(filepath, newline='', encoding='utf-8') as csvfile:
        reader = csv.reader(csvfile, delimiter='\t')
        for row in reader:
            Word(kana=row[0], kanji=row[1], definition=row[2],
                 book=folder, chapter=chapter, section=section).save()


def writeToFile():
    # The with block closes the file, so no explicit close() is needed.
    with open('vocab.csv', 'w', newline='', encoding='utf-8') as csvfile:
        writer = csv.writer(csvfile, delimiter='\t')
        for word in Word.getAll():
            if not word.isDupe():
                writer.writerow(word.toArray())


originalTotalCount = Word.totalCount()
originalDupeCount = Word.dupeCount()
originalRelationshipCount = Word.relationshipCount()

for (path, dirs, files) in os.walk('./vocab'):
    book = path.split('./vocab')[1][1:]
    for filename in files:
        filepath = os.path.join(path, filename)
        chapter = str(int(filename.split('.')[0]))
        section = filename.split('.')[1]
        populate(filepath, book, chapter, section)

print("Database synced")
print(str(Word.totalCount() - originalTotalCount) + " new records, " +
      str(Word.totalCount()) + " total records")

i = 1
def getWordByName(self, wordName):
    try:
        data = self.dictionary[wordName].decode('utf-8', 'ignore')
    except KeyError as e:
        print('Word not found: %s' % "".join(e.args[0]))
        return None
    else:
        # A leading '*' marks an entry that carries a phonetic transcription.
        if data[0] == '*':
            phonetic, interp = data.split('\n', 1)
            phonetic = phonetic[1:]
        else:
            phonetic = ""
            interp = data
        word = Word()
        word.name = wordName
        word.phonetic = phonetic
        word.interp = interp
        return word

def getRandomWord(self):
    # pick a random word
    name = "".join(random.choice(list(self.dictionary.idx._idx.keys())))
    word = self.getWordByName(name)
    return word


class RecordManager:
    def __init__(self, recordPath, dictManager):
        self.recordPath = recordPath
def _get_instance(self):
    _id = self.request.GET.get('id', None)
    if _id:
        self.instance = Word.get_by_id(int(_id))
    else:
        self.instance = None
def get(self):
    dictionary = int(self.request.GET.get('dict', 0))
    words = Word.all_by_dictionary(dictionary).order('original').fetch(100)
    html = render('list.html', {'words': words})
    self.response.out.write(html)
def get_word(self):
    word_id = self.request.GET.get('id', None)
    if not word_id:
        return None
    return Word.get_by_id(int(word_id))