Example #1
def get_overall_familiarity():
    # Pull the requested word id out of the JSON body.
    word_id, = json_check.check_fields(request.json, 'word_id')
    # Take the 10 most recent reviews of this word.
    reviews = Word.get_by_id(word_id).reviews.order_by(
        Review.timestamp.desc()).limit(10).all()

    # Average familiarity over those reviews; this assumes the word has at
    # least one review, otherwise len(reviews) == 0 raises ZeroDivisionError.
    return jsonify(overall_familiarity=sum(r.familiarity for r in reviews)
                   / len(reviews))
Example #2
    def test_word_foreign_key(self):
        wi = WordInfo.get(1)
        wi.overall_familiarity = 2

        db.session.add(wi)
        db.session.commit()

        a = Word.get_by_id(1).info
        self.assertEqual(len(a), 1)
        self.assertEqual(a[0].overall_familiarity, 2)
Example #3
from itertools import count  # required for the count(1) repetition counters below


def calculate_space_interval(word_id, from_scratch=False):
    w = Word.get_by_id(word_id)
    wi = WordInfo.get(word_id)

    query = Review.query.filter_by(word_id=word_id).order_by(Review.timestamp)

    # Keep only the first review of each calendar day.
    reviews = []
    date = None
    for rv in query.all():
        if date != rv.timestamp.date():
            date = rv.timestamp.date()
            reviews.append(rv)

    # todo: differentiate algorithm for whether from scratch

    sm_counter = count(1)  # repetition number, restarted after a failed review
    ef = 2.5               # easiness factor
    period = None          # interval in days
    for rv in reviews:
        # Update the easiness factor from the review quality q (familiarity, 0-5),
        # clamped at a minimum of 1.3.
        q = rv.familiarity
        ef += 0.1 - (5 - q) * (0.08 + (5 - q) * 0.02)
        if ef < 1.3:
            ef = 1.3

        # A poor review (quality < 3) restarts the repetition sequence.
        if rv.familiarity < 3:
            sm_counter = count(1)

        # Interval progression: 1 day, then 6 days, then previous interval * ef.
        sm_i = next(sm_counter)
        if sm_i == 1:
            period = 1
        elif sm_i == 2:
            period = 6
        else:
            period = period * ef

    # Assumes at least one (de-duplicated) review exists; otherwise period is None.
    return round(period)
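
The easiness-factor update and the 1-day / 6-day / previous-interval-times-EF progression above match the SuperMemo SM-2 scheduling rules. As a standalone illustration, here is a minimal sketch that applies the same rules to a plain list of quality grades with no database models involved; the function name and the sample grades are made up for the example.

def sm2_interval(grades):
    """Next review interval in days for a sequence of quality grades (0-5),
    using the same update rules as calculate_space_interval above."""
    ef = 2.5
    period = 0
    repetition = 0
    for q in grades:
        # Easiness factor update, clamped at 1.3.
        ef = max(1.3, ef + 0.1 - (5 - q) * (0.08 + (5 - q) * 0.02))
        # A grade below 3 restarts the repetition sequence.
        repetition = 1 if q < 3 else repetition + 1
        if repetition == 1:
            period = 1
        elif repetition == 2:
            period = 6
        else:
            period = period * ef
    return round(period)


print(sm2_interval([5, 4, 4]))  # 1 day, 6 days, then 6 * 2.6 = 15.6 -> 16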
Example #4
import json
import os


def insert_json(db, json_path):
    json_path = os.path.expanduser(json_path)
    with open(json_path, encoding='utf-8') as f:
        son = json.load(f)
    # Expected JSON structure:
    # [{
    #         word, phonetic
    #         explanation: [{PoS, Chinese, English}, ...]
    #         pronunciation: {content, type, encoding:base64}
    #         tags: [list?, unit?]
    # }, ...]
    for word in son:
        # The card back is the part of speech plus the Chinese gloss of each explanation.
        w = Word(front=word['word'],
                 phonetic=word['phonetic'],
                 explanation=json.dumps(word['explanation']),
                 back='; '.join([
                     '%s %s' % (exp['PoS'], exp['Chinese'])
                     for exp in word['explanation']
                 ]),
                 pronunciation=word['pronunciation']['content'])
        db.session.add(w)
        db.session.commit()  # commit per word so each insert is reported as it happens
        print("Added [{}]".format(w.front))
Example #5
with open('Lexique381.txt', 'r', encoding='utf-8') as source:
    header = source.readline()  # skip the column-header line
    lines = source.readlines()
    for line in lines:
        # Each Lexique 3.81 entry is one tab-separated line.
        fields = line.split('\t')
        ortho = fields[0]
        phono = fields[1]
        lemme = fields[2]
        cgram = fields[3]
        genre = fields[4]
        nombre = fields[5]
        freqlemfilms = float(fields[6])
        freqlemlivres = float(fields[7])
        freqfilms = float(fields[8])
        freqlivres = float(fields[9])
        infover = fields[10]
        word = Word(ortho=ortho,
                    phono=phono,
                    lemme=lemme,
                    cgram=cgram,
                    genre=genre,
                    freqlemfilms=freqlemfilms,
                    freqlemlivres=freqlemlivres,
                    freqfilms=freqfilms,
                    freqlivres=freqlivres,
                    infover=infover)
        session.add(word)
    session.commit()
    session.close()
Example #6
    def test_get_by_id(self):
        w = Word.get_by_id(1)

        self.assertEqual(w.id, 1)
        self.assertEqual(w.front, 'abandon')
Example #7
    def test_word_foreign_key_no_info(self):
        a = Word.get_by_id(1).info
        self.assertEqual(a, [])