Example #1
    def populateData(self, idxs):
        if len(idxs) == 0:
            return map(
                list,
                DB.fetchall("""
            select s.rowid,s.name,t.count,r.count,r.wpm,ifelse(nullif(t.dis,t.count),'No','Yes')
                    from source as s
                    left join (select source,count(*) as count,count(disabled) as dis from text group by source) as t
                        on (s.rowid = t.source)
                    left join (select source,count(*) as count,avg(wpm) as wpm from result group by source) as r
                        on (t.source = r.source)
                    where s.disabled is null
                    order by s.name"""))

        if len(idxs) > 1:
            return []

        r = self.rows[idxs[0]]

        return map(
            list,
            DB.fetchall(
                """select t.rowid,substr(t.text,0,40)||"...",length(t.text),r.count,r.m,ifelse(t.disabled,'Yes','No')
                from (select rowid,* from text where source = ?) as t
                left join (select text_id,count(*) as count,agg_median(wpm) as m from result group by text_id) as r
                    on (t.id = r.text_id)
                order by t.rowid""", (r[0], )))
Example #2
    def populateData(self, idxs):
        if len(idxs) == 0:
            return map(
                list,
                DB.fetchall(
                    """
            select s.rowid,s.name,t.count,r.count,r.wpm,ifelse(nullif(t.dis,t.count),'No','Yes')
                    from source as s
                    left join (select source,count(*) as count,count(disabled) as dis from text group by source) as t
                        on (s.rowid = t.source)
                    left join (select source,count(*) as count,avg(wpm) as wpm from result group by source) as r
                        on (t.source = r.source)
                    where s.disabled is null
                    order by s.name"""
                ),
            )

        if len(idxs) > 1:
            return []

        r = self.rows[idxs[0]]

        return map(
            list,
            DB.fetchall(
                """select t.rowid,substr(t.text,0,40)||"...",length(t.text),r.count,r.m,ifelse(t.disabled,'Yes','No')
                from (select rowid,* from text where source = ?) as t
                left join (select text_id,count(*) as count,agg_median(wpm) as m from result group by text_id) as r
                    on (t.id = r.text_id)
                order by t.rowid""",
                (r[0],),
            ),
        )
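All of these examples go through a shared DB helper rather than the sqlite3 module directly, and the queries rely on SQLite functions the application registers itself, such as ifelse() and agg_median(). A minimal sketch of what such a wrapper might look like is below; the class name, the default-row behaviour of fetchone, and the exact semantics of ifelse are inferred from how the examples use them, not taken from the project.

import sqlite3
import statistics

class _AggMedian:
    """Guess at agg_median(): collect values and return their median."""
    def __init__(self):
        self.values = []

    def step(self, value):
        if value is not None:
            self.values.append(value)

    def finalize(self):
        return statistics.median(self.values) if self.values else None

class AmphDatabase:  # hypothetical name; the examples only show a module-level DB object
    def __init__(self, path=":memory:"):
        self.conn = sqlite3.connect(path)
        # ifelse(cond, a, b): a when cond is truthy, else b (3-argument form used in the queries)
        self.conn.create_function("ifelse", 3, lambda c, a, b: a if c else b)
        self.conn.create_aggregate("agg_median", 1, _AggMedian)

    def fetchall(self, sql, params=()):
        return self.conn.execute(sql, params).fetchall()

    def fetchone(self, sql, default, params=()):
        # The examples index the result immediately, so a default row is assumed
        # to be returned when the query yields nothing.
        row = self.conn.execute(sql, params).fetchone()
        return row if row is not None else default

DB = AmphDatabase()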
Example #3
 def addFromTyped(self):
     words = [
         x[0] for x in DB.fetchall(
             'select distinct data from statistic where type = 2 order by random()'
         )
     ]
     self.filterWords(words)
Example #4
    def update(self):
        self.progress_.show()
        n_text = DB.fetchone("""select count(*) from text""", (0,))[0]
        self.progress_.inc(2)
        n_res = DB.fetchone("""select count(*) from result""", (0,))[0]
        self.progress_.inc(2)
        n_words = DB.fetchall(
            """select count(*),sum(count) from statistic
            group by type order by type"""
        )
        self.progress_.inc(2)
        if len(n_words) != 3:
            n_words = [(0, 0), (0, 0), (0, 0)]
        n_first = DB.fetchone("""select w from result order by w asc limit 1""", (time.time(),))[0]
        self.progress_.hide()

        self.stats_.setText(
            locale.format_string(
                """Texts: %d
Results: %d
Analysis data: %d (%d keys, %d trigrams, %d words)
  %d characters and %d words typed total\n"""
                + ("First result was %.2f days ago.\n" % ((time.time() - n_first) / 86400.0)),
                tuple(
                    [n_text, n_res, sum(map(lambda x: x[0], n_words))]
                    + list(map(lambda x: x[0], n_words))
                    + [n_words[0][1], n_words[2][1]]
                ),
                True,
            )
        )
Example #5
    def cleanup(self):
        s_in_day = 24*60*60
        now = time.time()
        pending = []

        for idx, grp, lim in [
                (1, 30, Settings.get("group_month")),
                (2, 7, Settings.get("group_week")),
                (3, 1, Settings.get("group_day")),
            ]:

            minimum = now - s_in_day * lim
            binsize = s_in_day * grp

            pending.extend(DB.fetchall(f"""
                select avg(w), data, type, agg_mean(time, count), sum(count), sum(mistakes),
                    agg_median(viscosity)
                from statistic where w <= {minimum}
                group by data, type, cast(w/{binsize} as int)"""))
            self.progressbar.set_fraction(idx/5)

        DB.executemany("""insert into statistic (w, data, type, time, count, mistakes, viscosity)
            values (?,?,?,?,?,?,?)""", pending)
        self.progressbar.set_fraction(4/5)
        # FIXME vacuum not supported
        # DB.execute("vacuum")
        self.progressbar.set_fraction(5/5)
        DB.commit()
        self.progressbar.set_fraction(0)
Example #6
    def update(self):
        self.progress_.show()
        n_text = DB.fetchone('''select count(*) from text''', (0, ))[0]
        self.progress_.inc(2)
        n_res = DB.fetchone('''select count(*) from result''', (0, ))[0]
        self.progress_.inc(2)
        n_words = DB.fetchall('''select count(*), sum(count) from statistic
            group by type order by type''')
        self.progress_.inc(2)
        if len(n_words) != 3:
            n_words = [(0, 0), (0, 0), (0, 0)]
        n_first = DB.fetchone(
            '''select w from result order by w asc limit 1''',
            (time.time(), ))[0]
        self.progress_.hide()

        self.stats_.setText(
            locale.format_string(
                '''Texts: %d
Results: %d
Analysis data: %d (%d keys, %d trigrams, %d words)
  %d characters and %d words typed total\n''' +
                ("First result was %.2f days ago.\n" %
                 ((time.time() - n_first) / 86400.0)),
                tuple([n_text, n_res,
                       sum(map(lambda x: x[0], n_words))] +
                      list(map(lambda x: x[0], n_words)) +
                      [n_words[0][1], n_words[2][1]]), True))
Example #7
    def cleanup(self):
        day = 24 * 60 * 60
        now = time.time()
        q = []

        self.progress_.show()
        for grp, lim in [(30.0, Settings.get('group_month')),
                         (7.0, Settings.get('group_week')),
                         (1.0, Settings.get('group_day'))]:

            w = now - day * lim
            g = grp * day
            q.extend(
                DB.fetchall('''
                select avg(w), data, type, agg_mean(time, count), sum(count), sum(mistakes), agg_median(viscosity)
                from statistic where w <= %f
                group by data, type, cast(w/%f as int)''' % (w, g)))
            self.progress_.inc()

            DB.execute('''delete from statistic where w <= ?''', (w, ))
            self.progress_.inc()

        DB.executemany(
            '''insert into statistic (w, data, type, time, count, mistakes, viscosity)
            VALUES (?, ?, ?, ?, ?, ?, ?)''', q)
        self.progress_.inc()
        DB.commit()
        DB.execute('vacuum')
        self.progress_.inc()
        self.progress_.hide()
Example #8
    def cleanup(self):
        day = 24*60*60
        now = time.time()
        q = []

        self.progress_.show()
        for grp, lim in [(30.0, Settings.get('group_month')),
                (7.0, Settings.get('group_week')),
                (1.0, Settings.get('group_day'))]:

            w = now - day*lim
            g = grp * day
            q.extend(DB.fetchall('''
                select avg(w), data, type, agg_mean(time, count), sum(count), sum(mistakes), agg_median(viscosity)
                from statistic where w <= %f
                group by data, type, cast(w/%f as int)''' % (w, g)))
            self.progress_.inc()

            DB.execute('''delete from statistic where w <= ?''', (w, ))
            self.progress_.inc()

        DB.executemany('''insert into statistic (w, data, type, time, count, mistakes, viscosity)
            VALUES (?, ?, ?, ?, ?, ?, ?)''', q)
        self.progress_.inc()
        DB.execute('vacuum')
        self.progress_.inc()
        DB.commit()
        self.progress_.hide()
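The cleanup() variants in Examples #5, #7 and #8 compact old statistic rows by averaging everything that falls into the same time bin; the group by cast(w/binsize as int) clause does the bucketing by integer-dividing the Unix timestamp w by the bin width. A quick sketch of that arithmetic, with made-up timestamps:

day = 24 * 60 * 60  # one daily bin, as in the group_day case
timestamps = [1_700_010_000, 1_700_050_000, 1_700_200_000]
bins = [int(w / day) for w in timestamps]
print(bins)  # [19676, 19676, 19678]: the first two rows land in the same bin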
Example #9
 def addFromTyped(self):
     words = [
         x[0]
         for x in DB.fetchall(
             "select distinct data from statistic where type = 2 order by random()"
         )
     ]
     self.filterWords(words)
Example #10
    def updateData(self, *args):
        if self.editflag:
            return
        where = []
        if self.cb_source.currentIndex() <= 0:
            pass
        elif self.cb_source.currentIndex() == 1:  # last text
            where.append(
                "r.text_id = (select text_id from result order by w desc limit 1)"
            )
        elif self.cb_source.currentIndex() == 2:  # all texts
            where.append("s.discount is null")
        elif self.cb_source.currentIndex() == 3:  # all lessons texts
            where.append("s.discount is not null")
        else:
            s = self.cb_source.itemData(self.cb_source.currentIndex())
            where.append("r.source = %d" % s.toInt()[0])

        if len(where) > 0:
            where = "where " + " and ".join(where)
        else:
            where = ""

        g = Settings.get("perf_group_by")
        if g == 0:  # no grouping
            sql = """select text_id,w,s.name,wpm,100.0*accuracy,viscosity
                from result as r left join source as s on (r.source = s.rowid)
                %s %s
                order by w desc limit %d"""
        elif g:
            sql = """select agg_first(text_id),avg(r.w) as w,count(r.rowid) || ' result(s)',agg_median(r.wpm),
                        100.0*agg_median(r.accuracy),agg_median(r.viscosity)
                from result as r left join source as s on (r.source = s.rowid)
                %s %s
                order by w desc limit %d"""

        group = ""
        if g == 1:  # by Settings.get('def_group_by')
            DB.resetCounter()
            gn = Settings.get("def_group_by")
            if gn <= 1:
                gn = 1
            group = "group by cast(counter()/%d as int)" % gn
        elif g == 2:  # by sitting
            mis = Settings.get("minutes_in_sitting") * 60.0
            DB.resetTimeGroup()
            group = "group by time_group(%f, r.w)" % mis
        elif g == 3:  # by day
            group = "group by cast((r.w+4*3600)/86400 as int)"

        n = Settings.get("perf_items")

        sql = sql % (where, group, n)

        self.model.setData(map(list, DB.fetchall(sql)))
        self.updateGraph()
Example #11
    def updateData(self, *args):
        if self.editflag:
            return
        where = []
        if self.cb_source.currentIndex() <= 0:
            pass
        elif self.cb_source.currentIndex() == 1:  # last text
            where.append(
                'r.text_id = (select text_id from result order by w desc limit 1)'
            )
        elif self.cb_source.currentIndex() == 2:  # all texts
            where.append('s.discount is null')
        elif self.cb_source.currentIndex() == 3:  # all lessons texts
            where.append('s.discount is not null')
        else:
            s = self.cb_source.itemData(self.cb_source.currentIndex())
            where.append('r.source = %d' % s.toInt()[0])

        if len(where) > 0:
            where = 'where ' + ' and '.join(where)
        else:
            where = ""

        g = Settings.get('perf_group_by')
        if g == 0:  # no grouping
            sql = '''select text_id,w,s.name,wpm,100.0*accuracy,viscosity
                from result as r left join source as s on (r.source = s.rowid)
                %s %s
                order by w desc limit %d'''
        elif g:
            sql = '''select agg_first(text_id),avg(r.w) as w,count(r.rowid) || ' result(s)',agg_median(r.wpm),
                        100.0*agg_median(r.accuracy),agg_median(r.viscosity)
                from result as r left join source as s on (r.source = s.rowid)
                %s %s
                order by w desc limit %d'''

        group = ''
        if g == 1:  # by Settings.get('def_group_by')
            DB.resetCounter()
            gn = Settings.get('def_group_by')
            if gn <= 1:
                gn = 1
            group = "group by cast(counter()/%d as int)" % gn
        elif g == 2:  # by sitting
            mis = Settings.get('minutes_in_sitting') * 60.0
            DB.resetTimeGroup()
            group = "group by time_group(%f, r.w)" % mis
        elif g == 3:  # by day
            group = "group by cast((r.w+4*3600)/86400 as int)"

        n = Settings.get("perf_items")

        sql = sql % (where, group, n)

        self.model.setData(map(list, DB.fetchall(sql)))
        self.updateGraph()
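The grouped branches in Examples #10 and #11 also depend on two application-registered SQLite functions, counter() and time_group(), paired with DB.resetCounter() and DB.resetTimeGroup(). The snippet below is only a guess at their behaviour, inferred from the group by clauses: counter() numbers rows as the query scans them, and time_group(threshold, w) opens a new group whenever consecutive timestamps are more than threshold seconds apart.

import sqlite3

class _GroupState:
    """Shared counters; the real resetCounter()/resetTimeGroup() presumably clear these."""
    count = -1
    group = -1
    last_w = None

def _counter():
    _GroupState.count += 1
    return _GroupState.count

def _time_group(threshold, w):
    if _GroupState.last_w is None or w - _GroupState.last_w > threshold:
        _GroupState.group += 1
    _GroupState.last_w = w
    return _GroupState.group

conn = sqlite3.connect(":memory:")
conn.create_function("counter", 0, _counter)
conn.create_function("time_group", 2, _time_group)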
Example #12
 def refresh_sources(self):
     self.editflag = True
     self.cb_source.remove_all()
     self.cb_source.append("all", "<ALL>")
     self.cb_source.append("last text", "<LAST TEXT>")
     self.cb_source.append("all texts", "<ALL TEXTS>")
     self.cb_source.append("all lessons", "<ALL LESSONS>")
     for rid, label in DB.fetchall(
             "select rowid,abbreviate(name,30) from source order by name"):
         self.cb_source.append(str(rid), label)
     self.editflag = False
Example #13
    def doubleClicked(self, idx):
        p = idx.parent()
        if not p.isValid():
            return

        q = self.model.data(idx, Qt.UserRole)
        v = DB.fetchall('select id,source,text from text where rowid = ?', (q[0], ))

        self.cur = v[0] if len(v) > 0 else self.defaultText
        self.emit(SIGNAL("setText"), self.cur)
        self.emit(SIGNAL("gotoText"))
Example #14
 def populate_data(self):
     self.clear()
     for source in DB.fetchall("""
         select s.rowid,s.name,t.count,r.count,r.wpm,ifelse(nullif(t.dis,t.count),'No','Yes')
             from source as s
             left join (select source,count(*) as count,count(disabled) as dis from text group by source) as t
                 on (s.rowid = t.source)
             left join (select source,count(*) as count,avg(wpm) as wpm from result group by source) as r
                 on (t.source = r.source)
             where s.disabled is null
             order by s.name"""):
         s_iter = self.append(None, list(source))
         for text in DB.fetchall(
                 """
             select t.rowid,substr(t.text,0,40)||"...",length(t.text),r.count,r.m,ifelse(t.disabled,'Yes','No')
             from (select rowid,* from text where source = ?) as t
             left join (select text_id,count(*) as count,agg_median(wpm) as m from result group by text_id) as r
                 on (t.id = r.text_id)
             order by t.rowid""", (source[0], )):
             self.append(s_iter, list(text))
Example #15
    def refreshSources(self):
        self.editflag = True
        self.cb_source.clear()
        self.cb_source.addItem("<ALL>")
        self.cb_source.addItem("<LAST TEXT>")
        self.cb_source.addItem("<ALL TEXTS>")
        self.cb_source.addItem("<ALL LESSONS>")

        for id, v in DB.fetchall('select rowid,abbreviate(name,30) from source order by name'):
            self.cb_source.addItem(v, QVariant(id))
        self.editflag = False
Example #16
    def doubleClicked(self, idx):
        p = idx.parent()
        if not p.isValid():
            return

        q = self.model.data(idx, Qt.UserRole)
        v = DB.fetchall("select id,source,text from text where rowid = ?", (q[0],))

        self.cur = v[0] if len(v) > 0 else self.defaultText
        self.emit(SIGNAL("setText"), self.cur)
        self.emit(SIGNAL("gotoText"))
Example #17
    def double_clicked(self, treeview, where, _column):
        model = treeview.get_model()
        if model.iter_depth(model.get_iter(where)) == 0:
            return

        row = Gtk.TreeModelRow(model, where)
        tgts = DB.fetchall("select id,source,text from text where rowid = ?",
                           (row[0], ))

        cur = tgts[0] if tgts else self.default_text
        self.emit("set-text", *cur)
        self.emit("go-to-text")
Example #18
    def update_data(self):
        if self.editflag:
            return
        where = []
        where_query = ""
        selected = self.cb_source.get_active_id()
        if selected == "last text":
            where.append(
                "r.text_id = (select text_id from result order by w desc limit 1)"
            )
        elif selected == "all texts":
            where.append("s.discount is null")
        elif selected == "all lessons":
            where.append("s.discount is not null")
        elif selected and selected.isdigit():
            rowid = int(selected)
            where.append(f"r.source = {rowid}")

        if where:
            where_query = "where " + " and ".join(where)

        sql_template = """select agg_first(text_id),avg(r.w) as w,count(r.rowid)
                || ' result(s)',agg_median(r.wpm),
                100.0*agg_median(r.accuracy),agg_median(r.viscosity)
            from result as r left join source as s on (r.source = s.rowid)
            %s %s
            order by w desc limit %d"""

        groupby = Settings.get("perf_group_by")
        group = ""
        print(groupby)
        if groupby == 1:  # by def_group_by
            DB.reset_counter()
            group = "group by cast(counter()/%d as int)" % max(
                Settings.get("def_group_by"), 1)
        elif groupby == 2:  # by sitting
            mis = Settings.get("minutes_in_sitting") * 60.0
            DB.reset_time_group()
            group = "group by time_group(%f, r.w)" % mis
        elif groupby == 3:  # by day
            group = "group by cast((r.w+4*3600)/86400 as int)"
        elif not groupby:  # no grouping
            sql_template = """select text_id,w,s.name,wpm,100.0*accuracy,viscosity
                from result as r left join source as s on (r.source = s.rowid)
                %s %s
                order by w desc limit %d"""

        items = Settings.get("perf_items")

        sql = sql_template % (where_query, group, items)
        self.model.set_stats([list(r) for r in DB.fetchall(sql)])
        self.update_graph()
Example #19
    def __init__(self, *args):
        super(StringStats, self).__init__(*args)

        self.model = WordModel()
        tw = AmphTree(self.model)
        tw.setIndentation(0)
        tw.setUniformRowHeights(True)
        tw.setRootIsDecorated(False)
        self.stats = tw

        ob = SettingsCombo('ana_which', [
            ('wpm asc', 'slowest'),
            ('wpm desc', 'fastest'),
            ('viscosity desc', 'least fluid'),
            ('viscosity asc', 'most fluid'),
            ('accuracy asc', 'least accurate'),
            ('misses desc', 'most mistyped'),
            ('total desc', 'most common'),
            ('damage desc', 'most damaging'),
        ])

        wc = SettingsCombo('ana_what', ['keys', 'trigrams', 'words'])
        lim = SettingsEdit('ana_many')

        s = DB.fetchall("select rowid, name from source")
        source = SettingsCombo('ana_source', [(-1, 'all')] + s)
        self.w_count = SettingsEdit('ana_count')

        self.connect(Settings, SIGNAL("change_ana_which"), self.update)
        self.connect(Settings, SIGNAL("change_ana_what"), self.update)
        self.connect(Settings, SIGNAL("change_ana_many"), self.update)
        self.connect(Settings, SIGNAL("change_ana_count"), self.update)
        self.connect(Settings, SIGNAL("history"), self.update)

        self.setLayout(
            AmphBoxLayout(
                [[
                    "Display statistics about the", ob, wc, "from ", source,
                    None,
                    AmphButton("Update List", self.update)
                ],
                 [
                     "Limit list to", lim, "items of count ", self.w_count,
                     " and older than",
                     SettingsEdit("history"), "hours", None,
                     AmphButton(
                         "Send List to Lesson Generator",
                         lambda: self.emit(SIGNAL("lessonStrings"),
                                           [x[0] for x in self.model.words]))
                 ], (self.stats, 1)]))
Example #20
    def update(self):
        which = Settings.get("ana_which")
        what = Settings.get("ana_what")
        limit = Settings.get("ana_many")
        least = Settings.get("ana_count")
        hist = time.time() - Settings.get("history") * 86400.0

        sql = f"""select data,12.0/time as wpm,
            100.0-100.0*misses/cast(total as real) as accuracy,
            viscosity,total,misses,
            total*time*time*(1.0+misses/total) as damage
                from
                    (select data,agg_median(time) as time,agg_median(viscosity) as viscosity,
                    sum(count) as total,sum(mistakes) as misses
                    from statistic where w >= ? and type = ? group by data)
                where total >= ?
                order by {which} limit {limit}"""

        self.model.set_stats(DB.fetchall(sql, (hist, what, least)))
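The 12.0/time term in this query turns the median time stored in statistic into words per minute, assuming time holds seconds per character and the usual five-characters-per-word convention: 60 / (5 * seconds_per_char) = 12 / seconds_per_char. A one-line check of the arithmetic with a hypothetical value:

seconds_per_char = 0.25         # hypothetical median from the statistic table
print(12.0 / seconds_per_char)  # 48.0 words per minute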
Example #21
    def __init__(self, *args):
        super(StringStats, self).__init__(*args)

        self.model = WordModel()
        tw = AmphTree(self.model)
        tw.setIndentation(0)
        tw.setUniformRowHeights(True)
        tw.setRootIsDecorated(False)
        self.stats = tw

        ob = SettingsCombo('ana_which', [
                    ('wpm asc', 'slowest'),
                    ('wpm desc', 'fastest'),
                    ('viscosity desc', 'least fluid'),
                    ('viscosity asc', 'most fluid'),
                    ('accuracy asc', 'least accurate'),
                    ('misses desc', 'most mistyped'),
                    ('total desc', 'most common'),
                    ('damage desc', 'most damaging'),
                    ])

        wc = SettingsCombo('ana_what', ['keys', 'trigrams', 'words'])
        lim = SettingsEdit('ana_many')

        s = DB.fetchall("select rowid, name from source")
        source = SettingsCombo('ana_source', [
            (-1, 'all')
        ] + s)
        self.w_count = SettingsEdit('ana_count')

        self.connect(Settings, SIGNAL("change_ana_which"), self.update)
        self.connect(Settings, SIGNAL("change_ana_what"), self.update)
        self.connect(Settings, SIGNAL("change_ana_many"), self.update)
        self.connect(Settings, SIGNAL("change_ana_count"), self.update)
        self.connect(Settings, SIGNAL("history"), self.update)

        self.setLayout(AmphBoxLayout([
                ["Display statistics about the", ob, wc, "from ", source, None, AmphButton("Update List", self.update)],
                ["Limit list to", lim, "items of count ", self.w_count," and older than", SettingsEdit("history"), "hours",
                    None, AmphButton("Send List to Lesson Generator",
                         lambda: self.emit(SIGNAL("lessonStrings"), [x[0] for x in self.model.words]))],
                (self.stats, 1)
            ]))
Example #22
    def update(self, *arg):

        ord = Settings.get('ana_which')
        cat = Settings.get('ana_what')
        limit = Settings.get('ana_many')
        count = Settings.get('ana_count')
        hist = time.time() - Settings.get('history') * 86400.0

        sql = """select data,12.0/time as wpm,
            100.0-100.0*misses/cast(total as real) as accuracy,
            viscosity,total,misses,
            total*time*time*(1.0+misses/total) as damage
                from
                    (select data,agg_median(time) as time,agg_median(viscosity) as viscosity,
                    sum(count) as total,sum(mistakes) as misses
                    from statistic where w >= ? and type = ? group by data)
                where total >= ?
                order by %s limit %d""" % (ord, limit)

        self.model.setData(DB.fetchall(sql, (hist, cat, count)))
Example #24
    def update(self):
        texts = DB.fetchone("select count(*) from text", (0,))[0]
        self.progressbar.set_fraction(1/4)
        results = DB.fetchone("select count(*) from result", (0,))[0]
        self.progressbar.set_fraction(2/4)
        keys, trigrams, words = DB.fetchall(
            "select count(*),sum(count) from statistic group by type order by type")
        self.progressbar.set_fraction(3/4)
        first = DB.fetchone("select w from result order by w asc limit 1", (time.time(), ))[0]
        self.progressbar.set_fraction(4/4)

        total = keys[0] + trigrams[0] + words[0]
        history = (time.time() - first) / 86400

        self.stats.set_text(f"""
Texts: {texts}
Results: {results}
Analysis data: {total} ({keys[0]} keys, {trigrams[0]} trigrams, {words[0]} words)
{keys[1]} characters and {words[1]} words typed in total.
First result was {round(history, 2)} days ago.
""")

        self.progressbar.set_fraction(0)
Example #25
 def add_from_typed(self):
     query = 'select distinct data from statistic where type = 2 order by random()'
     words = [x[0] for x in DB.fetchall(query)]
     self.filter_words(words)