Example no. 1
def create(cls, s, *sources):
    composite = add(s, Composite(n_components=0))
    for source in sources:
        add(
            s,
            CompositeComponent(input_source=source,
                               output_source=composite))
        composite.n_components += 1
    return composite
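
All of these examples lean on the `add` helper from `ch2.squeal.utils` (Example no. 6 shows the import). A minimal sketch of what it presumably does, assuming it only attaches the instance to the SQLAlchemy session and returns it so calls can be chained into assignments:

def add(s, instance):
    # attach the new object to the session and hand it back, so that
    # `composite = add(s, Composite(n_components=0))` works in one line
    s.add(instance)
    return instance
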
Example no. 2
def __topic_journal(self, s, topic):
    tjournal = s.query(TopicJournal). \
        filter(TopicJournal.topic == topic,
               TopicJournal.date == self._date).one_or_none()
    if not tjournal:
        tjournal = add(s, TopicJournal(topic=topic, date=self._date))
    return tjournal
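
This is the usual get-or-create pattern: query with `one_or_none()` and create the row only when nothing comes back. The same idea as a generic helper (the name and the `filter_by` form are illustrative, not from the original code):

def get_or_create(s, cls, **kwargs):
    # hypothetical generic version of the pattern used in __topic_journal
    instance = s.query(cls).filter_by(**kwargs).one_or_none()
    if not instance:
        instance = add(s, cls(**kwargs))
    return instance
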
Example no. 3
def for_modified_files(log, s, paths, callback, owner, force=False):
    '''
    This takes a callback because we need to know whether or not to mark the file
    as scanned after processing.  The callback should return True on success.

    The session is used throughout, but not passed to the callback.  The callback
    can hold the same session as internal state or create its own.  We avoid open
    transactions across the callback.
    '''

    for file_path in paths:

        last_modified = to_time(stat(file_path).st_mtime)
        hash = md5_hash(file_path)
        path_scan = s.query(FileScan). \
            filter(FileScan.path == file_path,
                   FileScan.owner == owner).one_or_none()

        # get last scan and make sure it's up-to-date
        if path_scan:
            if hash != path_scan.md5_hash:
                log.warning('File at %s appears to have changed since last read on %s' %
                            (file_path, path_scan.last_scan))
                path_scan.md5_hash = hash
                path_scan.last_scan = to_time(0.0)  # to_time so the comparison below works
        else:
            # need to_time here because the value is not round-tripped via the database before use below
            path_scan = add(
                s,
                FileScan(path=file_path,
                         owner=owner,
                         md5_hash=hash,
                         last_scan=to_time(0.0)))
            s.flush()  # want this to appear in queries below

        # only look at hash if we are going to process anyway
        if force or last_modified > path_scan.last_scan:

            hash_scan = s.query(FileScan). \
                filter(FileScan.md5_hash == hash,
                       FileScan.owner == owner).\
                order_by(desc(FileScan.last_scan)).limit(1).one()  # must exist as path_scan is a candidate
            if hash_scan.path != path_scan.path:
                log.warning('Ignoring duplicate file (details in debug log)')
                log.debug('%s' % file_path)
                log.debug('%s' % hash_scan.path)
                # update the path to avoid triggering in future
                path_scan.last_scan = hash_scan.last_scan

            s.commit()

            if force or last_modified > hash_scan.last_scan:
                if callback(file_path):
                    log.debug('Marking %s as scanned' % file_path)
                    path_scan.last_scan = last_modified  # maybe use now?
                    s.commit()
                else:
                    log.debug('Not marking %s as scanned' % file_path)
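
For reference, this is roughly how the function might be driven; the loader and the owner class here are assumptions for illustration, not part of the original code:

from glob import glob

def import_file(file_path):
    # hypothetical loader; return True so the file is marked as scanned
    try:
        load_fit_file(file_path)  # assumed import routine, not from the original code
        return True
    except Exception:
        return False

for_modified_files(log, s, glob('data/*.fit'), import_file, owner=ActivityImporter)
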
Example no. 4
def injury_notes(old_name, new_name):
    injury_id = next(
        old.execute('''select id from injury where name like ?''',
                    [old_name]))[0]
    topic = s.query(Topic).filter(Topic.name == new_name).one()
    notes = s.query(StatisticName).filter(
        StatisticName.name == 'Notes',
        StatisticName.constraint == topic.id).one()
    for row in old.execute(
            '''select date, notes from injury_diary where injury_id = ?''',
            [injury_id]):
        if row['notes']:
            # print(row['notes'], len(row['notes']))
            tj = add(s, TopicJournal(time=to_time(row['date']), topic=topic))
            add(
                s,
                StatisticJournalText(statistic_name=notes,
                                     source=tj,
                                     value=row['notes']))
Example no. 5
def test_set(self):
    with NamedTemporaryFile() as f:
        args, db = bootstrap_file(f, m(V), '5')
        bootstrap_file(f, m(V), '5', mm(DEV), configurator=default)
        with db.session_context() as s:
            source = add(s, Source())
            n = s.query(count(Timestamp.id)).scalar()
            self.assertEqual(n, 0)
            Timestamp.set(s, TestTimestamp, source=source)
            n = s.query(count(Timestamp.id)).scalar()
            self.assertEqual(n, 1)
            t = s.query(Timestamp).filter(Timestamp.owner == TestTimestamp).one()
            self.assertAlmostEqual(t.time.timestamp(), dt.datetime.now().timestamp(), 1)
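
Judging by the assertions, `Timestamp.set` records the current time against an owner and an optional source. A stand-alone sketch of the same behaviour (the free-function form is mine, not the library's):

import datetime as dt

def set_timestamp(s, owner, source=None):
    # assumption: Timestamp.set simply stores "now" for this owner/source pair
    add(s, Timestamp(owner=owner, source=source, time=dt.datetime.now()))
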
Example no. 6
def add_value(self, s, value, time=None, date=None):
    from ch2.squeal.utils import add
    if time and date:
        raise Exception('Specify one or none of time and date for %s' % self)
    if not time and not date:
        time = 0.0  # important this is a float and not an int (or it would be an erroneous date)
    if date:
        time = local_date_to_time(date)
    if time and self.single:
        raise Exception('%s was given time %s but is not time-variable' % (self, format_time(time)))
    sjournal = STATISTIC_JOURNAL_CLASSES[self.statistic_name.statistic_journal_type](
        statistic_name=self.statistic_name, source=self, value=value, time=time)
    self.validate(sjournal)
    return add(s, sjournal)
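
The checks at the top define the contract: pass `time` or `date` but not both, and omit both to get the float `0.0` sentinel. A hedged usage sketch, assuming the method lives on something like a `Constant` source (the constant name here is illustrative):

constant = s.query(Constant).filter(Constant.name == 'FTHR.Bike').one()  # hypothetical constant
constant.add_value(s, '154', date='2018-09-29')  # date is converted via local_date_to_time
constant.add_value(s, '154')                     # stored at the 0.0 sentinel time
# passing both time and date would raise an Exception
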
Example no. 7
def filter_modified_files(s, paths, owner, force=False):

    modified = []

    for file_path in paths:

        last_modified = to_time(stat(file_path).st_mtime)
        hash = md5_hash(file_path)
        path_scan = s.query(FileScan). \
            filter(FileScan.path == file_path,
                   FileScan.owner == owner).one_or_none()

        # get last scan and make sure it's up-to-date
        if path_scan:
            if hash != path_scan.md5_hash:
                log.warning('File at %s appears to have changed since last read on %s' %
                            (file_path, path_scan.last_scan))
                path_scan.md5_hash = hash
                path_scan.last_scan = to_time(0.0)  # to_time so the comparison below works
        else:
            # need to_time here because the value is not round-tripped via the database before use below
            path_scan = add(
                s,
                FileScan(path=file_path,
                         owner=owner,
                         md5_hash=hash,
                         last_scan=to_time(0.0)))
            s.flush()  # want this to appear in queries below

        # only look at hash if we are going to process anyway
        if force or last_modified > path_scan.last_scan:

            hash_scan = s.query(FileScan). \
                filter(FileScan.md5_hash == hash,
                       FileScan.owner == owner).\
                order_by(desc(FileScan.last_scan)).limit(1).one()  # must exist as path_scan is a candidate
            if hash_scan.path != path_scan.path:
                log.warning('Ignoring duplicate file (details in debug log)')
                log.debug('%s' % file_path)
                log.debug('%s' % hash_scan.path)
                # update the path to avoid triggering in future
                path_scan.last_scan = hash_scan.last_scan

            if force or last_modified > hash_scan.last_scan:
                modified.append(file_path)

    s.commit()
    return modified
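
This is the same scan-and-deduplicate logic as `for_modified_files` in Example no. 3, but it returns the list of modified paths instead of invoking a callback. Note that, unlike Example no. 3, nothing here sets `last_scan` to the modification time, so the caller is presumably expected to mark files as scanned itself. Roughly (the processing step is an assumption):

from glob import glob

for file_path in filter_modified_files(s, glob('data/*.fit'), owner=ActivityImporter):
    process(file_path)  # hypothetical per-file processing; last_scan is not updated by the filter
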
Example no. 8
    def test_sources(self):

        with NamedTemporaryFile() as f:

            args, db = bootstrap_file(f, m(V), '5', configurator=acooke)

            with db.session_context() as s:

                # add a diary entry

                diary = s.query(Topic).filter(Topic.name == 'Diary').one()
                d = add(s, TopicJournal(topic=diary, date='2018-09-29'))
                d.populate(log, s)
                self.assertEqual(len(d.topic.fields), 9,
                                 list(enumerate(map(str, d.topic.fields))))
                self.assertEqual(d.topic.fields[0].statistic_name.name,
                                 'Notes')
                self.assertEqual(d.topic.fields[1].statistic_name.name,
                                 'Weight', str(d.topic.fields[1]))
                for field in d.topic.fields:
                    if field in d.statistics:
                        self.assertTrue(d.statistics[field].value is None,
                                        field)
                d.statistics[d.topic.fields[0]].value = 'hello world'
                d.statistics[d.topic.fields[1]].value = 64.5

            with db.session_context() as s:

                # check the diary entry was persisted

                diary = s.query(Topic).filter(Topic.name == 'Diary').one()
                d = s.query(TopicJournal).filter(
                    TopicJournal.topic == diary,
                    TopicJournal.date == '2018-09-29').one()
                s.flush()
                d.populate(log, s)
                self.assertEqual(len(d.topic.fields), 9,
                                 list(enumerate(map(str, d.topic.fields))))
                self.assertEqual(d.topic.fields[0].statistic_name.name,
                                 'Notes')
                self.assertEqual(d.statistics[d.topic.fields[0]].value,
                                 'hello world')
                self.assertEqual(d.topic.fields[1].statistic_name.name,
                                 'Weight')
                self.assertEqual(d.statistics[d.topic.fields[1]].value, 64.5)
                self.assertEqual(d.statistics[d.topic.fields[1]].type,
                                 StatisticJournalType.FLOAT)

            # generate summary stats

            SummaryCalculator(db, schedule='m').run()
            SummaryCalculator(db, schedule='y').run()

            with db.session_context() as s:

                # check the summary stats

                diary = s.query(Topic).filter(Topic.name == 'Diary').one()
                weight = s.query(StatisticJournal).join(StatisticName). \
                    filter(StatisticName.owner == diary, StatisticName.name == 'Weight').one()
                self.assertEqual(weight.value, 64.5)
                self.assertEqual(len(weight.measures), 2, weight.measures)
                self.assertEqual(weight.measures[0].rank, 1)
                self.assertEqual(weight.measures[0].percentile, 100,
                                 weight.measures[0].percentile)
                n = s.query(count(StatisticJournalFloat.id)).scalar()
                self.assertEqual(n, 4, n)
                n = s.query(count(StatisticJournalInteger.id)).scalar()
                self.assertEqual(n, 11, n)
                m_avg = s.query(StatisticJournalFloat).join(StatisticName). \
                    filter(StatisticName.name == 'Avg/Month Weight').one()
                self.assertEqual(m_avg.value, 64.5)
                y_avg = s.query(StatisticJournalFloat).join(StatisticName). \
                    filter(StatisticName.name == 'Avg/Year Weight').one()
                self.assertEqual(y_avg.value, 64.5)
                month = s.query(Interval).filter(
                    Interval.schedule == 'm').one()
                self.assertEqual(month.start, to_date('2018-09-01'),
                                 month.start)
                self.assertEqual(month.finish, to_date('2018-10-01'),
                                 month.finish)

            with db.session_context() as s:

                # delete the diary entry

                diary = s.query(Topic).filter(Topic.name == 'Diary').one()
                d = s.query(TopicJournal).filter(
                    TopicJournal.topic == diary,
                    TopicJournal.date == '2018-09-29').one()
                s.delete(d)

            run('sqlite3 %s ".dump"' % f.name, shell=True)

            with db.session_context() as s:

                # check the delete cascade

                self.assertEqual(s.query(count(TopicJournal.id)).scalar(), 0)
                # this should be zero because the Intervals were automatically deleted
                for source in s.query(Source).all():
                    print(source)
                for journal in s.query(StatisticJournal).all():
                    print(journal)
                self.assertEqual(
                    s.query(count(Source.id)).scalar(), 11,
                    list(map(str,
                             s.query(Source).all())))  # constants
                self.assertEqual(
                    s.query(count(StatisticJournalText.id)).scalar(), 7,
                    s.query(count(StatisticJournalText.id)).scalar())
                self.assertEqual(
                    s.query(count(StatisticJournal.id)).scalar(), 7,
                    s.query(count(StatisticJournal.id)).scalar())
Example no. 9
fields = dict((field.statistic_name.name, field.statistic_name)
              for field in diary.fields)
notes = fields['Notes']
mood = fields['Mood']
hr = fields['Rest HR']
weight = fields['Weight']
sleep = fields['Sleep']
weather = fields['Weather']
meds = fields['Medication']

for row in old.execute(
        '''select date, notes, rest_heart_rate, sleep, mood, weather, medication, weight from diary''',
        []):
    if row['notes'] or row['mood'] or row['rest_heart_rate'] or \
            row['weight'] or row['sleep'] or row['weather']:
        tj = add(s, TopicJournal(time=to_time(row['date']), topic=diary))
        if row['notes']:
            add(
                s,
                StatisticJournalText(statistic_name=notes,
                                     source=tj,
                                     value=row['notes']))
        if row['mood']:
            add(
                s,
                StatisticJournalInteger(statistic_name=mood,
                                        source=tj,
                                        value=row['mood']))
        if row['rest_heart_rate']:
            add(
                s,
                StatisticJournalInteger(statistic_name=hr,
                                        source=tj,
                                        value=row['rest_heart_rate']))