Code example #1
 def test_other_bad(self):
     bad = read_fit(join(self.test_dir, 'source/other/2018-04-15-09-18-20.fit'))
     fixed = fix(bytearray(bad), drop=True, fix_checksum=True, fix_header=True)
     with self.assertTextMatch(join(self.test_dir, 'target/other/TestFixFit.test_unknown_bad:1')) as output:
         summarize(RECORDS, fixed, output=output)
     bad = read_fit(join(self.test_dir, 'source/other/2018-02-24-10-04-10.fit'))
     fixed = fix(bytearray(bad), drop=True, fix_checksum=True, fix_header=True)
     with self.assertTextMatch(join(self.test_dir, 'target/other/TestFixFit.test_unknown_bad:2')) as output:
         summarize(RECORDS, fixed, output=output)
Code example #2
File: test_fix_fit.py Project: aldo-dev01/choochoo
 def test_null(self):
     good = read_fit(
         self.log, join(self.test_dir,
                        'source/personal/2018-08-27-rec.fit'))
     same = fix(bytearray(good))
     self.assertTrue(good is not same)  # check making a copy
     self.assertEqual(good, same)
Code example #3
 def test_drop(self):
     bad = read_fit(join(self.test_dir, 'source/other/8CS90646.FIT'))
     fixed = fix(bad, drop=True, fix_checksum=True, fix_header=True)
     self.assertTrue(len(fixed) < len(bad))
     with self.assertTextMatch(
             'data/test/target/other/TestFixFit.test_drop') as output:
         summarize(RECORDS, fixed, output=output)
Code example #4
File: test_fit.py Project: aldo-dev01/choochoo
 def standard_dmp(self, source, target, format, filters=None):
     with self.assertTextMatch(target, filters=filters) as output:
         summarize(format,
                   read_fit(log, source),
                   warn=True,
                   profile_path=self.profile_path,
                   output=output)
Code example #5
 def test_personal(self):
     for fit_file in glob(join(self.test_dir, 'source/personal/*.fit')):
         file_name = basename(fit_file)
         with self.assertTextMatch(
                 join(self.test_dir, 'target/personal/TestFit.test_personal:' + file_name)) as output:
             summarize_tables(read_fit(fit_file), width=80, output=output,
                              profile_path=self.profile_path)
Code example #6
File: test_fit.py Project: aldo-dev01/choochoo
    def test_python(self):
        # for other-projects.md
        from logging import basicConfig, getLogger, INFO
        from ch2.fit.profile.profile import read_fit, read_profile
        from ch2.fit.format.records import fix_degrees, no_units
        from ch2.fit.format.read import parse_data

        basicConfig(level=INFO)
        log = getLogger()

        data = read_fit(log, 'data/test/source/personal/2018-07-26-rec.fit')
        types, messages = read_profile(log)
        state, tokens = parse_data(data, types, messages)

        LAT, LONG = 'position_lat', 'position_long'
        positions = []

        for offset, token in tokens:
            record = token.parse_token().as_dict(no_units, fix_degrees)
            if record.name == 'record':
                positions.append((record.data[LAT][0], record.data[LONG][0]))

        print('Read %s positions' % len(positions))

        self.assertAlmostEqual(positions[0][0], -33.42, places=1)
        self.assertAlmostEqual(positions[0][1], -70.61, places=1)
Code example #7
 def test_no_last_byte(self):
     good = read_fit(join(self.test_dir, 'source/personal/2018-08-27-rec.fit'))
     same = fix(bytearray(good), drop=True, fix_checksum=True, fix_header=True)
     self.assertEqual(same, good)
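     # dropping the trailing checksum byte(s) should still leave a repairable file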
     fixed = fix(bytearray(good)[:-1], drop=True, fix_checksum=True, fix_header=True)
     self.assertEqual(fixed, good)
     fixed = fix(bytearray(good)[:-2], drop=True, fix_checksum=True, fix_header=True)
     self.assertEqual(fixed, good)
Code example #8
 def test_other_header(self):
     bad = read_fit(join(self.test_dir, 'source/other/8CS90646.FIT'))
     old_header = FileHeader(bad)
     fixed = fix(bytearray(bad), drop=True, header_size=27, fix_checksum=True, fix_header=True)
     new_header = FileHeader(fixed)
     self.assertEqual(new_header.header_size, 27)
     self.assertEqual(new_header.protocol_version, old_header.protocol_version)
     self.assertEqual(new_header.profile_version, old_header.profile_version)
Code example #9
 def test_pyfitparse_fix_drop_2(self):
     for file in ('event_timestamp.fit',  # data size incorrect
                  'antfs-dump.63.fit',  # strange timestamp
                  'compressed-speed-distance.fit',  # strange timestamp
                  ):
         bad = read_fit(join(self.test_dir, 'source/python-fitparse', file))
         with self.assertBinaryMatch(join(self.test_dir, 'source/python-fitparse-fix', file)) as output:
             output.write(fix(bad, drop=True, max_drop_cnt=2, fix_checksum=True, fix_header=True))
Code example #10
 def test_pyfitparse_fix_drop(self):
     for file in ('activity-unexpected-eof.fit',  # data size incorrect
                  'activity-settings-nodata.fit',   # data size incorrect
                  'activity-settings-corruptheader.fit',  # data size incorrect
                  ):
         bad = read_fit(join(self.test_dir, 'source/python-fitparse', file))
         with self.assertBinaryMatch(join(self.test_dir, 'source/python-fitparse-fix', file)) as output:
             output.write(fix(bad, drop=True, fix_checksum=True, fix_header=True))
Code example #11
 def test_pyfitparse_fix_header(self):
     for file in ('activity-filecrc.fit',  # bad checksum
                  'activity-activity-filecrc.fit',  # data size incorrect
                  'activity-settings.fit',  # data size incorrect
                  ):
         bad = read_fit(join(self.test_dir, 'source/python-fitparse', file))
         with self.assertBinaryMatch(join(self.test_dir, 'source/python-fitparse-fix', file)) as output:
             output.write(fix(bad, fix_checksum=True, fix_header=True))
Code example #12
File: test_fit.py Project: aldo-dev01/choochoo
 def standard_csv(self, fit_path, csv_path, filters=None):
     if filters is None: filters = []
     if EXC_HDR_CHK not in filters: filters = [EXC_HDR_CHK] + filters
     with self.assertCSVMatch(csv_path, filters=filters) as output:
         summarize_csv(read_fit(log, fit_path),
                       profile_path=self.profile_path,
                       warn=True,
                       output=output,
                       internal=True)
Code example #13
 def test_null_slices(self):
     good = read_fit(
         join(self.test_dir, 'source/personal/2018-08-27-rec.fit'))
     same = fix(bytearray(good),
                slices=':',
                fix_checksum=True,
                fix_header=True)
     self.assertTrue(good is not same)  # check making a copy
     self.assertEqual(good, same)
Code example #14
 def test_drop_bug(self):
     bad = read_fit(
         join(self.test_dir, 'source/personal/2018-07-26-rec.fit'))
     fix(bad,
         drop=True,
         fix_checksum=True,
         fix_header=True,
         max_delta_t=60,
         max_fwd_len=500)
Code example #15
 def test_decode(self):
     types, messages, records = \
         filtered_records(read_fit(join(self.test_dir, 'source/personal/2018-07-26-rec.fit')),
                          profile_path=self.profile_path)
     with self.assertTextMatch(join(self.test_dir, 'target/personal/TestFit.test_decode'),
                               filters=[HEX_ADDRESS]) as output:
         for _, _, record in records:
             print(record.into(tuple, filter=chain(no_names, append_units, no_bad_values, fix_degrees)),
                   file=output)
Code example #16
 def test_timestamp_16(self):
     types, messages, records = \
         filtered_records(read_fit(join(self.test_dir, 'source/personal/[email protected]_24755630065.fit')),
                          profile_path=self.profile_path)
     with self.assertTextMatch(join(self.test_dir, 'target/personal/TestFit.test_timestamp_16'),
                               filters=[HEX_ADDRESS]) as output:
         for _, _, record in records:
             if record.name == 'monitoring':
                 print(record.into(tuple, filter=chain(no_names, append_units, no_bad_values, fix_degrees)),
                       file=output)
Code example #17
 def standard_csv(self, fit_path, csv_path, filters=None):
     if filters is None: filters = []
     if EXC_HDR_CHK not in filters: filters = [EXC_HDR_CHK] + filters
     log.warning(f'Comparing ch2 fit csv {fit_path} and {csv_path}')
     with self.assertCSVMatch(csv_path, filters=filters) as output:
         summarize_csv(read_fit(fit_path),
                       profile_path=self.profile_path,
                       warn=True,
                       output=output,
                       internal=True)
Code example #18
 def test_developer(self):
     with self.assertTextMatch(
             join(self.test_dir,
                  'target/sdk/TestFit.test_developer')) as output:
         summarize(FIELDS,
                   read_fit(
                       join(self.test_dir, 'source/sdk/DeveloperData.fit')),
                   profile_path=self.profile_path,
                   width=80,
                   output=output)
Code example #19
 def test_dump(self):
     with self.assertTextMatch(
             join(self.test_dir,
                  'target/personal/TestFit.test_dump')) as output:
         summarize(FIELDS,
                   read_fit(
                       join(self.test_dir,
                            'source/personal/2018-07-30-rec.fit')),
                   profile_path=self.profile_path,
                   width=80,
                   output=output)
Code example #20
File: test_date.py Project: tuxella/choochoo
 def test_date16(self):
     data = read_fit('data/test/source/other/38014592427.fit')
     # types, messages, records = filtered_records(data, after_records=2342)
     types, messages, records = filtered_records(data)
     previous = None
     for (a, b, record) in records:
         if 'timestamp' in record._fields:
             if previous and previous > record.timestamp:
                 # we have time-travel of 1 minute in this data
                 self.assertTrue(previous - record.timestamp < dt.timedelta(minutes=2),
                                 f'{previous} -> {record.timestamp}')
             previous = record.timestamp
Code example #21
 def test_no_header(self):
     good = read_fit(join(self.test_dir, 'source/personal/2018-08-27-rec.fit'))
     same = fix(bytearray(good), drop=True, fix_checksum=True)
     self.assertTrue(good is not same)  # check making a copy
     self.assertEqual(same, good)
     header = FileHeader(good)
     with self.assertRaisesRegex(Exception, 'Error fixing checksum'):
         fix(bytearray(good)[len(header):], fix_checksum=True, fix_header=True)
     fixed = fix(bytearray(good)[len(header):],
                 add_header=True, drop=True, fix_checksum=True, fix_header=True)
     self.assertEqual(good, fixed)
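     # with add_header the original 14-byte header ends up at bytes 14:28, which the slices then remove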
     fixed = fix(bytearray(good), add_header=True, slices=':14,28:', fix_checksum=True, fix_header=True)
     self.assertEqual(good, fixed)
Code example #22
 def test_slices(self):
     bad = read_fit(join(self.test_dir, 'source/other/8CS90646.FIT'))
     with self.assertRaisesRegex(Exception, 'Error fixing checksum'):
         fix(bad, slices=':1000', fix_checksum=True,
             fix_header=True)  # first 1k bytes only
Code example #23
File: test_fit.py Project: aldo-dev01/choochoo
 def test_grep(self):
     data = read_fit(
         log, join(self.test_dir, 'source/personal/2018-07-26-rec.fit'))
     summarize(GREP, data, grep=['.*:.*speed>10'])
Code example #24
File: fit.py Project: clearinterface/choochoo
def fit(args, db):
    '''
## fit

    > ch2 fit SUB-COMMAND PATH [PATH ...]

Print the contents of fit files.

The format and details displayed are selected by the sub-command: records, tables, messages, fields, csv
and grep (the last requiring patterns to match against).

For a list of sub-commands and their options see `ch2 fit -h`.

For the options of a particular sub-command see `ch2 fit sub-command -h`.

Note: When using bash, use `shopt -s globstar` to enable ** globbing.

### Examples

    > ch2 -v 0 fit records ride.fit

Will print the contents of the file to stdout (use `-v 0` to suppress logging
or redirect stderr elsewhere).

    > ch2 -v 0 fit grep -p '.*:sport=cycling' --match 0 --name directory/**/*.fit

Will list file names that contain cycling data.

    > ch2 fit grep -p PATTERN -- FILE

You may need a `--` between patterns and file paths so that the argument parser can decide where patterns
finish and paths start.
    '''

    format = args[SUB_COMMAND]
    after_bytes = args[AFTER_BYTES]
    limit_bytes = args[LIMIT_BYTES]
    after_records = args[AFTER_RECORDS]
    limit_records = args[LIMIT_RECORDS]
    warn = args[WARN]
    no_validate = args[no(VALIDATE)]
    max_delta_t = args[MAX_DELTA_T]

    # todo - can this be handled by argparse?
    if (after_records or limit_records != -1) and (after_bytes
                                                   or limit_bytes != -1):
        raise Exception('Constrain either records or bytes, not both')

    for file_path in args[PATH]:

        name_file = file_path if args[NAME] else None
        if name_file and format != GREP:
            print()
            print(name_file)

        data = read_fit(log, file_path)

        if format == RECORDS:
            summarize_records(data,
                              all_fields=args[ALL_FIELDS],
                              all_messages=args[ALL_MESSAGES],
                              internal=args[INTERNAL],
                              after_bytes=after_bytes,
                              limit_bytes=limit_bytes,
                              after_records=after_records,
                              limit_records=limit_records,
                              record_names=args[MESSAGE],
                              field_names=args[FIELD],
                              warn=warn,
                              no_validate=no_validate,
                              max_delta_t=max_delta_t,
                              width=args[WIDTH] or terminal_width())
        elif format == TABLES:
            summarize_tables(data,
                             all_fields=args[ALL_FIELDS],
                             all_messages=args[ALL_MESSAGES],
                             internal=args[INTERNAL],
                             after_bytes=after_bytes,
                             limit_bytes=limit_bytes,
                             after_records=after_records,
                             limit_records=limit_records,
                             record_names=args[MESSAGE],
                             field_names=args[FIELD],
                             warn=warn,
                             no_validate=no_validate,
                             max_delta_t=max_delta_t,
                             width=args[WIDTH] or terminal_width())
        elif format == CSV:
            summarize_csv(data,
                          internal=args[INTERNAL],
                          after_bytes=after_bytes,
                          limit_bytes=limit_bytes,
                          after_records=after_records,
                          limit_records=limit_records,
                          record_names=args[MESSAGE],
                          field_names=args[FIELD],
                          warn=warn,
                          max_delta_t=max_delta_t)
        elif format == GREP:
            summarize_grep(data,
                           args[PATTERN],
                           after_bytes=after_bytes,
                           limit_bytes=limit_bytes,
                           after_records=after_records,
                           limit_records=limit_records,
                           warn=warn,
                           no_validate=no_validate,
                           max_delta_t=max_delta_t,
                           width=args[WIDTH] or terminal_width(),
                           name_file=name_file,
                           match=args[MATCH],
                           compact=args[COMPACT],
                           context=args[CONTEXT],
                           invert=args[NOT])
        elif format == TOKENS:
            summarize_tokens(data,
                             after_bytes=after_bytes,
                             limit_bytes=limit_bytes,
                             after_records=after_records,
                             limit_records=limit_records,
                             warn=warn,
                             no_validate=no_validate,
                             max_delta_t=max_delta_t)
        elif format == FIELDS:
            summarize_fields(data,
                             after_bytes=after_bytes,
                             limit_bytes=limit_bytes,
                             after_records=after_records,
                             limit_records=limit_records,
                             warn=warn,
                             no_validate=no_validate,
                             max_delta_t=max_delta_t)
        else:
            raise Exception('Bad format: %s' % format)
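
The docstring above describes the command-line interface; the same records dump can be produced from Python using the names that appear throughout these examples. Below is a minimal sketch, assuming `summarize` and `RECORDS` can be imported from the project's summary module and using a placeholder path `ride.fit`; only the `read_fit` import is confirmed by code example #6 above, the rest is an assumption that may vary between choochoo versions.

    # Hedged sketch: a programmatic equivalent of `ch2 fit records ride.fit`.
    from logging import basicConfig, getLogger, INFO
    from ch2.fit.profile.profile import read_fit      # as in code example #6
    from ch2.fit.summary import summarize, RECORDS    # assumed import location

    basicConfig(level=INFO)
    log = getLogger()

    data = read_fit(log, 'ride.fit')   # 'ride.fit' is a placeholder path
    summarize(RECORDS, data)           # writes the record summary to stdout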
Code example #25
 def test_other_good(self):
     good = read_fit(join(self.test_dir, 'source/other/77F73023.FIT'))
     same = fix(bytearray(good))
     self.assertEqual(good, same)
     self.assertFalse(good is same)