Example #1
def write_fitfile_to_csv(file, output_file='test_output.csv'):
    fitfile = fitparse.FitFile(
        file, data_processor=fitparse.StandardUnitsDataProcessor())
    messages = fitfile.messages
    data = []
    for m in messages:
        skip = False
        if not hasattr(m, 'fields'):
            continue
        fields = m.fields
    # check for important data types
        mdata = {}
        for field in fields:
            if field.name in allowed_fields:
                if field.name == 'timestamp':
                    mdata[field.name] = UTC.localize(
                        field.value).astimezone(CST)
                else:
                    mdata[field.name] = field.value
        for rf in required_fields:
            if rf not in mdata:
                skip = True
        if not skip:
            data.append(mdata)
    # write to csv
    with open(output_file, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(allowed_fields)
        for entry in data:
            writer.writerow([str(entry.get(k, '')) for k in allowed_fields])
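This function relies on module-level names that the excerpt does not show (allowed_fields, required_fields, UTC, CST). A plausible setup, borrowing the field lists and timezone constants that appear in Example #20:

import csv

import fitparse
import pytz

# assumed module-level constants, not shown in the excerpt above
allowed_fields = ['timestamp', 'position_lat', 'position_long', 'distance',
                  'enhanced_altitude', 'altitude', 'enhanced_speed', 'speed',
                  'heart_rate', 'cadence', 'fractional_cadence', 'power',
                  'temperature']
required_fields = ['timestamp', 'position_lat', 'position_long', 'altitude']
UTC = pytz.UTC
CST = pytz.timezone('US/Central')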
Example #2
def main(input_directory, output_file):
    logging.info(f"processing {input_directory}")

    dirs = [dI for dI in os.listdir(input_directory) if os.path.isdir(join(input_directory,dI))]

    dirs.sort()

    headers = set(['timestamp', 'heart_rate', 'activity_type', 'activity_type_last_timestamp'])
    data = []

    for dir in dirs:
        current_dir = join(input_directory,dir)
        logging.info(f"processing directory {current_dir}")

        files = os.listdir(current_dir)
        files.sort()

        fit_files = [file for file in files if file[-4:].lower() == '.fit']

        for file in fit_files:
            current_file = join(current_dir, file)
            fitfile = fitparse.FitFile(current_file, data_processor=fitparse.StandardUnitsDataProcessor())
            logging.info(f"converting {current_file}")
            convert_file(fitfile, headers, data)

    write_to_csv(headers, data, output_file)
    logging.info('finished conversions')
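convert_file and write_to_csv are helpers from the same project and are not shown here. A minimal write_to_csv sketch, assuming data is a list of dicts keyed by the header names:

import csv

def write_to_csv(headers, data, output_file):
    # headers is a set in main(), so fix a column order first
    columns = sorted(headers)
    with open(output_file, 'w', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=columns, extrasaction='ignore')
        writer.writeheader()
        writer.writerows(data)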
Example #3
def main():
    for folder in folders:
        # returns a list containing the names of the entries in the directory given by path
        path = os.path.join(os.getcwd(), 'Data Collection', folder)
        person_dirs = os.listdir(path)                                    # person_dirs --> ['04-112520-245pm-JJ Kim']

        for psdir in person_dirs:
            person = dict()
            person[psdir.split('-')[-1]] = psdir.split('-')[0:3]          # person --> {'JJ Kim': ['04', '112520', '245pm']}

            # Obtain .fit files
            garmincon_path = os.path.join(path, psdir, "Garmin Connect")  # garmincon_path --> ./Garmin Connect
            dirs = os.listdir(garmincon_path)                             # dirs --> ['2020-11-27-124pm']
            garmin_folder_path = os.path.join(garmincon_path, dirs[0])    # only one dir in dirs --> dirs[0]
            fitfiles = os.listdir(garmin_folder_path)
            fitfiles_wellness = [file for file in fitfiles if file[-13:].lower() == '_wellness.fit']

            for file in fitfiles_wellness:
                csv_filename = file[:-4] + '.csv'
                if os.path.exists(os.path.join(garmin_folder_path, csv_filename)):
                    continue
                fitfile_path = os.path.join(garmin_folder_path, file)
                fitfile = fitparse.FitFile(fitfile_path, data_processor=fitparse.StandardUnitsDataProcessor())
                print('converting %s' % fitfile_path)
                write_fitfile_to_csv(fitfile, garmin_folder_path, csv_filename)
            print('finished conversions')

            collect_person(person, garmin_folder_path)
Example #4
    def parse(self):
        if self.status != Fit.Status.NONE:
            return
        self.status = Fit.Status.PARSING
        self.emit('status-changed', self.status)

        try:
            self.fit = fitparse.FitFile(
                self.filename,
                data_processor=fitparse.StandardUnitsDataProcessor())
        except FitParseError:
            self.status = Fit.Status.FAILED
            GObject.idle_add(lambda: self.emit('status-changed', self.status))
            return

        self.fit.parse()

        # find the summary message
        for msg in self.fit.messages:
            if msg.name == 'session':
                self.summary = msg
                break

        self.status = Fit.Status.PARSED
        GObject.idle_add(lambda: self.emit('status-changed', self.status))
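Fit.Status and the 'status-changed' signal belong to the surrounding GObject-based class, which the excerpt omits. A minimal sketch of the status values the method cycles through, with names inferred from the code above:

import enum

class Status(enum.Enum):
    NONE = 0     # not parsed yet
    PARSING = 1  # parse in progress
    PARSED = 2   # parsed successfully
    FAILED = 3   # FitParseError was raised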
Example #5
def main(file):
    new_filename = file[:-4] + '.csv'
    fitfile = fitparse.FitFile(
        file, data_processor=fitparse.StandardUnitsDataProcessor())

    print('converting %s' % file)
    filefile = write_fitfile_to_csv(fitfile, new_filename)
    print('finished conversions')
    return filefile
Example #6
def convertgeofilesbatch(inputpath, outputpath):
    files = os.scandir(inputpath)
    countTcx = 0
    countFit = 0
    countGpx = 0
    results = []
    results.append([
        'Filename', 'Outputfile', 'Linecount', 'Errorfound', 'Errorcount',
        'Errortext'
    ])
    print(
        '\n\n .............. Converting GPX/TCX/FIT files to a standardised CSV intermediate format \n\n'
    )
    for file in files:
        inputfilename = file.name
        fileid = '_' + inputfilename[:-4]
        if file.name[-3:].upper() == 'FIT':
            outputfilename = '_fit' + inputfilename[:-3] + 'csv'
            countFit += 1
            cvinput = inputpath + inputfilename
            cvoutput = outputpath + '/' + outputfilename
            print('FIT : ', cvinput)
            fitfile = fitparse.FitFile(
                cvinput, data_processor=fitparse.StandardUnitsDataProcessor())
            lines = convertrideFIT(fitfile, cvoutput, fileid)
            results.append([file.name, outputfilename, lines, '', '', ''])
        elif file.name[-3:].upper() == 'TCX':
            outputfilename = '/_tcx' + ''.join(file.name)
            countTcx += 1
            prepTCXfile(inputpath, file.name, outputpath, outputfilename)
            cvinput = inputpath + inputfilename
            cvoutput = outputpath + outputfilename[0:-3] + 'csv'
            print('TCX : ', cvinput)
            lines, error, errorcount, lasterror = convertrideTCX(
                cvinput, cvoutput, fileid)
            results.append([
                file.name, outputfilename[1:-3] + 'csv', lines, error,
                errorcount, lasterror
            ])
        elif file.name[-3:].upper() == 'GPX':
            outputfilename = '/_gpx' + ''.join(file.name)
            countGpx += 1
            cvinput = inputpath + inputfilename
            cvoutput = outputpath + outputfilename[0:-3] + 'csv'
            print('GPX : ', cvinput)
            lines, error, errorcount, lasterror = convertrideGPX(
                cvinput, cvoutput, fileid)
            results.append([
                file.name, outputfilename[1:-3] + 'csv', lines, error,
                errorcount, lasterror
            ])

    # write the processing report once, after all files are handled
    with open(outputpath + '/' + '___process_report.csv', 'w',
              newline='') as f:
        writer = csv.writer(f)
        for line in results:
            writer.writerow(line)
Example #7
def readfitfile(fname):
    fitfile = fitparse.FitFile(
        fname,
        data_processor=fitparse.StandardUnitsDataProcessor(),
        check_crc=True,
    )
    records = fitfile.get_messages(name=None,
                                   with_definitions=False,
                                   as_dict=False)
    return records
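readfitfile returns the lazy generator produced by get_messages, so nothing is decoded until it is iterated. A short usage sketch ('activity.fit' is a placeholder path):

records = readfitfile('activity.fit')
for record in records:
    # each record is a fitparse.DataMessage; get_value() reads one field
    print(record.name, record.get_value('timestamp'))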
Example #8
    def dump(self, fit_file):
        fitfile = fitparse.FitFile(
            fit_file,
            data_processor=fitparse.StandardUnitsDataProcessor(),
        )
        activity = model.Activity()
        data = {}

        for message in fitfile.get_messages():
            self._handle_message(message, activity, data)

        return activity
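_handle_message and model.Activity are defined elsewhere in that project. A hypothetical dispatcher, only to illustrate the shape of the call:

    def _handle_message(self, message, activity, data):
        # hypothetical routing by FIT message type
        if message.name == 'session':
            data['sport'] = message.get_value('sport')
        elif message.name == 'record':
            data.setdefault('records', []).append(
                {field.name: field.value for field in message.fields})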
Example #9
def main():
    files = os.listdir()
    fit_files = [file for file in files if file[-4:].lower() == '.fit']
    for file in fit_files:
        new_filename = file[:-4] + '.csv'
        if os.path.exists(new_filename):
            #print('%s already exists. skipping.' % new_filename)
            continue
        fitfile = fitparse.FitFile(
            file, data_processor=fitparse.StandardUnitsDataProcessor())

        print('converting %s' % file)
        write_fitfile_to_csv(fitfile, new_filename)
    print('finished conversions')
Example #10
def main():
    files = os.listdir(fit_data_path)
    fit_files = [file for file in files if file[-4:].lower() == '.fit']
    for file in fit_files:
        new_filename = file[:-4] + '.csv'
        output_path = '{}/fit_data/{}'.format(os.path.pardir, new_filename)
        if os.path.exists(output_path):
            # print('%s already exists. skipping.' % new_filename)
            continue
        fitfile = fitparse.FitFile(
            '{}/fit_data/{}'.format(os.path.pardir, file),
            data_processor=fitparse.StandardUnitsDataProcessor())

        print('Converting {}'.format(file))
        write_fitfile_to_csv(fitfile, output_path)
    print('Convert Finished.')
Example #11
def main(args=None):
    options = parse_args(args)

    fitfile = fitparse.FitFile(
        options.infile,
        data_processor=fitparse.StandardUnitsDataProcessor(),
        check_crc=not options.ignore_crc,
    )
    records = fitfile.get_messages(name=options.name,
                                   with_definitions=options.with_defs,
                                   as_dict=options.as_dict)

    if options.type == "json":
        json.dump(records, fp=options.output, cls=RecordJSONEncoder)
    elif options.type == "readable":
        options.output.writelines(
            format_message(n, record, options)
            for n, record in enumerate(records, 1))
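parse_args is not shown, but every option it must supply can be read off the attribute accesses above (infile, ignore_crc, name, with_defs, as_dict, type, output). A plausible argparse setup:

import argparse
import sys

def parse_args(args=None):
    parser = argparse.ArgumentParser(description='Dump a FIT file')
    parser.add_argument('infile', help='input .fit file')
    parser.add_argument('-o', '--output', type=argparse.FileType('w'),
                        default=sys.stdout)
    parser.add_argument('-t', '--type', choices=['readable', 'json'],
                        default='readable')
    parser.add_argument('-n', '--name', default=None,
                        help='only dump messages with this name')
    parser.add_argument('--ignore-crc', action='store_true')
    parser.add_argument('--with-defs', action='store_true')
    parser.add_argument('--as-dict', action='store_true')
    return parser.parse_args(args)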
Example #12
def main(
        fit_target_dir,
        fit_processed_csv_dir,
        fit_overwrite,
        fit_ignore_splits_and_laps,
):

    ALT_LOG = os.path.join(fit_processed_csv_dir, ALT_LOG_)
    files = os.listdir(fit_target_dir)
    fit_files = [file for file in files if file[-4:].lower()=='.fit']
    overwritten_files = []
    
    if not os.path.exists(ALT_LOG):
        # create an empty log file (portable replacement for `touch`)
        open(ALT_LOG, 'w').close()
        file_list = []
    else:
        file_list = read_log(fit_processed_csv_dir)

    for file in fit_files:
        is_overwritten = False
        if file in file_list and not fit_overwrite:
            continue
        elif file in file_list:
            is_overwritten = True

        new_filename = file[:-4] + '.csv'

        fitfile = fitparse.FitFile(
            os.path.join(fit_target_dir, file),
            data_processor=fitparse.StandardUnitsDataProcessor()
        )

        print('converting %s' % os.path.join(fit_target_dir, file))
        write_fitfile_to_csv(
            fitfile,
            new_filename,
            file,
            fit_target_dir,
            fit_processed_csv_dir,
            is_overwritten,
            fit_ignore_splits_and_laps,
        )
    print('finished conversions')
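read_log and ALT_LOG_ come from the same module. A plausible read_log, assuming the log simply stores one already-converted filename per line:

def read_log(fit_processed_csv_dir):
    # hypothetical helper: return the list of filenames already converted
    with open(os.path.join(fit_processed_csv_dir, ALT_LOG_)) as f:
        return f.read().splitlines()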
Example #13
def read_fit_file(fit_input, allowed_fields, required_fields):
    # Open the file with the fitparse library and grab its messages
    fitfile = fitparse.FitFile(
        fit_input, data_processor=fitparse.StandardUnitsDataProcessor())
    messages = fitfile.messages

    data = []
    for m in messages:
        skip = False
        fields = m.fields

        # create an empty dict to collect this message's data in
        mdata = {}

        # check for important data types
        for field in fields:
            # Only read allowed fields
            if field.name in allowed_fields:
                # 'timestamp' gets special treatment by converting to
                # local time zone
                if field.name == 'timestamp':
                    mdata[field.name] = UTC.localize(
                        field.value).astimezone(CST)
                else:
                    # Zwift files have duplicate fields, one with a value and
                    # one with None. Only add a field if it doesn't exist yet
                    # or its value is not None.
                    if field.name not in mdata or field.value is not None:
                        mdata[field.name] = field.value

        # Make sure all required fields have been read. If not, skip this item
        for rf in required_fields:
            if rf not in mdata:
                skip = True
        if not skip:
            # Append mdata to the result if all required fields are present
            data.append(mdata)
    return data
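UTC and CST must exist at module level here as well, as in Example #1's setup. A short usage sketch with placeholder field lists:

allowed_fields = ['timestamp', 'heart_rate', 'speed', 'distance']
required_fields = ['timestamp', 'heart_rate']

rows = read_fit_file('activity.fit', allowed_fields, required_fields)
print('%d rows contained all required fields' % len(rows))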
Example #14
def main():
    files = os.listdir()
    fit_files = [file for file in files if file[-4:].lower() == '.fit']
    if ALT_FILENAME:
        if not os.path.exists(ALT_LOG):
            # create an empty log file (portable replacement for `touch`)
            open(ALT_LOG, 'w').close()
            file_list = []
        else:
            file_list = read_log()
    for file in fit_files:
        if ALT_FILENAME:
            if file in file_list:
                continue
        new_filename = file[:-4] + '.csv'
        if os.path.exists(new_filename) and not ALT_FILENAME:
            #print('%s already exists. skipping.' % new_filename)
            continue
        fitfile = fitparse.FitFile(
            file, data_processor=fitparse.StandardUnitsDataProcessor())

        print('converting %s' % file)
        write_fitfile_to_csv(fitfile, new_filename, file)
    print('finished conversions')
Example #15
def getFit(filename):
    sports = {
        'cycling': 'Bike',
        'rowing': 'Row',
        'running': 'Run',
        'fitness_equipment': 'Erg',
        'cross_country_skiing': 'Rollerski',
        'skate_skiing': 'Rollerski'
    }
    subsports = {'skate_skiing': 'Rollerski'}
    sampleFields = {
        'enhanced_speed': 'KPH',
        'power': 'WATTS',
        'Power2': 'WATTS',
        'enhanced_altitude': 'ALT',
        'cadence': 'CAD',
        'Cadence2': 'CAD',
        'distance': 'KM',
        'heart_rate': 'HR',
        'position_lat': 'LAT',
        'position_long': 'LON',
        'timestamp': 'SECS'
    }

    data = {}
    ride = {}
    ride['TAGS'] = {}
    ride['TAGS']['Source Filename'] = filename
    samples = []

    fitfile = fitparse.FitFile(
        filename, data_processor=fitparse.StandardUnitsDataProcessor())
    messages = fitfile.get_messages()

    t0 = None     # timestamp of the first record
    t_gap = 0     # accumulated seconds omitted from the session (pauses)
    t_last = 0    # seconds since t0 at the previous record, used to detect gaps
    for obj in messages:
        # other obj.names: activity, lap, device_info
        if (obj.name == 'file_id'):
            ride['DEVICETYPE'] = obj.get_value('manufacturer')
            ride['TAGS']['Device'] = obj.get_value('manufacturer')
        if (obj.name == 'session'):
            ride['TAGS']['Date'] = obj.get_value('timestamp')
            if obj.get_value('sport') in sports:
                ride['TAGS']['Sport'] = sports[obj.get_value('sport')]
            else:
                ride['TAGS']['Sport'] = obj.get_value('sport')
            if obj.get_value('sub_sport') in subsports:
                ride['TAGS']['SubSport'] = subsports[obj.get_value(
                    'sub_sport')]
            else:
                ride['TAGS']['SubSport'] = obj.get_value('sub_sport')

        if obj.name == 'record':
            sample = {}
            for d in obj:
                if d.name in sampleFields:
                    if d.name == 'timestamp':
                        sample['dt'] = d.value
                        if not t0:
                            t0 = d.value
                            sample['SECS'] = 0
                        else:
                            if ((d.value - t0).seconds - t_last) > 10:
                                t_gap += (d.value - t0).seconds - t_last - 1
                            t_last = (d.value - t0).seconds
                            sample['SECS'] = t_last - t_gap
                    elif d.value:
                        sample[sampleFields[d.name]] = d.value
            for field in sampleFields:  # fill all missing with 0
                if sampleFields[field] not in sample:
                    sample[sampleFields[field]] = 0
            if sample['WATTS'] > 1800:
                sample['WATTS'] = 0
            samples.append(sample)

    watts = False
    alt = False
    cad = False
    km = False
    hr = False
    lat = False
    lon = False

    for sample in samples:
        if sample['ALT'] > 0:
            alt = True
        if sample['CAD'] > 0:
            cad = True
        if sample['KM'] > 0:
            km = True
        if sample['HR'] > 0:
            hr = True
        if sample['LAT'] != 0:
            lat = True
        if sample['LON'] != 0:
            lon = True
        if sample['WATTS'] > 0:
            watts = True

    for sample in samples:
        if not alt:
            sample.pop('ALT')
        if not cad:
            sample.pop('CAD')
        if not km:
            sample.pop('KM')
        if not hr:
            sample.pop('HR')
        if not lat:
            sample.pop('LAT')
        if not lon:
            sample.pop('LON')
        if not watts:
            sample.pop('WATTS')

    # resample to 1s
    df = pd.DataFrame(samples)
    df['dts'] = [dt.datetime.fromtimestamp(s) for s in df.SECS.values]
    # df = df[~df.index.duplicated(keep='first')] # drop duplicated seconds
    df = df.set_index('dts')
    df = df.resample('1s').bfill()
    df = df.reset_index()
    df['SECS'] = list(range(len(df.index)))
    samples = df.to_dict('records')

    ride['STARTTIME'] = t0.strftime('%Y/%m/%d %H:%M:%S UTC ')
    ride['RECINTSECS'] = 1
    ride['IDENTIFIER'] = ""
    ride['SAMPLES'] = samples
    data['RIDE'] = ride
    return data
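The returned dict appears to follow GoldenCheetah's native JSON ride layout (RIDE, STARTTIME, RECINTSECS, TAGS, SAMPLES). A usage sketch that serializes it, with 'activity.fit' as a placeholder:

import json

ride = getFit('activity.fit')
with open('activity.json', 'w') as f:
    # TAGS still holds a datetime under 'Date', so fall back to str()
    json.dump(ride, f, default=str)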
Example #16
    def import_fit_file(self, username, user_id, file_name, original_file_name,
                        desired_activity_id):
        """Imports the specified FIT file."""
        """Caller can request an activity ID by specifying a value to desired_activity_id."""

        # Sanity check.
        if not os.path.isfile(file_name):
            raise Exception("File does not exist.")

        activity_type = ''
        sub_activity_type = ''

        start_time_unix = 0
        end_time_unix = 0

        locations = []
        cadences = []
        heart_rate_readings = []
        power_readings = []
        temperatures = []
        events = []

        fit_file = fitparse.FitFile(
            file_name, data_processor=fitparse.StandardUnitsDataProcessor())
        for message in fit_file.messages:

            if not hasattr(message, 'fields'):
                continue

            fields = message.fields

            message_data = {}
            for field in fields:
                message_data[field.name] = field.value

            if 'sport' in message_data and isinstance(message_data['sport'],
                                                      str):
                activity_type = message_data['sport']
            if 'sub_sport' in message_data and isinstance(
                    message_data['sub_sport'], str):
                sub_activity_type = message_data['sub_sport']
            if 'timestamp' not in message_data:
                continue

            dt_obj = message_data['timestamp']
            dt_tuple = dt_obj.timetuple()
            dt_unix_seconds = calendar.timegm(dt_tuple)
            dt_unix = dt_unix_seconds * 1000

            # Update start and end times.
            if message_data.get('event_type') == 'start':
                start_time_unix = dt_unix_seconds
            end_time_unix = dt_unix_seconds

            # If the start has not been found yet, then continue.
            if start_time_unix == 0:
                continue

            # Look for location and sensor data.
            if (message_data.get('position_lat') is not None
                    and message_data.get('position_long') is not None):
                location = [dt_unix,
                            float(message_data['position_lat']),
                            float(message_data['position_long'])]
                if 'enhanced_altitude' in message_data:
                    location.append(float(message_data['enhanced_altitude']))
                locations.append(location)
            if message_data.get('cadence') is not None:
                cadences.append([dt_unix, float(message_data['cadence'])])
            if message_data.get('heart_rate') is not None:
                heart_rate_readings.append(
                    [dt_unix, float(message_data['heart_rate'])])
            if message_data.get('power') is not None:
                power_readings.append([dt_unix, float(message_data['power'])])
            if message_data.get('temperature') is not None:
                temperatures.append(
                    [dt_unix, float(message_data['temperature'])])
            if message_data.get('event') is not None:
                events.append(message_data)

        # Make sure this is not a duplicate activity.
        if self.activity_writer.is_duplicate_activity(user_id, start_time_unix,
                                                      desired_activity_id):
            raise Exception("Duplicate activity.")

        # Since we don't have anything else, use the file name as the name of the activity.
        activity_name = os.path.splitext(
            os.path.basename(original_file_name))[0]

        # Figure out the type of the activity.
        normalized_activity_type = Importer.normalize_activity_type(
            activity_type, sub_activity_type, activity_name)

        # Indicate the start of the activity.
        device_str, activity_id = self.activity_writer.create_activity(
            username, user_id, activity_name, "", normalized_activity_type,
            start_time_unix, desired_activity_id)

        # Write all the locations at once.
        self.activity_writer.create_activity_locations(device_str, activity_id,
                                                       locations)

        # Write all the sensor readings at once.
        self.activity_writer.create_activity_sensor_readings(
            activity_id, Keys.APP_CADENCE_KEY, cadences)
        self.activity_writer.create_activity_sensor_readings(
            activity_id, Keys.APP_HEART_RATE_KEY, heart_rate_readings)
        self.activity_writer.create_activity_sensor_readings(
            activity_id, Keys.APP_POWER_KEY, power_readings)
        self.activity_writer.create_activity_sensor_readings(
            activity_id, Keys.APP_TEMP_KEY, temperatures)
        self.activity_writer.create_activity_events(activity_id, events)

        # Let it be known that we are finished with this activity.
        self.activity_writer.finish_activity(activity_id, end_time_unix)
        return True, device_str, activity_id
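Note the two time units above: dt_unix (milliseconds) keys every location and sensor reading, while start_time_unix and end_time_unix (seconds) bound the activity. A quick check of the conversion:

import calendar
import datetime

dt_obj = datetime.datetime(2021, 1, 1, 12, 0, 0)
dt_unix_seconds = calendar.timegm(dt_obj.timetuple())  # 1609502400
dt_unix = dt_unix_seconds * 1000                       # 1609502400000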
Example #17
def convert_fit_to_csv(in_file, out_file):
    """
    convert .fit file to .csv file.  Note, only certain fields are processed based on the dive project.
    :param in_file: input fit file
    :param out_file: output csv file
    """
    fit_parse = fitparse.FitFile(in_file, data_processor=fitparse.StandardUnitsDataProcessor())

    save_vx = 0
    save_dx = 0
    save_vy = 0
    save_dy = 0
    save_vz = 0
    save_dz = 0

    data = []
    for m in fit_parse.messages:
        if not hasattr(m, 'fields'):
            continue

        # for debug - print out all the fields in the message - can use this to look at the all available information
        # print(m.fields)

        # turn m.fields array into a fields dictionary for ease of processing (lookup by name)
        fields = {k.name: k.value for k in m.fields}
        if 'compressed_calibrated_accel_x' in fields:
            timestamp = fields['timestamp']
            num_samples = len(fields['compressed_calibrated_accel_x'])
            vx = [0.0] * (num_samples + 1)
            dx = [0.0] * (num_samples + 1)
            vy = [0.0] * (num_samples + 1)
            dy = [0.0] * (num_samples + 1)
            vz = [0.0] * (num_samples + 1)
            dz = [0.0] * (num_samples + 1)

            vx[0] = save_vx
            dx[0] = save_dx
            vy[0] = save_vy
            dy[0] = save_dy
            vz[0] = save_vz
            dz[0] = save_dz
            dt = 1 / 25  # sample period in seconds (25 Hz accelerometer)

            for i in range(num_samples):
                vx[i+1] = fields['compressed_calibrated_accel_x'][i] * (9.80665/1000) * dt + vx[i]
                dx[i+1] = vx[i] * dt + dx[i]
                vy[i + 1] = fields['compressed_calibrated_accel_y'][i] * (9.80665 / 1000) * dt + vy[i]
                dy[i + 1] = vy[i] * dt + dy[i]
                vz[i + 1] = fields['compressed_calibrated_accel_z'][i] * (9.80665 / 1000) * dt + vz[i]
                dz[i + 1] = vz[i] * dt + dz[i]

                # expand this message into one row per sample; timestamps are
                # offset backwards so the readings are spread evenly over the
                # second ending at the message timestamp
                row = {'type': 'A',
                       'timestamp': timestamp - timedelta(milliseconds=((num_samples-i-1) * 1000 / num_samples)),
                       'accel_x': fields['compressed_calibrated_accel_x'][i],
                       'accel_y': fields['compressed_calibrated_accel_y'][i],
                       'accel_z': fields['compressed_calibrated_accel_z'][i],
                       'vel_x': vx[i+1],
                       'dis_x': dx[i+1],
                       'vel_y': vy[i + 1],
                       'dis_y': dy[i + 1],
                       'vel_z': vz[i + 1],
                       'dis_z': dz[i + 1],
                       }
                data.append(row)
            save_vx = vx[num_samples]
            save_dx = dx[num_samples]
            save_vy = vy[num_samples]
            save_dy = dy[num_samples]
            save_vz = vz[num_samples]
            save_dz = dz[num_samples]

        elif 'heart_rate' in fields:
            row = {'type': 'H',
                   'timestamp': fields['timestamp'],
                   'heart_rate': fields['heart_rate']}
            data.append(row)
        elif 'position_lat' in fields:
            row = {'type': 'G',
                   'timestamp': fields['timestamp'],
                   'lat': fields['position_lat'],
                   'lon': fields['position_long']}
            data.append(row)

        if 'mag_x' in fields and fields['mag_x'] is not None:
            row = {'type': 'M',
                   'timestamp':fields['timestamp'],
                   'mag_x': fields['mag_x'],
                   'mag_y': fields['mag_y'],
                   'mag_z': fields['mag_z']}
            data.append(row)

    # write out csv
    with open(out_file, 'w') as f:
        writer = csv.DictWriter(f, delimiter=',', lineterminator='\n', fieldnames=header_fields)
        writer.writeheader()
        writer.writerows(data)
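The DictWriter above expects a module-level header_fields list covering every key emitted by the four row types, plus imports for csv and timedelta. A definition consistent with the rows built above:

import csv
from datetime import timedelta

header_fields = ['type', 'timestamp',
                 'accel_x', 'accel_y', 'accel_z',
                 'vel_x', 'dis_x', 'vel_y', 'dis_y', 'vel_z', 'dis_z',
                 'heart_rate', 'lat', 'lon',
                 'mag_x', 'mag_y', 'mag_z']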
Example #18
def write_fitfile_to_csv(fitfile, output_file='test_output.csv'):
    messages = fitfile.messages
    data = []
    for m in messages:
        skip = False
        if not hasattr(m, 'fields'):
            continue
        fields = m.fields
        # check for important data types
        mdata = {}
        for field in fields:
            if field.name in allowed_fields:
                if field.name == 'timestamp':
                    mdata[field.name] = UTC.localize(
                        field.value).astimezone(CST)
                else:
                    mdata[field.name] = field.value
        for rf in required_fields:
            if rf not in mdata:
                skip = True
        if not skip:
            data.append(mdata)
    # write to csv
    with open(output_file, 'w+') as f:
        writer = csv.writer(f)
        writer.writerow(allowed_fields)
        for entry in data:
            writer.writerow([str(entry.get(k, '')) for k in allowed_fields])
    print('wrote %s' % output_file)


fitfile = fitparse.FitFile(
    '2227169497.fit', data_processor=fitparse.StandardUnitsDataProcessor())
newfilename = 'something.csv'
write_fitfile_to_csv(fitfile, newfilename)
Example #19
class RecordJSONEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, types.GeneratorType):
            # get_messages() returns a generator; serialize it as a list
            return list(obj)
        if isinstance(obj, datetime.datetime):
            return obj.isoformat()
        if isinstance(obj, fitparse.DataMessage):
            return {
                "type": obj.name,
                "data": {data.name: data.value
                         for data in obj}
            }
        return super(RecordJSONEncoder, self).default(obj)


for fname in os.listdir(input_dir):
    if fname.endswith(".fit"):
        name, _ = os.path.splitext(fname)
        output_file = output_dir + '/' + name + '.json'
        if os.path.exists(output_file):
            continue

        try:
            fitfile = fitparse.FitFile(
                input_dir + '/' + fname,
                data_processor=fitparse.StandardUnitsDataProcessor(),
                check_crc=False)

            records = fitfile.get_messages(name='session')

            with codecs.open(output_file, 'w', encoding='UTF-8') as fp:
                json.dump(records, fp=fp, cls=RecordJSONEncoder)
        except Exception:
            print("Error reading " + fname)
Example #20
def get_fit_dfs(fit_file_path):
    # some prerequisite variables
    allowed_fields = ['timestamp', 'position_lat', 'position_long', 'distance',
                      'enhanced_altitude', 'altitude', 'enhanced_speed',
                      'speed', 'heart_rate', 'cadence', 'fractional_cadence',
                      'power', 'temperature']
    required_fields = ['timestamp', 'position_lat', 'position_long', 'altitude']

    # for laps
    lap_fields = ['timestamp', 'start_time', 'start_position_lat', 'start_position_long',
                  'end_position_lat', 'end_position_long', 'total_elapsed_time', 'total_timer_time',
                  'total_distance', 'total_strides', 'total_calories', 'enhanced_avg_speed', 'avg_speed',
                  'enhanced_max_speed', 'max_speed', 'total_ascent', 'total_descent',
                  'event', 'event_type', 'avg_heart_rate', 'max_heart_rate',
                  'avg_running_cadence', 'max_running_cadence',
                  'lap_trigger', 'sub_sport', 'avg_fractional_cadence', 'max_fractional_cadence',
                  'total_fractional_cycles', 'avg_vertical_oscillation', 'avg_temperature', 'max_temperature']
    # the last field above was added manually
    lap_required_fields = ['timestamp', 'start_time', 'lap_trigger']

    # start/stop events
    start_fields = ['timestamp', 'timer_trigger', 'event', 'event_type', 'event_group']
    start_required_fields = copy(start_fields)

    all_allowed_fields = set(allowed_fields + lap_fields + start_fields)

    UTC = pytz.UTC
    CST = pytz.timezone('US/Central')

    # actual processing steps
    fitfile = fitparse.FitFile(fit_file_path,
                               data_processor=fitparse.StandardUnitsDataProcessor())
    messages = fitfile.messages
    data = []
    lap_data = []
    start_data = []
    timestamp = get_timestamp(messages)
    event_type = get_event_type(messages)
    if event_type is None:
        event_type = 'other'
    for m in messages:
        skip = False
        skip_lap = False
        skip_start = False
        if not hasattr(m, 'fields'):
            continue
        fields = m.fields
        # check for important data types
        mdata = {}
        for field in fields:
            if field.name in all_allowed_fields:
                if field.name == 'timestamp':
                    mdata[field.name] = UTC.localize(field.value).astimezone(CST)
                else:
                    mdata[field.name] = field.value
        for rf in required_fields:
            if rf not in mdata:
                skip = True
        for lrf in lap_required_fields:
            if lrf not in mdata:
                skip_lap = True
        for srf in start_required_fields:
            if srf not in mdata:
                skip_start = True
        if not skip:
            data.append(mdata)
        elif not skip_lap:
            lap_data.append(mdata)
        elif not skip_start:
            start_data.append(mdata)

    data_df = pd.DataFrame(data)
    lap_data_df = pd.DataFrame(lap_data)
    start_data_df = pd.DataFrame(start_data)
    
    return data_df, lap_data_df, start_data_df
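get_fit_dfs assumes fitparse, pandas (as pd), pytz, copy (from the copy module), and the helpers get_timestamp and get_event_type at module level. Usage then reduces to one call per file ('activity.fit' is a placeholder):

data_df, lap_data_df, start_data_df = get_fit_dfs('activity.fit')
print(data_df.shape, lap_data_df.shape, start_data_df.shape)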