def get_dev_type(self, dev_data_index, field_def_num):
    """Return the developer field type registered for this index/field pair.

    In strict mode (``self.check_developer_data``) an unknown
    dev_data_index or field_def_num raises FitParseError. In tolerant
    mode a warning is emitted and a dummy entry is fabricated via the
    ``_append_dev_*`` helpers so parsing can continue.
    """
    if dev_data_index not in self.dev_types:
        if self.check_developer_data:
            raise FitParseError(
                "No such dev_data_index=%s found when looking up field %s"
                % (dev_data_index, field_def_num)
            )
        warnings.warn(
            "Dev type for dev_data_index=%s missing. Adding dummy dev type."
            % (dev_data_index)
        )
        self._append_dev_data_id(dev_data_index)

    # Alias the fields dict once; the append helper below mutates this
    # same dict in place, so the final lookup sees any added entry.
    fields = self.dev_types[dev_data_index]['fields']
    if field_def_num not in fields:
        if self.check_developer_data:
            raise FitParseError(
                "No such field %s for dev_data_index %s"
                % (field_def_num, dev_data_index)
            )
        warnings.warn(
            "Field %s for dev_data_index %s missing. Adding dummy field."
            % (field_def_num, dev_data_index)
        )
        self._append_dev_field_description(
            dev_data_index=dev_data_index,
            field_def_num=field_def_num
        )
    return fields[field_def_num]
def get_dev_type(dev_data_index, field_def_num):
    """Fetch the developer field type from the global DEV_TYPES registry.

    Raises:
        FitParseError: if either the dev_data_index or the
            field_def_num has not been registered.
    """
    if dev_data_index not in DEV_TYPES:
        raise FitParseError(
            "No such dev_data_index=%s found when looking up field %s"
            % (dev_data_index, field_def_num))

    dev_fields = DEV_TYPES[dev_data_index]['fields']
    if field_def_num not in dev_fields:
        raise FitParseError(
            "No such field %s for dev_data_index %s"
            % (field_def_num, dev_data_index))

    return dev_fields[field_def_num]
def add_dev_field_description(self, message):
    """Register a developer field from a parsed field_description message.

    Reads the description's raw values, fabricates (or rejects, in strict
    mode) a missing developer data id, and stores a DevField keyed by its
    field definition number.
    """
    raw = message.get_raw_value
    dev_data_index = raw('developer_data_index')
    field_def_num = raw('field_definition_number')
    base_type_id = raw('fit_base_type_id')
    # Fall back to a synthetic name when the description has none.
    name = raw('field_name') or "unnamed_dev_field_%s" % field_def_num
    units = raw("units")
    native_field_num = raw('native_field_num')

    if dev_data_index not in self.dev_types:
        if self.check_developer_data:
            raise FitParseError("No such dev_data_index=%s found" % (dev_data_index))
        # Tolerant mode: fabricate the missing developer data id.
        warnings.warn(
            "Dev type for dev_data_index=%s missing. Adding dummy dev type."
            % (dev_data_index)
        )
        self._append_dev_data_id(dev_data_index)

    # Note that nothing in the spec says overwriting an existing field is invalid
    self.dev_types[int(dev_data_index)]['fields'][field_def_num] = DevField(
        dev_data_index=dev_data_index,
        def_num=field_def_num,
        type=BASE_TYPES[base_type_id],
        name=name,
        units=units,
        native_field_num=native_field_num
    )
def add_dev_field_description(message):
    """Register a developer field description in the global DEV_TYPES map.

    Extracts the raw values from a field_description message and stores a
    DevField under DEV_TYPES[dev_data_index]['fields'][field_def_num].

    Raises:
        FitParseError: if the referenced dev_data_index has not been
            registered by a prior developer_data_id message.
    """
    # NOTE: the previous `global DEV_TYPES` declaration was dead code --
    # the dict is only mutated in place, never rebound, so no global
    # statement is required.
    dev_data_index = message.get('developer_data_index').raw_value
    field_def_num = message.get('field_definition_number').raw_value
    base_type_id = message.get('fit_base_type_id').raw_value
    field_name = message.get('field_name').raw_value
    units = message.get('units').raw_value

    # native_field_num is optional; only unwrap its raw value when present.
    native_field_num = message.get('native_field_num')
    if native_field_num is not None:
        native_field_num = native_field_num.raw_value

    if dev_data_index not in DEV_TYPES:
        raise FitParseError("No such dev_data_index=%s found" % (dev_data_index))

    fields = DEV_TYPES[int(dev_data_index)]['fields']
    # Note that nothing in the spec says overwriting an existing field is invalid
    fields[field_def_num] = DevField(
        dev_data_index=dev_data_index,
        def_num=field_def_num,
        type=BASE_TYPES[base_type_id],
        name=field_name,
        units=units,
        native_field_num=native_field_num
    )
def _parse_definition_message(self, header):
    """Parse a FIT definition message and register it by local message number.

    Reads the architecture byte to resolve endianness, then each field
    definition (and developer field definitions when the header flags
    developer data), stores the resulting DefinitionMessage in
    self._local_mesgs, and returns it.

    Raises:
        FitParseError: if a field's size is not a multiple of its base
            type's size.
    """
    # Read reserved byte and architecture byte to resolve endian
    endian = '>' if self._read_struct('xB') else '<'
    # Read rest of header with endian awareness
    global_mesg_num, num_fields = self._read_struct('HB', endian=endian)
    mesg_type = MESSAGE_TYPES.get(global_mesg_num)
    field_defs = []

    for n in range(num_fields):
        field_def_num, field_size, base_type_num = self._read_struct('3B', endian=endian)
        # Try to get field from message type (None if unknown)
        field = mesg_type.fields.get(field_def_num) if mesg_type else None
        # Unknown base type numbers fall back to raw byte encoding.
        base_type = BASE_TYPES.get(base_type_num, BASE_TYPE_BYTE)

        if (field_size % base_type.size) != 0:
            # NOTE: we could fall back to byte encoding if there's any
            # examples in the wild. For now, just throw an exception
            raise FitParseError("Invalid field size %d for type '%s' (expected a multiple of %d)" % (
                field_size, base_type.name, base_type.size))

        # If the field has components that are accumulators
        # start recording their accumulation at 0
        if field and field.components:
            for component in field.components:
                if component.accumulate:
                    accumulators = self._accumulators.setdefault(global_mesg_num, {})
                    accumulators[component.def_num] = 0

        field_defs.append(FieldDefinition(
            field=field,
            def_num=field_def_num,
            base_type=base_type,
            size=field_size,
        ))

    dev_field_defs = []
    if header.is_developer_data:
        # Developer field definitions are prefixed by their own count byte.
        num_dev_fields = self._read_struct('B', endian=endian)
        for n in range(num_dev_fields):
            field_def_num, field_size, dev_data_index = self._read_struct('3B', endian=endian)
            # NOTE(review): this calls the module-level get_dev_type, not a
            # method on self -- confirm which lookup variant is intended here.
            field = get_dev_type(dev_data_index, field_def_num)
            dev_field_defs.append(DevFieldDefinition(
                field=field,
                dev_data_index=dev_data_index,
                def_num=field_def_num,
                size=field_size
            ))

    def_mesg = DefinitionMessage(
        header=header,
        endian=endian,
        mesg_type=mesg_type,
        mesg_num=global_mesg_num,
        field_defs=field_defs,
        dev_field_defs=dev_field_defs,
    )
    # Later data messages with this local_mesg_num decode against def_mesg.
    self._local_mesgs[header.local_mesg_num] = def_mesg
    return def_mesg
def _read_struct(self, fmt, endian='<', data=None, always_tuple=False): fmt_with_endian = endian + fmt size = struct.calcsize(fmt_with_endian) if size <= 0: raise FitParseError("Invalid struct format: %s" % fmt_with_endian) if data is None: data = self._read(size) unpacked = struct.unpack(fmt_with_endian, data) # Flatten tuple if it's got only one value return unpacked if (len(unpacked) > 1) or always_tuple else unpacked[0]
def _read_struct(self, fmt, endian='<', data=None, always_tuple=False): fmt_with_endian = "%s%s" % (endian, fmt) size = struct.calcsize(fmt_with_endian) if size <= 0: raise FitParseError("Invalid struct format: %s" % fmt_with_endian) if data is None: data = self._read(size) if size != len(data): raise FitEOFError("Tried to read %d bytes from .FIT file but got %d" % (size, len(data))) unpacked = struct.unpack(fmt_with_endian, data) # Flatten tuple if it's got only one value return unpacked if (len(unpacked) > 1) or always_tuple else unpacked[0]
def _append_dev_field_description(self, dev_data_index, field_def_num, type=BASE_TYPE_BYTE, name=None, units=None, native_field_num=None):
    """Insert a (possibly dummy) DevField under the given developer index.

    Strict mode (``self.check_developer_data``) rejects an unknown
    dev_data_index; tolerant mode warns and fabricates the missing
    developer data id first.
    """
    known = dev_data_index in self.dev_types
    if not known and self.check_developer_data:
        raise FitParseError("No such dev_data_index=%s found" % (dev_data_index))
    if not known:
        warnings.warn(
            "Dev type for dev_data_index=%s missing. Adding dummy dev type."
            % (dev_data_index)
        )
        self._append_dev_data_id(dev_data_index)

    new_field = DevField(
        dev_data_index=dev_data_index,
        def_num=field_def_num,
        type=type,
        name=name,
        units=units,
        native_field_num=native_field_num
    )
    self.dev_types[dev_data_index]["fields"][field_def_num] = new_field
def _parse_data_message(self, header):
    """Parse a FIT data message against its recorded definition message.

    Resolves each raw value into a FieldData (expanding subfields and
    component fields, applying accumulators and scale/offset), handles
    compressed-timestamp headers, runs the configured data processors,
    and returns the resulting DataMessage.

    Raises:
        FitParseError: if no definition message was recorded for the
            header's local message number.
    """
    def_mesg = self._local_mesgs.get(header.local_mesg_num)
    if not def_mesg:
        raise FitParseError(
            'Got data message with invalid local message type %d' % (
                header.local_mesg_num))

    raw_values = self._parse_raw_values_from_data_message(def_mesg)
    field_datas = [ ]  # TODO: I don't love this name, update on DataMessage too

    # TODO: Maybe refactor this and make it simpler (or at least broken
    # up into sub-functions)
    for field_def, raw_value in zip(
            def_mesg.field_defs + def_mesg.dev_field_defs, raw_values):
        field, parent_field = field_def.field, None
        if field:
            field, parent_field = self._resolve_subfield(
                field, def_mesg, raw_values)

            # Resolve component fields
            if field.components:
                for component in field.components:
                    # Render its raw value
                    cmp_raw_value = component.render(raw_value)

                    # Apply accumulated value
                    if component.accumulate:
                        accumulator = self._accumulators[def_mesg.mesg_num]
                        cmp_raw_value = self._apply_compressed_accumulation(
                            cmp_raw_value, accumulator[component.def_num], component.bits,
                        )
                        accumulator[component.def_num] = cmp_raw_value

                    # Apply scale and offset from component, not from the dynamic field
                    # as they may differ
                    cmp_raw_value = self._apply_scale_offset(
                        component, cmp_raw_value)

                    # Extract the component's dynamic field from def_mesg
                    cmp_field = def_mesg.mesg_type.fields[
                        component.def_num]

                    # Resolve a possible subfield
                    cmp_field, cmp_parent_field = self._resolve_subfield(
                        cmp_field, def_mesg, raw_values)
                    cmp_value = cmp_field.render(cmp_raw_value)

                    # Plop it on field_datas
                    field_datas.append(
                        FieldData(
                            field_def=None,
                            field=cmp_field,
                            parent_field=cmp_parent_field,
                            value=cmp_value,
                            raw_value=cmp_raw_value,
                        ))

            # TODO: Do we care about a base_type and a resolved field mismatch?
            # My hunch is we don't
            value = self._apply_scale_offset(field, field.render(raw_value))
        else:
            # Unknown field: pass the raw value through untransformed.
            value = raw_value

        # Update compressed timestamp field
        if (field_def.def_num == FIELD_TYPE_TIMESTAMP.def_num) and (raw_value is not None):
            self._compressed_ts_accumulator = raw_value

        field_datas.append(
            FieldData(
                field_def=field_def,
                field=field,
                parent_field=parent_field,
                value=value,
                raw_value=raw_value,
            ))

    # Apply timestamp field if we got a header
    if header.time_offset is not None:
        # The header's 5-bit offset is accumulated onto the last full timestamp.
        ts_value = self._compressed_ts_accumulator = self._apply_compressed_accumulation(
            header.time_offset, self._compressed_ts_accumulator, 5,
        )
        field_datas.append(
            FieldData(
                field_def=None,
                field=FIELD_TYPE_TIMESTAMP,
                parent_field=None,
                value=FIELD_TYPE_TIMESTAMP.render(ts_value),
                raw_value=ts_value,
            ))

    # Apply data processors
    for field_data in field_datas:
        # Apply type name processor
        self._processor.run_type_processor(field_data)
        self._processor.run_field_processor(field_data)
        self._processor.run_unit_processor(field_data)

    data_message = DataMessage(header=header, def_mesg=def_mesg, fields=field_datas)
    self._processor.run_message_processor(data_message)
    return data_message