def generate_struct_implementation(self, genfile):
    """
    emit the C implementation code for this struct's member functions.

    the generated functions parse one csv data line and store its
    fields into an instance of the struct.
    """

    # values substituted into the member function templates
    substitutions = {
        "member_count": self.name_struct + "::member_count",
        "delimiter": genfile.DELIMITER,
        "struct_name": self.name_struct,
    }

    # one csv-line-parser list per member-method destination
    destination_parsers = gen_dict_key2lists(genfile.member_methods.keys())

    # sort each member's parsers into the member function they belong to
    for position, (member_name, member_type) in enumerate(self.members.items()):
        for line_parser in member_type.get_parsers(position, member_name):
            destination_parsers[line_parser.destination].append(line_parser)

    # render one snippet per member method and hand them all back
    return [
        genfile.member_methods[method_name].get_snippet(
            method_parsers,
            file_name=self.name_struct_file,
            class_name=self.name_struct,
            data=substitutions,
        )
        for method_name, method_parsers in destination_parsers.items()
    ]
def generate_struct_implementation(self, genfile):
    """
    build the C code snippets implementing this struct's functions.

    the functions fill a struct instance with the fields of one
    csv data line.
    """

    # out-of-class definition for the constexpr member counter
    snippets = [
        ContentSnippet(
            data="constexpr size_t %s::member_count;" % self.name_struct,
            file_name=self.name_struct_file,
            section=ContentSnippet.section_body,
            orderby=self.name_struct,
        )
    ]

    # template placeholders replaced during snippet generation
    replacements = {
        "member_count": self.name_struct + "::member_count",
        "delimiter": genfile.DELIMITER,
        "struct_name": self.name_struct,
    }

    # each destination member function collects its own csv-line parsers
    per_function_parsers = gen_dict_key2lists(genfile.member_methods.keys())

    # distribute every member's parsers to their requested destination
    for member_idx, (member_name, member_type) in enumerate(self.members.items()):
        for csv_parser in member_type.get_parsers(member_idx, member_name):
            per_function_parsers[csv_parser.destination].append(csv_parser)

    # turn every parser bucket into its member function snippet
    snippets.extend(
        genfile.member_methods[function_name].get_snippet(
            bucket,
            file_name=self.name_struct_file,
            class_name=self.name_struct,
            data=replacements,
        )
        for function_name, bucket in per_function_parsers.items()
    )

    return snippets
def read(self, raw, offset, cls=None, members=None):
    """
    recursively read defined binary data from raw at given offset.

    this is used to fill the python classes with data from the binary input.

    raw:     bytes-like buffer that struct.unpack_from reads from
    offset:  byte position in `raw` where reading starts
    cls:     optional class whose data format is used instead of self's;
             data is still stored on self
    members: optional explicit member definition list; when omitted, the
             format is queried from the target class via get_data_format()

    returns the offset directly after the last byte that was consumed.
    """

    # determine whose data format we read; values are stored on self either way.
    if cls:
        target_class = cls
    else:
        target_class = self

    dbg(lazymsg=lambda: "-> 0x%08x => reading %s" % (offset, repr(cls)), lvl=3)

    # break out of the current reading loop when members don't exist in source data file
    # (set when an entry hook returns ContinueReadMember.ABORT below)
    stop_reading_members = False

    if not members:
        members = target_class.get_data_format(allowed_modes=(True, READ_EXPORT, READ, READ_UNKNOWN), flatten_includes=False)

    for is_parent, export, var_name, var_type in members:

        if stop_reading_members:
            # reading was aborted: fill the remaining members with
            # placeholder values instead of consuming more bytes.
            if isinstance(var_type, DataMember):
                replacement_value = var_type.get_empty_value()
            else:
                replacement_value = 0

            setattr(self, var_name, replacement_value)
            continue

        if isinstance(var_type, GroupMember):
            if not issubclass(var_type.cls, Exportable):
                raise Exception("class where members should be included is not exportable: %s" % var_type.cls.__name__)

            if isinstance(var_type, IncludeMembers):
                # call the read function of the referenced class (cls),
                # but store the data to the current object (self).
                offset = var_type.cls.read(self, raw, offset, cls=var_type.cls)
            else:
                # create new instance of referenced class (cls),
                # use its read method to store data to itself,
                # then save the result as a reference named `var_name`
                # TODO: constructor argument passing may be required here.
                grouped_data = var_type.cls()
                offset = grouped_data.read(raw, offset)
                setattr(self, var_name, grouped_data)

        elif isinstance(var_type, MultisubtypeMember):
            # subdata reference implies recursive call for reading the binary data

            # arguments passed to the next-level constructor.
            varargs = dict()

            if var_type.passed_args:
                # NOTE: this mutates the member definition in place
                # (str -> set) on the first read.
                if type(var_type.passed_args) == str:
                    var_type.passed_args = set(var_type.passed_args)
                for passed_member_name in var_type.passed_args:
                    varargs[passed_member_name] = getattr(self, passed_member_name)

            # subdata list length has to be defined beforehand as a object member OR number.
            # it's name or count is specified at the subdata member definition by length.
            list_len = var_type.get_length(self)

            # prepare result storage lists
            if isinstance(var_type, SubdataMember):
                # single-subtype child data list
                setattr(self, var_name, list())
                single_type_subdata = True
            else:
                # multi-subtype child data list: one list per subtype name
                setattr(self, var_name, gen_dict_key2lists(var_type.class_lookup.keys()))
                single_type_subdata = False

            # check if entries need offset checking
            if var_type.offset_to:
                offset_lookup = getattr(self, var_type.offset_to[0])
            else:
                offset_lookup = None

            for i in range(list_len):
                # if datfile offset == 0, entry has to be skipped.
                if offset_lookup:
                    if not var_type.offset_to[1](offset_lookup[i]):
                        continue
                    # TODO: don't read sequentially, use the lookup as new offset?

                if single_type_subdata:
                    # append single data entry to the subdata object list
                    new_data_class = var_type.class_lookup[None]
                else:
                    # to determine the subtype class, read the binary definition
                    # this utilizes an on-the-fly definition of the data to be read.
                    offset = self.read(
                        raw, offset, cls=target_class,
                        members=(((False,) + var_type.subtype_definition),)
                    )

                    # read the variable set by the above read call to
                    # use the read data to determine the denominaton of the member type
                    subtype_name = getattr(self, var_type.subtype_definition[1])

                    # look up the type name to get the subtype class
                    new_data_class = var_type.class_lookup[subtype_name]

                if not issubclass(new_data_class, Exportable):
                    raise Exception("dumped data is not exportable: %s" % new_data_class.__name__)

                # create instance of submember class
                new_data = new_data_class(**varargs)

                # dbg(lazymsg=lambda: "%s: calling read of %s..." % (repr(self), repr(new_data)), lvl=4)

                # recursive call, read the subdata.
                offset = new_data.read(raw, offset, new_data_class)

                # append the new data to the appropriate list
                if single_type_subdata:
                    getattr(self, var_name).append(new_data)
                else:
                    getattr(self, var_name)[subtype_name].append(new_data)

        else:
            # reading binary data, as this member is no reference but actual content.

            data_count = 1
            is_custom_member = False

            if type(var_type) == str:
                # plain string type definition, e.g. "int32_t" or "char[name]"
                # TODO: generate and save member type on the fly
                # instead of just reading
                is_array = vararray_match.match(var_type)

                if is_array:
                    struct_type = is_array.group(1)
                    data_count = is_array.group(2)
                    if struct_type == "char":
                        struct_type = "char[]"

                    if integer_match.match(data_count):
                        # integer length
                        data_count = int(data_count)
                    else:
                        # dynamic length specified by member name
                        data_count = getattr(self, data_count)
                else:
                    struct_type = var_type
                    data_count = 1

            elif isinstance(var_type, DataMember):
                # special type requires having set the raw data type
                struct_type = var_type.raw_type
                data_count = var_type.get_length(self)
                is_custom_member = True

            else:
                raise Exception("unknown data member definition %s for member '%s'" % (var_type, var_name))

            if data_count < 0:
                raise Exception("invalid length %d < 0 in %s for member '%s'" % (data_count, var_type, var_name))

            if struct_type not in struct_type_lookup:
                raise Exception("%s: member %s requests unknown data type %s" % (repr(self), var_name, struct_type))

            if export == READ_UNKNOWN:
                # for unknown variables, generate uid for the unknown memory location
                var_name = "unknown-0x%08x" % offset

            # lookup c type to python struct scan type
            symbol = struct_type_lookup[struct_type]

            # read that stuff!!11
            dbg(lazymsg=lambda: " @0x%08x: reading %s<%s> as '< %d%s'" % (offset, var_name, var_type, data_count, symbol), lvl=4)

            # '<' forces little-endian byte order for the unpack
            struct_format = "< %d%s" % (data_count, symbol)
            result = struct.unpack_from(struct_format, raw, offset)

            dbg(lazymsg=lambda: " \_ = %s" % (result, ), lvl=4)

            if is_custom_member:
                if not var_type.verify_read_data(self, result):
                    raise Exception("invalid data when reading %s at offset %# 08x" % (var_name, offset))

            # TODO: move these into a read entry hook/verification method
            if symbol == "s":
                # stringify char array
                result = zstr(result[0])
            elif data_count == 1:
                # store first tuple element
                result = result[0]

                if symbol == "f":
                    import math
                    if not math.isfinite(result):
                        raise Exception("invalid float when reading %s at offset %# 08x" % (var_name, offset))

            # increase the current file position by the size we just read
            offset += struct.calcsize(struct_format)

            # run entry hook for non-primitive members
            if is_custom_member:
                result = var_type.entry_hook(result)

                if result == ContinueReadMember.ABORT:
                    # don't go through all other members of this class!
                    stop_reading_members = True

            # store member's data value
            setattr(self, var_name, result)

    dbg(lazymsg=lambda: "<- 0x%08x <= finished %s" % (offset, repr(cls)), lvl=3)
    return offset