def _get_data_subblocks(name):
    """Return an Adapter that maps GIF data sub-blocks to one byte string.

    On the wire a value is stored as a sequence of sub-blocks, each a
    one-byte size followed by that many data bytes, terminated by a
    zero-size block.  The adapter exposes the concatenated payload as a
    single string.
    """
    def _string_to_blocks(obj, ctx):
        # Split the string into <=255-byte chunks, wrap each in a container
        # with its size prefix, and append the zero-size terminator block.
        pieces = [obj[pos:pos + 255] for pos in xrange(0, len(obj), 255)]
        block_list = [
            construct.Container(block_size=len(piece), data_values=piece)
            for piece in pieces
        ]
        block_list.append(construct.Container(block_size=0, data_values=''))
        return construct.Container(blocks=block_list)

    def _blocks_to_string(obj, ctx):
        # Concatenate every sub-block payload (terminator contributes '').
        return ''.join(dsb.data_values for dsb in obj.blocks)

    return construct.ExprAdapter(
        construct.Struct(
            name,
            construct.RepeatUntil(
                lambda obj, ctx: obj.block_size == 0x00,
                construct.Struct(
                    'blocks',
                    construct.ULInt8('block_size'),
                    construct.Bytes('data_values', lambda ctx: ctx.block_size),
                ),
            ),
        ),
        encoder=_string_to_blocks,
        decoder=_blocks_to_string,
    )
def _struct(cls):
    """Return the construct Struct describing one serialized parameter.

    Layout: a one-byte ParamType tag followed by a type-dependent value;
    any tag other than Int/Float/Flag/Str falls through to the default
    case and is read as a 'Var' variable-name string.
    """
    return construct.Struct(
        "type" / construct.Enum(construct.Byte, ParamType),
        "value" / construct.Switch(
            construct.this.type,
            {
                # Signed 32-bit little-endian integer.
                "Int": construct.Int32sl,
                # 10-byte float, left-padded to 16 bytes on parse for
                # numpy.longdouble (assumes x86 80-bit extended precision
                # longdouble -- TODO confirm on non-x86 builds).
                "Float": construct.ExprAdapter(
                    construct.Bytes(10),
                    lambda obj, ctx: numpy.frombuffer(
                        obj.rjust(16, b"\x00"), dtype=numpy.longdouble),
                    lambda obj, ctx: numpy.longdouble(obj).tobytes()[-10:],
                ),
                "Flag": construct.Byte,
                # Length-prefixed CP932 (Shift-JIS) string.
                "Str": construct.PascalString(construct.Int32ul, "cp932"),
            },
            # else 'Var' variable name type
            construct.Select(
                construct.PascalString(construct.Int32ul, "cp932"),
            ),
        ),
    )
def _struct(cls):
    """Return the construct Struct describing one serialized parameter.

    Layout: a one-byte ParamType tag followed by a type-dependent value;
    any tag other than Int/Float/Flag/Str falls through to the default
    case and is read as a 'Var' variable-name string.
    """
    return construct.Struct(
        'type' / construct.Enum(construct.Byte, ParamType),
        'value' / construct.Switch(
            construct.this.type,
            {
                # Int32sl (signed), not Int32ul: integer parameters can be
                # negative, and the sibling parameter structs in this module
                # already read this field as signed.
                'Int': construct.Int32sl,
                # 10-byte float, left-padded to 16 bytes on parse for
                # numpy.longdouble (assumes x86 80-bit extended precision
                # longdouble -- TODO confirm on non-x86 builds).
                'Float': construct.ExprAdapter(
                    construct.Bytes(10),
                    lambda obj, ctx: numpy.frombuffer(obj.rjust(16, b'\x00'),
                                                      dtype=numpy.longdouble),
                    lambda obj, ctx: numpy.longdouble(obj).tobytes()[-10:]
                ),
                'Flag': construct.Byte,
                # Length-prefixed CP932 (Shift-JIS) string.
                'Str': construct.PascalString(construct.Int32ul, 'cp932'),
            },
            # else 'Var' variable name type
            construct.Select(
                construct.PascalString(construct.Int32ul, 'cp932'),
            ),
        ),
    )
def CString(terminator=b"\x00", encoding=None):
    r"""Null-terminated string supporting wide (utf-16 / utf-32) encodings.

    This is an alternative of implementation of construct.CString() that
    fixes the issues with working with utf-16 or utf-32 encoded strings
    (github.com/construct/construct/issues/388)

    >>> CString().parse(b'hello\x00')
    'hello'
    >>> CString(encoding='utf-16').parse(b'\xff\xfeh\x00e\x00l\x00l\x00o\x00\x00\x00')  # FFFE is BOM for utf-16-le
    u'hello'
    >>> CString(encoding='utf-16').parse(b'h\x00e\x00l\x00l\x00o\x00\x00\x00')
    u'hello'
    >>> CString(encoding='utf-16').build(u'hello')
    '\xff\xfeh\x00e\x00l\x00l\x00o\x00\x00\x00'
    >>> CString(encoding='utf-32').build(u'hello')
    '\xff\xfe\x00\x00h\x00\x00\x00e\x00\x00\x00l\x00\x00\x00l\x00\x00\x00o\x00\x00\x00\x00\x00\x00\x00'

    Make sure to specify 'le' or 'be' in the encoding if you don't want
    BOM markers when building.

    >>> CString(encoding='utf-32-le').build(u'hello')
    'h\x00\x00\x00e\x00\x00\x00l\x00\x00\x00l\x00\x00\x00o\x00\x00\x00\x00\x00\x00\x00'
    >>> CString(encoding='utf-32-be').build(u'hello')
    '\x00\x00\x00h\x00\x00\x00e\x00\x00\x00l\x00\x00\x00l\x00\x00\x00o\x00\x00\x00\x00'
    """
    wide = encoding and ('16' in encoding or '32' in encoding)
    if not wide:
        # Narrow encodings work fine with the stock implementation.
        return construct.CString(terminators=terminator, encoding=encoding)

    # Code-unit width in bytes: 2 for utf-16, 4 for utf-32.
    unit_size = 4 if '32' in encoding else 2
    if len(terminator) == 1:
        terminator = terminator * unit_size
    assert len(terminator) == unit_size

    def _encode(obj, ctx):
        # Split the encoded bytes into code units; terminate with one
        # full-width terminator unit.
        units = list(
            map(b''.join, chunk(py3compat.iteratebytes(obj), unit_size)))
        units.append(terminator)
        return units

    def _decode(obj, ctx):
        # Drop the trailing terminator unit and glue the units together.
        return b''.join(obj[:-1])

    return construct.StringEncoded(
        construct.ExprAdapter(
            RepeatUntil(lambda obj, lst, ctx: obj == terminator,
                        Bytes(unit_size)),
            encoder=_encode,
            decoder=_decode),
        encoding)
# NOTE(review): fragment -- the leading `return obj` is the tail of a
# function whose header is outside this chunk, and the trailing
# `func_sig_t = con.Struct(` is left open and continues elsewhere.
    return obj


#######################################
#
# Basic types & helpers
#
#######################################

# String prefixed with a variable int size
VarString = con.PascalString(IdaVarInt32, "utf8")
# Bytes buffer prefixed with a variable int size
VarBuff = con.Prefixed(IdaVarInt32, con.GreedyBytes)
# IDA typedefs
# NOTE(review): stored on the wire offset by one (parse subtracts 1,
# build adds 1) -- presumably so BADADDR (-1) round-trips as 0; confirm
# against the Lumina protocol docs.
ea_t = asize_t = adiff_t = con.ExprAdapter(IdaVarInt64, con.obj_ - 1, con.obj_ + 1)

# "template" for defining object list, prefixed with a variable int size
def ObjectList(obj):
    return con.PrefixedArray(IdaVarInt32, obj)

#######################################
#
# Lumina types
#
#######################################

# function signature
func_sig_t = con.Struct(
# NOTE(review): fragment -- this chunk starts mid-expression (the
# enclosing Struct/compression wrapper opens outside this chunk, hence
# the unbalanced closing parens below) and ends at a dangling
# `class Permalink:` header whose body continues elsewhere.
        construct.Struct(
            # One-bit flag + 7-bit rotation packed into a single byte.
            header=construct.BitStruct(
                has_seed_hash=construct.Rebuild(
                    # NOTE(review): `!= None` should be `is not None`
                    # (PEP 8); left unchanged here.
                    construct.Flag, construct.this._.seed_hash != None),
                bytes_rotation=construct.Rebuild(
                    construct.BitsInteger(7),
                    lambda ctx: single_byte_hash(ctx._.generator_params) >> 1,
                )),
            # Only present when the header flag is set.
            seed_hash=construct.If(construct.this.header.has_seed_hash,
                                   construct.Bytes(5)),
            randovania_version=construct.Bytes(4),  # short git hash
            # Payload is byte-rotated; the rotator is inverted on parse.
            generator_params=construct.ExprAdapter(
                construct.Prefixed(construct.VarInt, construct.GreedyBytes),
                # parsing
                decoder=create_rotator(inverse=True),
                # building
                encoder=create_rotator(inverse=False),
            ),
        ))),
    # 2-byte blake2b digest over the serialized fields above.
    permalink_checksum=construct.Checksum(
        construct.Bytes(2),
        lambda data: hashlib.blake2b(data, digest_size=2).digest(),
        construct.this.fields.data,
    ),
    end=construct.Terminated,
)


@dataclasses.dataclass(frozen=True)
class Permalink:
class AslParser(interface.BaseParser):
    """Parser for ASL log files."""

    NAME = 'asl_log'
    DESCRIPTION = u'Parser for ASL log files.'

    # 12-byte signature at offset 0 of every ASL database file.
    ASL_MAGIC = 'ASL DB\x00\x00\x00\x00\x00\x00'

    # If not right assigned, the value is "-1".
    ASL_NO_RIGHTS = 'ffffffff'

    # Priority level (criticity)
    ASL_MESSAGE_PRIORITY = {
        0: 'EMERGENCY',
        1: 'ALERT',
        2: 'CRITICAL',
        3: 'ERROR',
        4: 'WARNING',
        5: 'NOTICE',
        6: 'INFO',
        7: 'DEBUG'}

    # ASL File header.
    # magic: magic number that identify ASL files.
    # version: version of the file.
    # offset: first record in the file.
    # timestamp: epoch time when the first entry was written.
    # last_offset: last record in the file.
    ASL_HEADER_STRUCT = construct.Struct(
        'asl_header_struct',
        construct.String('magic', 12),
        construct.UBInt32('version'),
        construct.UBInt64('offset'),
        construct.UBInt64('timestamp'),
        construct.UBInt32('cache_size'),
        construct.UBInt64('last_offset'),
        construct.Padding(36))

    # The record structure is:
    # [HEAP][STRUCTURE][4xExtraField][2xExtraField]*[PreviousEntry]
    # Record static structure.
    # tam_entry: it contains the number of bytes from this file position
    #            until the end of the record, without counts itself.
    # next_offset: next record. If is equal to 0x00, it is the last record.
    # asl_message_id: integer that has the numeric identification of the
    #                 event.
    # timestamp: Epoch integer that has the time when the entry was created.
    # nanosecond: nanosecond to add to the timestamp.
    # level: level of priority.
    # pid: process identification that ask to save the record.
    # uid: user identification that has lunched the process.
    # gid: group identification that has lunched the process.
    # read_uid: identification id of a user. Only applied if is not -1
    #           (all FF). Only root and this user can read the entry.
    # read_gid: the same than read_uid, but for the group.
    ASL_RECORD_STRUCT = construct.Struct(
        'asl_record_struct',
        construct.Padding(2),
        construct.UBInt32('tam_entry'),
        construct.UBInt64('next_offset'),
        construct.UBInt64('asl_message_id'),
        construct.UBInt64('timestamp'),
        construct.UBInt32('nanosec'),
        construct.UBInt16('level'),
        construct.UBInt16('flags'),
        construct.UBInt32('pid'),
        construct.UBInt32('uid'),
        construct.UBInt32('gid'),
        construct.UBInt32('read_uid'),
        construct.UBInt32('read_gid'),
        construct.UBInt64('ref_pid'))

    ASL_RECORD_STRUCT_SIZE = ASL_RECORD_STRUCT.sizeof()

    # 8-byte fields, they can be:
    # - String: [Nibble = 1000 (8)][Nibble = Length][7 Bytes = String].
    # - Integer: integer that has the byte position in the file that points
    #            to an ASL_RECORD_DYN_VALUE struct. If the value of the
    #            integer is equal to 0, it means that it has not data (skip).

    # If the field is a String, we use this structure to decode each
    # integer byte in the corresponding character (ASCII Char).
    ASL_OCTET_STRING = construct.ExprAdapter(
        construct.Octet('string'),
        encoder=lambda obj, ctx: ord(obj),
        decoder=lambda obj, ctx: chr(obj))

    # Field string structure. If the first bit is 1, it means that it
    # is a String (1000) = 8, then the next nibble has the number of
    # characters. The last 7 bytes are the number of bytes.
    ASL_STRING = construct.BitStruct(
        'string',
        construct.Flag('type'),
        construct.Bits('filler', 3),
        construct.If(lambda ctx: ctx.type, construct.Nibble('string_length')),
        construct.If(lambda ctx: ctx.type,
                     construct.Array(7, ASL_OCTET_STRING)))

    # 8-byte pointer to a byte position in the file.
    ASL_POINTER = construct.UBInt64('pointer')

    # Dynamic data structure pointed by a pointer that contains a String:
    # [2 bytes padding][4 bytes lenght of String][String].
    ASL_RECORD_DYN_VALUE = construct.Struct(
        'asl_record_dyn_value',
        construct.Padding(2),
        construct.PascalString(
            'value', length_field=construct.UBInt32('length')))

    def Parse(self, parser_context, file_entry):
        """Extract entries from an ASL file.

        Args:
          parser_context: A parser context object (instance of ParserContext).
          file_entry: A file entry object (instance of dfvfs.FileEntry).
        """
        file_object = file_entry.GetFileObject()
        file_object.seek(0, os.SEEK_SET)

        try:
            header = self.ASL_HEADER_STRUCT.parse_stream(file_object)
        except (IOError, construct.FieldError) as exception:
            file_object.close()
            raise errors.UnableToParseFile(
                u'Unable to parse ASL Header with error: {0:s}.'.format(
                    exception))

        if header.magic != self.ASL_MAGIC:
            file_object.close()
            raise errors.UnableToParseFile(
                u'Not an ASL Header, unable to parse.')

        # Get the first and the last entry.
        offset = header.offset
        old_offset = header.offset
        last_offset_header = header.last_offset

        # If the ASL file has entries.
        if offset:
            event_object, offset = self.ReadAslEvent(file_object, offset)
            while event_object:
                parser_context.ProduceEvent(
                    event_object, parser_name=self.NAME,
                    file_entry=file_entry)

                # TODO: an anomaly object must be emitted once that is
                # implemented.
                # Sanity check, the last read element must be the same as
                # indicated by the header.
                if offset == 0 and old_offset != last_offset_header:
                    logging.warning(u'Parsing ended before the header ends.')
                old_offset = offset
                event_object, offset = self.ReadAslEvent(file_object, offset)

        file_object.close()

    def ReadAslEvent(self, file_object, offset):
        """Returns an AslEvent from a single ASL entry.

        Args:
          file_object: a file-like object that points to an ASL file.
          offset: offset where the static part of the entry starts.

        Returns:
          An event object constructed from a single ASL record.
        """
        # The heap of the entry is saved to try to avoid seek (performance
        # issue). It has the real start position of the entry.
        dynamic_start = file_object.tell()
        # NOTE(review): this read happens before the `offset` sanity check
        # below; a zero offset makes read() receive a negative count and
        # consume to end-of-file before returning. Harmless today but worth
        # reordering.
        dynamic_part = file_object.read(offset - file_object.tell())

        if not offset:
            return None, None

        try:
            record_header = self.ASL_RECORD_STRUCT.parse_stream(file_object)
        except (IOError, construct.FieldError) as exception:
            logging.warning(
                u'Unable to parse ASL event with error: {0:s}'.format(
                    exception))
            return None, None

        # Variable tam_fields = is the real length of the dynamic fields.
        # We have this:
        #     [Record_Struct] + [Dynamic_Fields] + [Pointer_Entry_Before]
        # In Record_Struct we have a field called tam_entry, where it has the
        # number of bytes until the end of the record from the position that
        # the field is. The tam_entry is between the 2th and the 6th byte in
        # the [Record_Struct].
        # tam_entry = ([Record_Struct]-6)+[Dynamic_Fields]+[Pointer_Entry_Before]
        # Also, we do not need [Point_Entry_Before] and then we delete the
        # size of [Point_Entry_Before] that it is 8 bytes (8):
        # [Dynamic_Fields] = tam_entry - [Record_Struct] + 6 - 8
        # [Dynamic_Fields] = tam_entry - [Record_Struct] - 2
        tam_fields = record_header.tam_entry - self.ASL_RECORD_STRUCT_SIZE - 2

        # Dynamic part of the entry that contains minimal four fields of 8
        # bytes plus 2x[8bytes] fields for each extra ASL_Field.
        # The four first fields are always the Host, Sender, Facility and
        # Message. After the four first fields, the entry might have extra
        # ASL_Fields. For each extra ASL_field, it has a pair of 8-byte
        # fields where the first 8 bytes contains the name of the extra
        # ASL_field and the second 8 bytes contains the text of the extra
        # field. All of this 8-byte field can be saved using one of these
        # three differents types:
        # - Null value ('0000000000000000'): nothing to do.
        # - String: It is string if first bit = 1 or first nibble = 8 (1000).
        #           Second nibble has the length of string.
        #           The next 7 bytes have the text characters of the string
        #           padding the end with null characters: '0x00'.
        #           Example: [8468 6964 6400 0000]
        #                    [8] String, [4] length, value: [68 69 64 64] = hidd.
        # - Pointer: static position in the file to a special struct
        #            implemented as an ASL_RECORD_DYN_VALUE.
        #            Example: [0000 0000 0000 0077]
        #            It points to the file position 0x077 that has a
        #            ASL_RECORD_DYN_VALUE structure.
        values = []
        while tam_fields > 0:
            try:
                raw_field = file_object.read(8)
            except (IOError, construct.FieldError) as exception:
                # NOTE(review): '{0:d}' cannot format an exception object and
                # would itself raise ValueError here; should be '{0:s}'.
                logging.warning(
                    u'Unable to parse ASL event with error: {0:d}'.format(
                        exception))
                return None, None
            try:
                # Try to read as a String.
                field = self.ASL_STRING.parse(raw_field)
                values.append(''.join(field.string[0:field.string_length]))
                # Go to parse the next extra field.
                tam_fields -= 8
                continue
            except ValueError:
                pass
            # If it is not a string, it must be a pointer.
            try:
                field = self.ASL_POINTER.parse(raw_field)
            except ValueError as exception:
                logging.warning(
                    u'Unable to parse ASL event with error: {0:s}'.format(
                        exception))
                return None, None
            if field != 0:
                # The next IF ELSE is only for performance issues, avoiding
                # seek. If the pointer points a lower position than where the
                # actual entry starts, it means that it points to a previuos
                # entry.
                pos = field - dynamic_start
                # Bigger or equal 0 means that the data is in the actual
                # entry.
                if pos >= 0:
                    try:
                        values.append((self.ASL_RECORD_DYN_VALUE.parse(
                            dynamic_part[pos:])).value.partition('\x00')[0])
                    except (IOError, construct.FieldError) as exception:
                        logging.warning(
                            u'Unable to parse ASL event with error: {0:s}'.
                            format(exception))
                        return None, None
                else:
                    # Only if it is a pointer that points to the
                    # heap from another entry we use the seek method.
                    main_position = file_object.tell()
                    # If the pointer is in a previous entry.
                    if main_position > field:
                        file_object.seek(field - main_position, os.SEEK_CUR)
                        try:
                            values.append(
                                (self.ASL_RECORD_DYN_VALUE.parse_stream(
                                    file_object)).value.partition('\x00')[0])
                        except (IOError, construct.FieldError):
                            logging.warning((
                                u'The pointer at {0:d} (0x{0:x}) points to invalid '
                                u'information.'
                            ).format(
                                main_position - self.ASL_POINTER.sizeof()))
                        # Come back to the position in the entry.
                        _ = file_object.read(
                            main_position - file_object.tell())
                    else:
                        _ = file_object.read(field - main_position)
                        values.append((self.ASL_RECORD_DYN_VALUE.parse_stream(
                            file_object)).value.partition('\x00')[0])
                        # Come back to the position in the entry.
                        file_object.seek(
                            main_position - file_object.tell(), os.SEEK_CUR)
            # Next extra field: 8 bytes more.
            tam_fields -= 8

        # Read the last 8 bytes of the record that points to the previous
        # entry.
        _ = file_object.read(8)

        # Parsed section, we translate the read data to an appropriate
        # format.
        microsecond = record_header.nanosec // 1000
        timestamp = timelib.Timestamp.FromPosixTimeWithMicrosecond(
            record_header.timestamp, microsecond)
        record_position = offset
        message_id = record_header.asl_message_id
        level = u'{0} ({1})'.format(
            self.ASL_MESSAGE_PRIORITY[record_header.level],
            record_header.level)
        # If the value is -1 (FFFFFFFF), it can be read by everyone.
        if record_header.read_uid != int(self.ASL_NO_RIGHTS, 16):
            read_uid = record_header.read_uid
        else:
            read_uid = 'ALL'
        if record_header.read_gid != int(self.ASL_NO_RIGHTS, 16):
            read_gid = record_header.read_gid
        else:
            read_gid = 'ALL'

        # Parsing the dynamic values (text or pointers to position with
        # text). The first four are always the host, sender, facility, and
        # message.
        computer_name = values[0]
        sender = values[1]
        facility = values[2]
        message = values[3]

        # If the entry has an extra fields, they works as a pairs:
        # The first is the name of the field and the second the value.
        extra_information = ''
        if len(values) > 4:
            values = values[4:]
            for index in xrange(0, len(values) // 2):
                extra_information += (u'[{0}: {1}]'.format(
                    values[index * 2], values[(index * 2) + 1]))

        # Return the event and the offset for the next entry.
        return AslEvent(
            timestamp, record_position, message_id, level, record_header,
            read_uid, read_gid, computer_name, sender, facility, message,
            extra_information), record_header.next_offset
class ASLParser(interface.FileObjectParser):
    """Parser for ASL log files."""

    _INITIAL_FILE_OFFSET = None

    NAME = u'asl_log'
    DESCRIPTION = u'Parser for ASL log files.'

    # 12-byte signature at offset 0 of every ASL database file.
    _ASL_MAGIC = b'ASL DB\x00\x00\x00\x00\x00\x00'

    # ASL File header.
    # magic: magic number that identify ASL files.
    # version: version of the file.
    # offset: first record in the file.
    # timestamp: time when the first entry was written.
    #     Contains the number of seconds since January 1, 1970 00:00:00 UTC.
    # last_offset: last record in the file.
    _ASL_HEADER_STRUCT = construct.Struct(
        u'asl_header_struct',
        construct.String(u'magic', 12),
        construct.UBInt32(u'version'),
        construct.UBInt64(u'offset'),
        construct.UBInt64(u'timestamp'),
        construct.UBInt32(u'cache_size'),
        construct.UBInt64(u'last_offset'),
        construct.Padding(36))

    # The record structure is:
    # [HEAP][STRUCTURE][4xExtraField][2xExtraField]*[PreviousEntry]
    # Record static structure.
    # tam_entry: it contains the number of bytes from this file position
    #            until the end of the record, without counts itself.
    # next_offset: next record. If is equal to 0x00, it is the last record.
    # asl_message_id: integer that has the numeric identification of the
    #                 event.
    # timestamp: the entry creation date and time.
    #     Contains the number of seconds since January 1, 1970 00:00:00 UTC.
    # nanosecond: nanosecond to add to the timestamp.
    # level: level of priority.
    # pid: process identification that ask to save the record.
    # uid: user identification that has lunched the process.
    # gid: group identification that has lunched the process.
    # read_uid: identification id of a user. Only applied if is not -1
    #           (all FF). Only root and this user can read the entry.
    # read_gid: the same than read_uid, but for the group.
    _ASL_RECORD_STRUCT = construct.Struct(
        u'asl_record_struct',
        construct.Padding(2),
        construct.UBInt32(u'tam_entry'),
        construct.UBInt64(u'next_offset'),
        construct.UBInt64(u'asl_message_id'),
        construct.UBInt64(u'timestamp'),
        construct.UBInt32(u'nanosec'),
        construct.UBInt16(u'level'),
        construct.UBInt16(u'flags'),
        construct.UBInt32(u'pid'),
        construct.UBInt32(u'uid'),
        construct.UBInt32(u'gid'),
        construct.UBInt32(u'read_uid'),
        construct.UBInt32(u'read_gid'),
        construct.UBInt64(u'ref_pid'))

    _ASL_RECORD_STRUCT_SIZE = _ASL_RECORD_STRUCT.sizeof()

    # 8-byte fields, they can be:
    # - String: [Nibble = 1000 (8)][Nibble = Length][7 Bytes = String].
    # - Integer: integer that has the byte position in the file that points
    #            to an ASL_RECORD_DYN_VALUE struct. If the value of the
    #            integer is equal to 0, it means that it has not data (skip).

    # If the field is a String, we use this structure to decode each
    # integer byte in the corresponding character (ASCII Char).
    _ASL_OCTET_STRING = construct.ExprAdapter(
        construct.Octet(u'string'),
        encoder=lambda obj, ctx: ord(obj),
        decoder=lambda obj, ctx: chr(obj))

    # Field string structure. If the first bit is 1, it means that it
    # is a String (1000) = 8, then the next nibble has the number of
    # characters. The last 7 bytes are the number of bytes.
    _ASL_STRING = construct.BitStruct(
        u'string',
        construct.Flag(u'type'),
        construct.Bits(u'filler', 3),
        construct.If(lambda ctx: ctx.type, construct.Nibble(u'string_length')),
        construct.If(lambda ctx: ctx.type,
                     construct.Array(7, _ASL_OCTET_STRING)))

    # 8-byte pointer to a byte position in the file.
    _ASL_POINTER = construct.UBInt64(u'pointer')

    # Dynamic data structure pointed by a pointer that contains a String:
    # [2 bytes padding][4 bytes size of String][String].
    _ASL_RECORD_DYN_VALUE = construct.Struct(
        u'asl_record_dyn_value',
        construct.Padding(2),
        construct.UBInt32(u'size'),
        construct.Bytes(u'value', lambda ctx: ctx.size))

    def ParseFileObject(self, parser_mediator, file_object, **kwargs):
        """Parses an ALS file-like object.

        Args:
          parser_mediator: a parser mediator object (instance of
                           ParserMediator).
          file_object: a file-like object.

        Raises:
          UnableToParseFile: when the file cannot be parsed.
        """
        file_object.seek(0, os.SEEK_SET)

        try:
            header = self._ASL_HEADER_STRUCT.parse_stream(file_object)
        except (IOError, construct.FieldError) as exception:
            raise errors.UnableToParseFile(
                u'Unable to parse ASL Header with error: {0:s}.'.format(
                    exception))

        if header.magic != self._ASL_MAGIC:
            raise errors.UnableToParseFile(
                u'Not an ASL Header, unable to parse.')

        offset = header.offset
        if not offset:
            return

        header_last_offset = header.last_offset

        previous_offset = offset
        event_object, offset = self.ReadASLEvent(
            parser_mediator, file_object, offset)
        while event_object:
            # Sanity check, the last read element must be the same as
            # indicated by the header.
            if offset == 0 and previous_offset != header_last_offset:
                parser_mediator.ProduceParseError(
                    u'Unable to parse header. Last element header does not match '
                    u'header offset.')
            previous_offset = offset
            event_object, offset = self.ReadASLEvent(
                parser_mediator, file_object, offset)

    def ReadASLEvent(self, parser_mediator, file_object, offset):
        """Reads an ASL record at a specific offset.

        Args:
          parser_mediator: a parser mediator object (instance of
                           ParserMediator).
          file_object: a file-like object that points to an ASL file.
          offset: an integer containing the offset of the ASL record.

        Returns:
          A tuple of an event object extracted from the ASL record,
          and the offset to the next ASL record in the file.
        """
        # The heap of the entry is saved to try to avoid seek (performance
        # issue). It has the real start position of the entry.
        dynamic_data_offset = file_object.tell()
        # NOTE(review): this read happens before the `offset` sanity check
        # below; a zero offset makes read() receive a negative count and
        # consume to end-of-file before returning.
        try:
            dynamic_data = file_object.read(offset - dynamic_data_offset)
        except IOError as exception:
            parser_mediator.ProduceParseError(
                u'unable to read ASL record dynamic data with error: {0:s}'.
                format(exception))
            return None, None

        if not offset:
            return None, None

        try:
            record_struct = self._ASL_RECORD_STRUCT.parse_stream(file_object)
        except (IOError, construct.FieldError) as exception:
            parser_mediator.ProduceParseError(
                u'unable to parse ASL record with error: {0:s}'.format(
                    exception))
            return None, None

        # Variable tam_fields = is the real length of the dynamic fields.
        # We have this:
        #     [Record_Struct] + [Dynamic_Fields] + [Pointer_Entry_Before]
        # In Record_Struct we have a field called tam_entry, where it has the
        # number of bytes until the end of the record from the position that
        # the field is. The tam_entry is between the 2th and the 6th byte in
        # the [Record_Struct].
        # tam_entry = ([Record_Struct]-6)+[Dynamic_Fields]+[Pointer_Entry_Before]
        # Also, we do not need [Point_Entry_Before] and then we delete the
        # size of [Point_Entry_Before] that it is 8 bytes (8):
        # [Dynamic_Fields] = tam_entry - [Record_Struct] + 6 - 8
        # [Dynamic_Fields] = tam_entry - [Record_Struct] - 2
        tam_fields = record_struct.tam_entry - self._ASL_RECORD_STRUCT_SIZE - 2

        # Dynamic part of the entry that contains minimal four fields of 8
        # bytes plus 2 x [8 bytes] fields for each extra ASL_Field.
        # The four first fields are always the Host, Sender, Facility and
        # Message. After the four first fields, the entry might have extra
        # ASL_Fields. For each extra ASL_field, it has a pair of 8-byte
        # fields where the first 8 bytes contains the name of the extra
        # ASL_field and the second 8 bytes contains the text of the extra
        # field. All of this 8-byte field can be saved using one of these
        # three different types:
        # - Null value ('0000000000000000'): nothing to do.
        # - String: It is string if first bit = 1 or first nibble = 8 (1000).
        #           Second nibble has the length of string.
        #           The next 7 bytes have the text characters of the string
        #           padding the end with null characters: '0x00'.
        #           Example: [8468 6964 6400 0000]
        #                    [8] String, [4] length, value: [68 69 64 64] = hidd.
        # - Pointer: static position in the file to a special struct
        #            implemented as an ASL_RECORD_DYN_VALUE.
        #            Example: [0000 0000 0000 0077]
        #            It points to the file position 0x077 that has a
        #            ASL_RECORD_DYN_VALUE structure.
        values = []
        while tam_fields > 0:
            try:
                field_data = file_object.read(8)
            except IOError as exception:
                parser_mediator.ProduceParseError(
                    u'unable to read ASL field with error: {0:s}'.format(
                        exception))
                return None, None

            # Try to read the field data as a string.
            try:
                asl_string_struct = self._ASL_STRING.parse(field_data)
                string_data = b''.join(
                    asl_string_struct.string[
                        0:asl_string_struct.string_length])
                values.append(string_data)

                # Go to parse the next extra field.
                tam_fields -= 8
                continue
            except ValueError:
                pass

            # If the field is not a string it must be a pointer.
            try:
                pointer_value = self._ASL_POINTER.parse(field_data)
            except ValueError as exception:
                parser_mediator.ProduceParseError(
                    u'unable to parse ASL field with error: {0:s}'.format(
                        exception))
                return None, None

            if not pointer_value:
                # Next extra field: 8 bytes more.
                tam_fields -= 8
                continue

            # The next IF ELSE is only for performance issues, avoiding seek.
            # If the pointer points a lower position than where the actual
            # entry starts, it means that it points to a previous entry.
            pos = pointer_value - dynamic_data_offset

            # Greater or equal 0 means that the data is in the actual entry.
            if pos >= 0:
                try:
                    dyn_value_struct = self._ASL_RECORD_DYN_VALUE.parse(
                        dynamic_data[pos:])
                    dyn_value = dyn_value_struct.value.partition(b'\x00')[0]
                    values.append(dyn_value)
                except (IOError, construct.FieldError) as exception:
                    parser_mediator.ProduceParseError((
                        u'unable to parse ASL record dynamic value with error: '
                        u'{0:s}').format(exception))
                    return None, None
            else:
                # Only if it is a pointer that points to the
                # heap from another entry we use the seek method.
                main_position = file_object.tell()
                # If the pointer is in a previous entry.
                if main_position > pointer_value:
                    file_object.seek(
                        pointer_value - main_position, os.SEEK_CUR)
                    try:
                        dyn_value_struct = (
                            self._ASL_RECORD_DYN_VALUE.parse_stream(
                                file_object))
                        dyn_value = dyn_value_struct.value.partition(
                            b'\x00')[0]
                        values.append(dyn_value)
                    except (IOError, construct.FieldError):
                        parser_mediator.ProduceParseError((
                            u'the pointer at {0:d} (0x{0:08x}) points to invalid '
                            u'information.'
                        ).format(
                            main_position - self._ASL_POINTER.sizeof()))
                    # Come back to the position in the entry.
                    _ = file_object.read(
                        main_position - file_object.tell())
                else:
                    _ = file_object.read(pointer_value - main_position)
                    dyn_value_struct = (
                        self._ASL_RECORD_DYN_VALUE.parse_stream(file_object))
                    dyn_value = dyn_value_struct.value.partition(b'\x00')[0]
                    values.append(dyn_value)
                    # Come back to the position in the entry.
                    file_object.seek(
                        main_position - file_object.tell(), os.SEEK_CUR)

            # Next extra field: 8 bytes more.
            tam_fields -= 8

        # Read the last 8 bytes of the record that points to the previous
        # entry.
        _ = file_object.read(8)

        # Parsed section, we translate the read data to an appropriate
        # format.
        micro_seconds, _ = divmod(record_struct.nanosec, 1000)

        # Parsing the dynamic values (text or pointers to position with
        # text). The first four are always the host, sender, facility, and
        # message.
        number_of_values = len(values)
        if number_of_values < 4:
            parser_mediator.ProduceParseError(
                u'less than four values read from an ASL event.')

        computer_name = u'N/A'
        sender = u'N/A'
        facility = u'N/A'
        message = u'N/A'

        if number_of_values >= 1:
            computer_name = values[0].decode(u'utf-8')

        if number_of_values >= 2:
            sender = values[1].decode(u'utf-8')

        if number_of_values >= 3:
            facility = values[2].decode(u'utf-8')

        if number_of_values >= 4:
            message = values[3].decode(u'utf-8')

        # If the entry has an extra fields, they works as a pairs:
        # The first is the name of the field and the second the value.
        extra_information = u''
        if number_of_values > 4 and number_of_values % 2 == 0:
            # Taking all the extra attributes and merging them together,
            # eg: a = [1, 2, 3, 4] will look like "1: 2, 3: 4".
            try:
                # NOTE(review): subscripting the result of map() only works
                # on Python 2, where map returns a list; on Python 3 this
                # raises TypeError. Also the except clause guards decoding
                # that already happened above -- verify intent.
                extra_values = map(py2to3.UNICODE_TYPE, values[4:])
                extra_information = u', '.join(
                    map(u': '.join,
                        zip(extra_values[0::2], extra_values[1::2])))
            except UnicodeDecodeError as exception:
                parser_mediator.ProduceParseError(
                    u'Unable to decode all ASL values in the extra information fields.'
                )

        event_object = ASLEvent(
            record_struct.timestamp, offset, record_struct.asl_message_id,
            record_struct.level, record_struct.pid, record_struct.uid,
            record_struct.gid, record_struct.read_uid,
            record_struct.read_gid, computer_name, sender, facility,
            message, extra_information, micro_seconds=micro_seconds)
        parser_mediator.ProduceEvent(event_object)

        return (event_object, record_struct.next_offset)
# NOTE(review): fragment -- the loop below is the tail of a _decode
# method whose header (and the initialization of `result` and `last`)
# is outside this chunk.
        for i, item in enumerate(obj):
            key = item.key
            # Reject duplicate keys instead of silently overwriting.
            if key in result:
                raise construct.ConstructError(
                    f"Key {key} found twice in object", path)
            last[key] = i
            result[key] = item.value
        return result

    def _encode(self, obj: construct.Container, context, path):
        # Inverse of _decode: flatten the mapping back into an ordered
        # list of key/value containers.
        return construct.ListContainer(
            construct.Container(key=type_, value=item)
            for type_, item in obj.items())


def ConstructDict(subcon):
    # A dict serialized as a count-prefixed array of {key, value} pairs,
    # with `subcon` describing the value encoding.
    return DictAdapter(
        PrefixedArray(VarInt, Struct(
            key=String,
            value=subcon,
        )))


# A JSON document stored as a length-prefixed string.
JsonEncodedValue = construct.ExprAdapter(
    String,
    # Decode
    lambda obj, ctx: json.loads(obj),
    # Encode
    lambda obj, ctx: json.dumps(obj),
)
# NOTE(review): fragment -- this chunk starts inside the "dst_addr"
# Switch of mac_header_t, whose opening lines are outside this chunk.
        lambda ctx: int(ctx.fcf.dst_addressing_mode),
        {
            int(addressing_mode_t.SHORT): short_addr_t,
            int(addressing_mode_t.LONG): long_addr_t,
        }),
    "src_addr" / ct.If(
        lambda ctx: is_address_present(ctx.fcf.src_addressing_mode),
        ct.Struct(
            # With PAN ID compression the source PAN ID is not on the wire;
            # reuse the destination PAN ID instead.
            "pan_id" / ct.IfThenElse(
                lambda ctx: ctx._.fcf.pan_id_comp and is_address_present(
                    ctx._.fcf.dst_addressing_mode),
                ct.Computed(ct.this._.dst_addr.pan_id),
                ct.Hex(ct.Int16ul)),
            "addr" / ct.Switch(
                lambda ctx: int(ctx._.fcf.src_addressing_mode),
                {
                    int(addressing_mode_t.SHORT): ct.Hex(ct.Int16ul),
                    int(addressing_mode_t.LONG): ct.Hex(ct.Int64ul)
                }))),
)

# MAC protocol data unit: header, payload, then a 2-byte FCS.
# The payload is taken greedily and the last two bytes are re-read as the
# FCS via the Seek(-2) below.
mpdu_t = ct.Struct(
    "mac" / mac_header_t,
    "pdu_offset" / ct.Tell,
    # NOTE(review): on parse the trailing 2 FCS bytes are stripped from the
    # greedy payload; on build two placeholder bytes "AA" are appended --
    # presumably expecting a later FCS rewrite; confirm build path is used.
    "pdu" / ct.ExprAdapter(ct.HexDump(ct.GreedyBytes),
                           ct.obj_[:-2],
                           ct.obj_ + "AA"),
    ct.Seek(-2, ct.io.SEEK_CUR),
    "fcs_offset" / ct.Tell,
    # Sanity check: the FCS must not precede the payload start.
    ct.If(ct.this.pdu_offset > ct.this.fcs_offset, ct.Error),
    "fcs" / ct.Hex(ct.Int16ul))

# PHY header: 1 reserved bit + 7-bit frame size.
phr_t = ct.BitStruct("reserved" / ct.Bit,
                     "size" / ct.BitsInteger(7))

frame_t = ct.Struct("phr" / phr_t,
                    "mpdu" / mpdu_t
                    # warn if phr.size != len(mpdu)
                    )
def _struct(cls):
    """Return the construct Struct describing this record's binary layout.

    Version-gated fields are present only when the parsed ``version`` meets
    the required minimum; ``system_settings`` is a count-prefixed array of
    typed name/value parameters.
    """

    def _string():
        # Length-prefixed (32-bit LE count) cp932-encoded string. A fresh
        # instance per call, matching the original inline declarations.
        return construct.PascalString(construct.Int32ul, "cp932")

    def _since(minimum, subcon):
        # Field that exists only in format versions >= `minimum`.
        return construct.If(construct.this.version >= minimum, subcon)

    # 10-byte extended-precision float, widened to numpy's 16-byte
    # longdouble buffer on parse and truncated back on build.
    float_value = construct.ExprAdapter(
        construct.Bytes(10),
        lambda raw, ctx: numpy.frombuffer(
            raw.rjust(16, b"\x00"), dtype=numpy.longdouble),
        lambda value, ctx: numpy.longdouble(value).tobytes()[-10:],
    )

    # One typed parameter entry: tag byte, name, then a value whose wire
    # format depends on the tag.
    setting_entry = construct.Struct(
        "type" / construct.Enum(construct.Byte, ParamType),
        "name" / _string(),
        "value" / construct.Switch(
            construct.this.type,
            {
                "Int": construct.Int32sl,
                "Float": float_value,
                "Flag": construct.Byte,
                "Str": _string(),
            },
        ),
    )

    return construct.Struct(
        "version" / LsbVersionValidator(construct.Int32ul),
        "project_name" / _string(),
        "unk1" / construct.Int64ul,
        "unk2" / construct.Int64ul,
        "init_lsb" / _string(),
        # Note: strictly greater-than here, unlike the >= gates below.
        "exit_lsb" / construct.If(construct.this.version > 0x6D, _string()),
        "project_dir" / _string(),
        "unk3" / construct.Int32ul,
        "bool1" / construct.Byte,
        "bool2" / _since(0x6A, construct.Byte),
        "audio_formats" / _since(0x6D, _string()),
        "bool3" / _since(0x71, construct.Byte),
        "bool4" / _since(0x72, construct.Byte),
        "bool5" / _since(0x74, construct.Byte),
        "insert_disk_prompt" / _string(),
        "exit_prompt" / _string(),
        "system_settings" / construct.PrefixedArray(
            construct.Int32ul, setting_entry),
    )
# Reserved bitfield options
extensions = {
    'commands': 20,  # support for Extension Protocol (BEP 0010)
}

# support for Peer Metadata Exchange (BEP 0009)
UT_METADATA = 3
extended_commands = collections.OrderedDict(ut_metadata=UT_METADATA)

# Generic peer-wire frame: a 4-byte big-endian length followed by that many
# payload bytes.
_message = c.Struct('message',
                    c.UBInt32('length'),
                    c.Bytes('payload', lambda ctx: ctx.length))


# Fixed: was a lambda assigned to a name (PEP 8 E731); a def gives the
# callable a proper name and a docstring.
def Bytes(name):
    """Return an adapter that (de)serializes the rest of the stream.

    Parsing greedily reads zero or more single bytes and joins them into one
    string; building splits the string back into its individual bytes.
    """
    return c.ExprAdapter(
        c.OptionalGreedyRange(c.StaticField(name, 1)),
        encoder=lambda obj, ctx: list(obj),
        decoder=lambda obj, ctx: ''.join(obj),
    )


# BitTorrent peer-wire messages keyed by command name; each value is the list
# of sub-constructs that follow the one-byte message ID (the Magic).
_commands = {
    'choke': [c.Magic('\x00')],
    'unchoke': [c.Magic('\x01')],
    'interested': [c.Magic('\x02')],
    'uninterested': [c.Magic('\x03')],
    'have': [c.Magic('\x04'), c.UBInt32('index')],
    'bitfield': [c.Magic('\x05'), Bytes('bits')],
    'request': [c.Magic('\x06'), c.UBInt32('index'), c.UBInt32('begin'),
                c.UBInt32('length')],
    'piece': [c.Magic('\x07'), c.UBInt32('index'), c.UBInt32('begin'),
              Bytes('data')],
    'cancel': [c.Magic('\x08'), c.UBInt32('index'), c.UBInt32('begin'),
               c.UBInt32('length')],
    'port': [c.Magic('\x09'), c.UBInt16('port')],
    'extended': [c.Magic('\x14'), c.UBInt8('cmd'), Bytes('msg')],
}
def profile_base(is_v1, recipe_name_encoding="GBK"):
    """Build a Construct for IHCooker recipes based on version and name encoding.

    Args:
        is_v1: selects the v1 wire layout (different name length, padding
            widths) versus v2.
        recipe_name_encoding: codec used for the fixed-size recipe-name
            field; defaults to GBK.

    Returns:
        A ``c.Struct`` describing one recipe profile record.
    """
    return c.Struct(
        # Fixed leading marker byte.
        c.Const(3, c.Int8un),
        "device_version" / c.Default(
            c.Enum(c.Int8ub, **DEVICE_ID), 1 if is_v1 else 2),
        # Menu slot must be a single digit 0-9.
        "menu_location" / c.Default(
            c.ExprValidator(c.Int8ub, lambda o, _: 0 <= o < 10), 9),
        "recipe_name" / c.Default(
            c.ExprAdapter(
                c.StringEncoded(
                    # PaddedString wrapper does not support GBK encoding.
                    c.FixedSized(
                        RECIPE_NAME_MAX_LEN_V1 if is_v1
                        else RECIPE_NAME_MAX_LEN_V2,
                        c.NullStripped(c.GreedyBytes),
                    ),
                    recipe_name_encoding,
                ),
                # On the wire newlines separate name parts; expose spaces to
                # the caller and convert back when building.
                lambda x, _: x.replace("\n", " "),
                lambda x, _: x.replace(" ", "\n"),
            ),
            "Unnamed",
        ),
        c.Padding(1) if is_v1 else c.Padding(2),
        # Random id by default so freshly built profiles are distinguishable.
        "recipe_id" / c.Default(c.Int32ub,
                                lambda _: random.randint(0, 2 ** 32 - 1)),
        "menu_settings" / c.Default(
            c.BitStruct(  # byte 37
                "save_recipe" / c.Default(c.Flag, 0),
                "confirm_start" / c.Default(c.Flag, 0),
                "menu_unknown3" / c.Default(c.Flag, 0),
                "menu_unknown4" / c.Default(c.Flag, 0),
                "menu_unknown5" / c.Default(c.Flag, 0),
                "menu_unknown6" / c.Default(c.Flag, 0),
                "menu_unknown7" / c.Default(c.Flag, 0),
                "menu_unknown8" / c.Default(c.Flag, 0),
            ),
            {},
        ),
        # Durations are stored as split hour/minute bytes; the *_hours byte
        # is rebuilt from the caller-facing total-minutes value, and the
        # *_minutes adapter folds the hours back in when parsing.
        "duration_hours" / c.Rebuild(
            c.Int8ub, lambda ctx: ctx.get("duration_minutes", 0) // 60
        ),  # byte 38
        "duration_minutes" / c.Default(
            c.ExprAdapter(
                c.Int8ub,
                lambda obj, ctx: obj + ctx.duration_hours * 60,
                c.obj_ % 60
            ),
            60,
        ),  # byte 39
        "duration_max_hours" / c.Rebuild(
            c.Int8ub, lambda ctx: ctx.get("duration_max_minutes", 0) // 60
        ),  # byte 40
        "duration_max_minutes" / c.Default(
            c.ExprAdapter(
                c.Int8ub,
                lambda obj, ctx: obj + ctx.duration_max_hours * 60,
                c.obj_ % 60,
            ),
            0,
        ),  # byte 41
        "duration_min_hours" / c.Rebuild(
            c.Int8ub, lambda ctx: ctx.get("duration_min_minutes", 0) // 60
        ),  # byte 42
        "duration_min_minutes" / c.Default(
            c.ExprAdapter(
                c.Int8ub,
                lambda obj, ctx: obj + ctx.duration_min_hours * 60,
                c.obj_ % 60,
            ),
            0,
        ),  # byte 43
        c.Padding(2),  # byte 44, 45
        "unknown_46" / c.Default(c.Byte, 1),  # byte 46, should be set to 1
        c.Padding(7) if is_v1 else c.Padding(1),
        "stages" / c.Default(
            ArrayDefault(
                15,
                c.Struct(  # byte 48-168
                    "mode" / c.Default(c.Enum(c.Byte, StageMode),
                                       StageMode.FireMode),
                    # Stage hours carry a +128 bias on the wire; the minutes
                    # adapter removes it when parsing.
                    "hours" / c.Rebuild(
                        c.Int8ub,
                        lambda ctx: (ctx.get("minutes", 0) // 60) + 128
                    ),
                    "minutes" / c.Default(
                        c.ExprAdapter(
                            c.Int8ub,
                            decoder=lambda obj, ctx: obj + (ctx.hours - 128) * 60,
                            encoder=c.obj_ % 60,
                        ),
                        DEFAULT_PHASE_MINUTES,
                    ),
                    "temp_threshold" / c.Default(c.Int8ub,
                                                 DEFAULT_THRESHOLD_CELCIUS),
                    "temp_target" / c.Default(c.Int8ub,
                                              DEFAULT_TEMP_TARGET_CELCIUS),
                    "power" / c.Default(c.Int8ub, DEFAULT_FIRE_LEVEL),
                    "fire_off" / c.Default(c.Int8ub, DEFAULT_FIRE_ON_OFF),
                    "fire_on" / c.Default(c.Int8ub, DEFAULT_FIRE_ON_OFF),
                ),
                # Unspecified stage slots are filled with this template.
                default=dict(
                    mode=StageMode.FireMode,
                    minutes=DEFAULT_PHASE_MINUTES,
                    temp_threshold=DEFAULT_THRESHOLD_CELCIUS,
                    temp_target=DEFAULT_TEMP_TARGET_CELCIUS,
                    power=DEFAULT_FIRE_LEVEL,
                    fire_off=DEFAULT_FIRE_ON_OFF,
                    fire_on=DEFAULT_FIRE_ON_OFF,
                ),
            ),
            [],
        ),
        c.Padding(16) if is_v1 else c.Padding(6),  # byte 169-174
        "unknown175" / c.Default(c.Int8ub, 0),
        "unknown176" / c.Default(c.Int8ub, 0),
        "unknown177" / c.Default(c.Int8ub, 0),
        # byte 178-179
        "crc" / RebuildStream(
            c.Bytes(2), crc16
        ),  # Default profiles have invalid crc, c.Checksum() raises undesired error when parsed.
    )