예제 #1
0
 def _struct(cls):
     """Return the construct Struct describing one serialized parameter."""
     # 10-byte float on the wire: left-pad to 16 bytes and reinterpret as a
     # numpy longdouble; encoding keeps only the low 10 bytes again.
     float_codec = construct.ExprAdapter(
         construct.Bytes(10),
         lambda obj, ctx: numpy.frombuffer(
             obj.rjust(16, b"\x00"), dtype=numpy.longdouble),
         lambda obj, ctx: numpy.longdouble(obj).tobytes()[-10:],
     )
     value_by_type = {
         "Int": construct.Int32sl,
         "Float": float_codec,
         "Flag": construct.Byte,
         "Str": construct.PascalString(construct.Int32ul, "cp932"),
     }
     return construct.Struct(
         "type" / construct.Enum(construct.Byte, ParamType),
         "value" / construct.Switch(
             construct.this.type,
             value_by_type,
             # else 'Var' variable name type
             construct.Select(
                 construct.PascalString(construct.Int32ul, "cp932"), ),
         ),
     )
예제 #2
0
class TimeMachinePlugin(interface.PlistPlugin):
    """Plist plugin that extracts TimeMachine destinations and backups.

    Relevant plist fields:
      DestinationID: UUID of the remote hard disk the backup is written to.
      BackupAlias: structure containing extra information about the
          destination, including its name.
      SnapshotDates: list of dates on which backups were made.
    """

    NAME = 'time_machine'
    DESCRIPTION = 'Parser for TimeMachine plist files.'

    PLIST_PATH = 'com.apple.TimeMachine.plist'
    PLIST_KEYS = frozenset(['Destinations', 'RootVolumeUUID'])

    # Alias blob layout: 10 bytes of padding followed by a byte-length
    # prefixed string with the destination name.
    TM_BACKUP_ALIAS = construct.Struct(
        'tm_backup_alias',
        construct.Padding(10),
        construct.PascalString(
            'value', length_field=construct.UBInt8('length')))

    # pylint: disable=arguments-differ
    def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
        """Extracts relevant TimeMachine entries.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
    """
        for backup_destination in match.get('Destinations', []):
            device_identifier = backup_destination.get('DestinationID', None)
            if not device_identifier:
                device_identifier = 'Unknown device'

            alias_value = backup_destination.get('BackupAlias', '<ALIAS>')
            try:
                alias_value = self.TM_BACKUP_ALIAS.parse(alias_value).value
            except construct.FieldError:
                alias_value = 'Unknown alias'

            event_data = plist_event.PlistTimeEventData()
            event_data.desc = 'TimeMachine Backup in {0:s} ({1:s})'.format(
                alias_value, device_identifier)
            event_data.key = 'item/SnapshotDates'
            event_data.root = '/Destinations'

            # One event per recorded backup snapshot.
            for datetime_value in backup_destination.get('SnapshotDates', []):
                timestamp = timelib.Timestamp.FromPythonDatetime(
                    datetime_value)
                date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
                    timestamp=timestamp)
                event = time_events.DateTimeValuesEvent(
                    date_time, definitions.TIME_DESCRIPTION_WRITTEN)
                parser_mediator.ProduceEventWithEventData(event, event_data)
예제 #3
0
class TimeMachinePlugin(interface.PlistPlugin):
    """Plist plugin extracting TimeMachine hard disks and backup dates.

    Fields of interest:
      DestinationID: UUID of the remote hard disk that stores the backup.
      BackupAlias: structure with extra information about the destination.
      SnapshotDates: list of the dates on which backups were made.
    """

    NAME = u'time_machine'
    DESCRIPTION = u'Parser for TimeMachine plist files.'

    PLIST_PATH = u'com.apple.TimeMachine.plist'
    PLIST_KEYS = frozenset([u'Destinations', u'RootVolumeUUID'])

    # 10 bytes of padding followed by a byte-length prefixed alias string.
    TM_BACKUP_ALIAS = construct.Struct(
        u'tm_backup_alias',
        construct.Padding(10),
        construct.PascalString(
            u'value', length_field=construct.UBInt8(u'length')))

    def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
        """Extracts relevant TimeMachine entries.

    Args:
      parser_mediator: A parser mediator object (instance of ParserMediator).
      match: Optional dictionary containing keys extracted from PLIST_KEYS.
             The default is None.
    """
        if u'Destinations' not in match:
            return

        event_root = u'/Destinations'
        event_key = u'item/SnapshotDates'

        # One pass per TimeMachine destination device.
        for destination in match[u'Destinations']:
            device_uuid = destination.get(u'DestinationID', None)
            if not device_uuid:
                device_uuid = u'Unknown device'

            alias_value = destination.get(u'BackupAlias', u'<ALIAS>')
            try:
                alias_value = self.TM_BACKUP_ALIAS.parse(alias_value).value
            except construct.FieldError:
                alias_value = u'Unknown alias'

            # One event per recorded backup snapshot.
            for timestamp in destination.get(u'SnapshotDates', []):
                description = u'TimeMachine Backup in {0:s} ({1:s})'.format(
                    alias_value, device_uuid)
                event_object = plist_event.PlistEvent(
                    event_root, event_key, timestamp, description)
                parser_mediator.ProduceEvent(event_object)
예제 #4
0
 def _struct(cls):
     """Return the construct Struct describing one serialized OpeData record."""
     # "func" is only present for records whose type is "Func".
     func_field = construct.Switch(
         construct.this.type,
         {"Func": construct.Enum(construct.Byte, OpeFuncType)})
     operand_array = construct.Array(construct.this.count, Param._struct())
     return construct.Struct(
         "type" / construct.Enum(construct.Byte, OpeDataType),
         "name" / construct.PascalString(construct.Int32ul, "cp932"),
         "count" / construct.Int32ul,
         "func" / func_field,
         "operands" / operand_array,
     )
예제 #5
0
 def _struct(cls):
     """Build the construct Struct for an OpeData entry."""
     # 'func' only applies when the entry type is 'Func'.
     func_switch = construct.Switch(
         construct.this.type, {
             'Func': construct.Enum(construct.Byte, OpeFuncType),
         })
     # 'count' operands follow, each a serialized Param.
     operands = construct.Array(construct.this.count, Param._struct())
     return construct.Struct(
         'type' / construct.Enum(construct.Byte, OpeDataType),
         'name' / construct.PascalString(construct.Int32ul, 'cp932'),
         'count' / construct.Int32ul,
         'func' / func_switch,
         'operands' / operands,
     )
예제 #6
0
 def _struct(cls):
     """Return the construct Struct describing one serialized parameter."""
     # 10-byte float: left-pad to 16 bytes and view as a numpy longdouble;
     # encoding keeps only the low 10 bytes again.
     float_adapter = construct.ExprAdapter(
         construct.Bytes(10),
         lambda obj, ctx: numpy.frombuffer(obj.rjust(16, b'\x00'),
                                           dtype=numpy.longdouble),
         lambda obj, ctx: numpy.longdouble(obj).tobytes()[-10:],
     )
     type_to_subcon = {
         'Int': construct.Int32ul,
         'Float': float_adapter,
         'Flag': construct.Byte,
         'Str': construct.PascalString(construct.Int32ul, 'cp932'),
     }
     return construct.Struct(
         'type' / construct.Enum(construct.Byte, ParamType),
         'value' / construct.Switch(
             construct.this.type,
             type_to_subcon,
             # else 'Var' variable name type
             construct.Select(
                 construct.PascalString(construct.Int32ul, 'cp932'),
             ),
         ),
     )
예제 #7
0
class TimeMachinePlugin(interface.PlistPlugin):
    """Basic plugin to extract time machine hardisk and the backups."""

    NAME = 'plist_timemachine'
    DESCRIPTION = u'Parser for TimeMachine plist files.'

    PLIST_PATH = 'com.apple.TimeMachine.plist'
    PLIST_KEYS = frozenset(['Destinations', 'RootVolumeUUID'])

    # Generated events:
    # DestinationID: remote UUID hard disk where the backup is done.
    # BackupAlias: structure that contains the extra information from the
    #              destinationID.
    # SnapshotDates: list of the backup dates.

    # Alias blob layout: 10 bytes of padding followed by a byte-length
    # prefixed string with the destination name.
    TM_BACKUP_ALIAS = construct.Struct(
        'tm_backup_alias', construct.Padding(10),
        construct.PascalString('value',
                               length_field=construct.UBInt8('length')))

    def GetEntries(self, parser_context, match=None, **unused_kwargs):
        """Extracts relevant TimeMachine entries.

    Args:
      parser_context: A parser context object (instance of ParserContext).
      match: Optional dictionary containing keys extracted from PLIST_KEYS.
             The default is None.
    """
        root = '/Destinations'
        key = 'item/SnapshotDates'

        # For each TimeMachine devices.
        # Use .get() with defaults throughout: individual destination entries
        # are not guaranteed to contain every key, and a missing key must not
        # abort parsing of the remaining destinations with a KeyError.
        for destination in match.get('Destinations', []):
            hd_uuid = destination.get('DestinationID', None)
            if not hd_uuid:
                hd_uuid = u'Unknown device'
            alias = destination.get('BackupAlias', u'<ALIAS>')
            try:
                alias = self.TM_BACKUP_ALIAS.parse(alias).value
            except construct.FieldError:
                alias = u'Unknown alias'
            # For each Backup.
            for timestamp in destination.get('SnapshotDates', []):
                description = u'TimeMachine Backup in {0:s} ({1:s})'.format(
                    alias, hd_uuid)
                event_object = plist_event.PlistEvent(root, key, timestamp,
                                                      description)
                parser_context.ProduceEvent(event_object,
                                            plugin_name=self.NAME)
예제 #8
0
def SGString(encoding='utf8'):
    """
    Build a null terminated `PascalString`.

    The Smartglass protocol always appends a termination character behind a
    length-prefixed string, so this helper wraps a `PascalString` in a
    `TerminatedField`.

    Args:
        encoding (str): The string encoding to use for the `PascalString`.

    Returns:
        SGString: A null byte terminated `PascalString`.
    """
    length_prefixed = construct.PascalString(construct.Int16ub, encoding)
    return TerminatedField(length_prefixed)
예제 #9
0
File: timemachine.py  Project: iwm911/plaso
class TimeMachinePlugin(interface.PlistPlugin):
    """Basic plugin to extract time machine hardisk and the backups."""

    NAME = 'plist_timemachine'

    PLIST_PATH = 'com.apple.TimeMachine.plist'
    PLIST_KEYS = frozenset(['Destinations', 'RootVolumeUUID'])

    # Yielded event fields:
    # DestinationID: remote UUID hard disk where the backup is done.
    # BackupAlias: structure that contains the extra information from the
    #              destinationID.
    # SnapshotDates: list of the backup dates.

    # 10 bytes of padding followed by a byte-length prefixed alias string.
    TM_BACKUP_ALIAS = construct.Struct(
        'tm_backup_alias',
        construct.Padding(10),
        construct.PascalString(
            'value', length_field=construct.UBInt8('length')))

    def GetEntries(self, match, **unused_kwargs):
        """Extracts relevant TimeMachine entries.

    Args:
      match: A dictionary containing keys extracted from PLIST_KEYS.

    Yields:
      EventObject objects extracted from the plist.
    """
        event_root = '/Destinations'
        event_key = 'item/SnapshotDates'

        # One pass per TimeMachine destination device.
        for destination in match['Destinations']:
            device_uuid = destination['DestinationID']
            if not device_uuid:
                device_uuid = u'Unknown device'

            alias_value = destination['BackupAlias']
            try:
                alias_value = self.TM_BACKUP_ALIAS.parse(alias_value).value
            except construct.FieldError:
                alias_value = u'Unknown alias'

            # One event per recorded backup snapshot.
            for timestamp in destination['SnapshotDates']:
                description = u'TimeMachine Backup in {} ({})'.format(
                    alias_value, device_uuid)
                yield plist_event.PlistEvent(
                    event_root, event_key, timestamp, description)
예제 #10
0
def gate_factory(class_dict):
    """Build and return the construct Struct describing a gate record.

    Args:
        class_dict: mapping from record-type names (e.g. 'color_triplet',
            'gate_level', 'normal_color') to the serialized tag bytes used
            as Const markers and Switch keys in the binary format.

    Returns:
        The top-level gate construct Struct.
    """
    # Normal color: 2 padding bytes, then a (category, color) byte pair.
    normal_color_struct = cs.Struct(cs.Padding(2), 'category' / cs.Byte,
                                    'color' / cs.Byte)
    # Offset color: source id, a color-triplet tag, then 3 HSV floats.
    offset_color_struct = cs.Struct('source' / cs.Int,
                                    cs.Const(class_dict['color_triplet']),
                                    'hsv' / cs.Single[3])
    # A color entry dispatches on its raw type tag to one of the two layouts.
    color_entry_struct = cs.Struct(
        'type' / cs.RawCopy(cs.Short), 'colors' / cs.Switch(
            cs.this.type.data, {
                class_dict['normal_color']: normal_color_struct,
                class_dict['offset_color']: offset_color_struct
            }))
    # Count-prefixed list of color entries.
    color_struct = cs.Struct(
        cs.Const(class_dict['colorization']), 'num_entries' / cs.Int,
        'colorizations' / color_entry_struct[cs.this.num_entries])
    # A single level: name, icon, colorization, description, type, restricted.
    level_struct = cs.Struct(
        cs.Const(class_dict['gate_level']),
        cs.Const(b'\x01'), 'level_name' / cs.PascalString(cs.Short, 'ascii'),
        cs.Const(b'\x01'), 'level_icon' / cs.PascalString(cs.Short, 'ascii'),
        'colorization' / color_struct, cs.Const(b'\x01'),
        'description' / cs.PascalString(cs.Short, 'ascii'),
        cs.Const(class_dict['level_type']), 'level_type' / cs.Byte,
        'restricted' / cs.Byte)
    velocity_struct = cs.Struct('velocity' / cs.Single, cs.Const(b'\x01'),
                                'num_entries' / cs.Int,
                                'lengths' / cs.Single[cs.this.num_entries],
                                'start' / cs.Long)
    # A wheel: its levels plus, unless it is a random-depth wheel, velocities.
    wheel_struct = cs.Struct(
        'class_id' / cs.RawCopy(cs.Short), 'unknown' / cs.Byte[0x05],
        'num_levels' / cs.Short, 'levels' / level_struct[cs.this.num_levels],
        'velocity' / cs.If(cs.this.class_id.data != class_dict['random_depth'],
                           velocity_struct))
    # Top-level gate: id, name, icon, colors, wheels and trailing theme data.
    gate_struct = cs.Struct(
        cs.Const(b'\x01'), 'gate_id' / cs.Int,
        cs.Const(b'\x01'), 'gate_name' / cs.PascalString(cs.Short, 'ascii'),
        cs.Const(b'\x01'), 'gate_icon' / cs.PascalString(cs.Short, 'ascii'),
        'colorization' / color_struct, cs.Const(b'\x01'),
        'description' / cs.PascalString(cs.Short, 'ascii'),
        'unknown' / cs.Byte[0x16], 'num_wheels' / cs.Int,
        'wheels' / wheel_struct[cs.this.num_wheels], 'class_id' / cs.Int16sb,
        cs.If(cs.this.class_id < 0, cs.PascalString(cs.Short,
                                                    'ascii')), 'themes' /
        cs.Struct('unknown' / cs.Byte[0x07], 'themes' / cs.Byte[0x06]))

    return gate_struct
        low = obj & 0xFFFFFFFF
        IdaVarInt32._build(low, stream, context, path)
        high = obj >> 32
        IdaVarInt32._build(high, stream, context, path)

        return obj


#######################################
#
# Basic types & helpers
#
#######################################

# String prefixed with a variable int size
VarString = con.PascalString(IdaVarInt32, "utf8")
# Bytes buffer prefixed with a variable int size
VarBuff = con.Prefixed(IdaVarInt32, con.GreedyBytes)
# IDA typedefs: serialized with a +1 offset (decoded value = wire - 1,
# encoded value = raw + 1) — presumably so that BADADDR (-1) round-trips
# through the unsigned varint encoding; TODO confirm against the protocol.
ea_t = asize_t = adiff_t = con.ExprAdapter(IdaVarInt64, con.obj_ - 1,
                                           con.obj_ + 1)


# "template" for defining object list, prefixed with a variable int size
def ObjectList(obj):
    return con.PrefixedArray(IdaVarInt32, obj)


#######################################
#
# Lumina types
예제 #12
0
from typing import Any, Dict, Generator, Optional

import construct

from glucometerutils import common, driver
from glucometerutils.support import lifescan, lifescan_binary_protocol, serial

# Shared LifeScan packet framing; the False argument's meaning is defined by
# LifeScanPacket — presumably disables a framing feature; TODO confirm.
_PACKET = lifescan_binary_protocol.LifeScanPacket(False)

# Two-byte prefix common to every successful command response.
_COMMAND_SUCCESS = construct.Const(b"\x03\x06")

_VERSION_REQUEST = construct.Const(b"\x03\x0d\x01")

# Response: success marker, byte-length prefixed ASCII version string.
_VERSION_RESPONSE = construct.Struct(
    _COMMAND_SUCCESS,
    "version" / construct.PascalString(construct.Byte, encoding="ascii"),
    # NULL-termination is not included in string length.
    construct.Const(b"\x00"),
)

_SERIAL_NUMBER_REQUEST = construct.Const(b"\x03\x0b\x01\x02")

# Response: success marker, NUL-terminated ASCII serial number.
_SERIAL_NUMBER_RESPONSE = construct.Struct(
    _COMMAND_SUCCESS,
    "serial_number" / construct.CString(encoding="ascii"),
)

_READ_RTC_REQUEST = construct.Const(b"\x03\x20\x02")

_READ_RTC_RESPONSE = construct.Struct(
    _COMMAND_SUCCESS,
예제 #13
0
VendorHeader = c.Struct(
    "_start_offset" / c.Tell,
    "magic" / c.Const(b"TRZV"),
    "header_len" / c.Int32ul,
    "expiry" / c.Int32ul,
    "version" / c.Struct(
        "major" / c.Int8ul,
        "minor" / c.Int8ul,
    ),
    "sig_m" / c.Int8ul,
    "sig_n" / c.Rebuild(c.Int8ul, c.len_(c.this.pubkeys)),
    "trust" / VendorTrust,
    "_reserved" / c.Padding(14),
    "pubkeys" / c.Bytes(32)[c.this.sig_n],
    "text" / c.Aligned(4, c.PascalString(c.Int8ul, "utf-8")),
    "image" / Toif,
    "_end_offset" / c.Tell,
    "_min_header_len" / c.Check(
        c.this.header_len > (c.this._end_offset - c.this._start_offset) + 65),
    "_header_len_aligned" / c.Check(c.this.header_len % 512 == 0),
    c.Padding(c.this.header_len - c.this._end_offset + c.this._start_offset -
              65),
    "sigmask" / c.Byte,
    "signature" / c.Bytes(64),
)

VersionLong = c.Struct(
    "major" / c.Int8ul,
    "minor" / c.Int8ul,
    "patch" / c.Int8ul,
예제 #14
0
File: asl.py  Project: cvandeplas/plaso
class AslParser(interface.BaseParser):
    """Parser for ASL log files."""

    NAME = 'asl_log'
    DESCRIPTION = u'Parser for ASL log files.'

    ASL_MAGIC = 'ASL DB\x00\x00\x00\x00\x00\x00'

    # If not right assigned, the value is "-1".
    ASL_NO_RIGHTS = 'ffffffff'

    # Priority level (criticity)
    ASL_MESSAGE_PRIORITY = {
        0: 'EMERGENCY',
        1: 'ALERT',
        2: 'CRITICAL',
        3: 'ERROR',
        4: 'WARNING',
        5: 'NOTICE',
        6: 'INFO',
        7: 'DEBUG'
    }

    # ASL File header.
    # magic: magic number that identify ASL files.
    # version: version of the file.
    # offset: first record in the file.
    # timestamp: epoch time when the first entry was written.
    # last_offset: last record in the file.
    ASL_HEADER_STRUCT = construct.Struct('asl_header_struct',
                                         construct.String('magic', 12),
                                         construct.UBInt32('version'),
                                         construct.UBInt64('offset'),
                                         construct.UBInt64('timestamp'),
                                         construct.UBInt32('cache_size'),
                                         construct.UBInt64('last_offset'),
                                         construct.Padding(36))

    # The record structure is:
    # [HEAP][STRUCTURE][4xExtraField][2xExtraField]*[PreviousEntry]
    # Record static structure.
    # tam_entry: it contains the number of bytes from this file position
    #            until the end of the record, without counts itself.
    # next_offset: next record. If is equal to 0x00, it is the last record.
    # asl_message_id: integer that has the numeric identification of the event.
    # timestamp: Epoch integer that has the time when the entry was created.
    # nanosecond: nanosecond to add to the timestamp.
    # level: level of priority.
    # pid: process identification that ask to save the record.
    # uid: user identification that has lunched the process.
    # gid: group identification that has lunched the process.
    # read_uid: identification id of a user. Only applied if is not -1 (all FF).
    #           Only root and this user can read the entry.
    # read_gid: the same than read_uid, but for the group.
    ASL_RECORD_STRUCT = construct.Struct('asl_record_struct',
                                         construct.Padding(2),
                                         construct.UBInt32('tam_entry'),
                                         construct.UBInt64('next_offset'),
                                         construct.UBInt64('asl_message_id'),
                                         construct.UBInt64('timestamp'),
                                         construct.UBInt32('nanosec'),
                                         construct.UBInt16('level'),
                                         construct.UBInt16('flags'),
                                         construct.UBInt32('pid'),
                                         construct.UBInt32('uid'),
                                         construct.UBInt32('gid'),
                                         construct.UBInt32('read_uid'),
                                         construct.UBInt32('read_gid'),
                                         construct.UBInt64('ref_pid'))

    ASL_RECORD_STRUCT_SIZE = ASL_RECORD_STRUCT.sizeof()

    # 8-byte fields, they can be:
    # - String: [Nibble = 1000 (8)][Nibble = Length][7 Bytes = String].
    # - Integer: integer that has the byte position in the file that points
    #            to an ASL_RECORD_DYN_VALUE struct. If the value of the integer
    #            is equal to 0, it means that it has not data (skip).

    # If the field is a String, we use this structure to decode each
    # integer byte in the corresponding character (ASCII Char).
    ASL_OCTET_STRING = construct.ExprAdapter(construct.Octet('string'),
                                             encoder=lambda obj, ctx: ord(obj),
                                             decoder=lambda obj, ctx: chr(obj))

    # Field string structure. If the first bit is 1, it means that it
    # is a String (1000) = 8, then the next nibble has the number of
    # characters. The last 7 bytes are the number of bytes.
    ASL_STRING = construct.BitStruct(
        'string', construct.Flag('type'), construct.Bits('filler', 3),
        construct.If(lambda ctx: ctx.type, construct.Nibble('string_length')),
        construct.If(lambda ctx: ctx.type,
                     construct.Array(7, ASL_OCTET_STRING)))

    # 8-byte pointer to a byte position in the file.
    ASL_POINTER = construct.UBInt64('pointer')

    # Dynamic data structure pointed by a pointer that contains a String:
    # [2 bytes padding][4 bytes lenght of String][String].
    ASL_RECORD_DYN_VALUE = construct.Struct(
        'asl_record_dyn_value', construct.Padding(2),
        construct.PascalString('value',
                               length_field=construct.UBInt32('length')))

    def Parse(self, parser_context, file_entry):
        """Extract entries from an ASL file.

    Args:
      parser_context: A parser context object (instance of ParserContext).
      file_entry: A file entry object (instance of dfvfs.FileEntry).
    """
        file_object = file_entry.GetFileObject()
        file_object.seek(0, os.SEEK_SET)

        try:
            header = self.ASL_HEADER_STRUCT.parse_stream(file_object)
        except (IOError, construct.FieldError) as exception:
            file_object.close()
            raise errors.UnableToParseFile(
                u'Unable to parse ASL Header with error: {0:s}.'.format(
                    exception))

        if header.magic != self.ASL_MAGIC:
            file_object.close()
            raise errors.UnableToParseFile(
                u'Not an ASL Header, unable to parse.')

        # Get the first and the last entry.
        offset = header.offset
        old_offset = header.offset
        last_offset_header = header.last_offset

        # If the ASL file has entries.
        if offset:
            event_object, offset = self.ReadAslEvent(file_object, offset)
            while event_object:
                parser_context.ProduceEvent(event_object,
                                            parser_name=self.NAME,
                                            file_entry=file_entry)

                # TODO: an anomaly object must be emitted once that is implemented.
                # Sanity check, the last read element must be the same as
                # indicated by the header.
                if offset == 0 and old_offset != last_offset_header:
                    logging.warning(u'Parsing ended before the header ends.')
                old_offset = offset
                event_object, offset = self.ReadAslEvent(file_object, offset)

        file_object.close()

    def ReadAslEvent(self, file_object, offset):
        """Returns an AslEvent from a single ASL entry.

    Args:
      file_object: a file-like object that points to an ASL file.
      offset: offset where the static part of the entry starts.

    Returns:
      An event object constructed from a single ASL record.
    """
        # The heap of the entry is saved to try to avoid seek (performance issue).
        # It has the real start position of the entry.
        dynamic_start = file_object.tell()
        dynamic_part = file_object.read(offset - file_object.tell())

        if not offset:
            return None, None

        try:
            record_header = self.ASL_RECORD_STRUCT.parse_stream(file_object)
        except (IOError, construct.FieldError) as exception:
            logging.warning(
                u'Unable to parse ASL event with error: {0:s}'.format(
                    exception))
            return None, None

        # Variable tam_fields = is the real length of the dynamic fields.
        # We have this: [Record_Struct] + [Dynamic_Fields] + [Pointer_Entry_Before]
        # In Record_Struct we have a field called tam_entry, where it has the number
        # of bytes until the end of the entry from the position that the field is.
        # The tam_entry is between the 2th and the 6th byte in the [Record_Struct].
        # tam_entry = ([Record_Struct]-6)+[Dynamic_Fields]+[Pointer_Entry_Before]
        # Also, we do not need [Point_Entry_Before] and then we delete the size of
        # [Point_Entry_Before] that it is 8 bytes (8):
        # tam_entry = ([Record_Struct]-6)+[Dynamic_Fields]+[Pointer_Entry_Before]
        # [Dynamic_Fields] = tam_entry - [Record_Struct] + 6 - 8
        # [Dynamic_Fields] = tam_entry - [Record_Struct] - 2
        tam_fields = record_header.tam_entry - self.ASL_RECORD_STRUCT_SIZE - 2

        # Dynamic part of the entry that contains minimal four fields of 8 bytes
        # plus 2x[8bytes] fields for each extra ASL_Field.
        # The four first fields are always the Host, Sender, Facility and Message.
        # After the four first fields, the entry might have extra ASL_Fields.
        # For each extra ASL_field, it has a pair of 8-byte fields where the first
        # 8 bytes contains the name of the extra ASL_field and the second 8 bytes
        # contains the text of the exta field.
        # All of this 8-byte field can be saved using one of these three differents
        # types:
        # - Null value ('0000000000000000'): nothing to do.
        # - String: It is string if first bit = 1 or first nibble = 8 (1000).
        #           Second nibble has the length of string.
        #           The next 7 bytes have the text characters of the string
        #           padding the end with null characters: '0x00'.
        #           Example: [8468 6964 6400 0000]
        #                    [8] String, [4] length, value: [68 69 64 64] = hidd.
        # - Pointer: static position in the file to a special struct
        #            implemented as an ASL_RECORD_DYN_VALUE.
        #            Example: [0000 0000 0000 0077]
        #            It points to the file position 0x077 that has a
        #            ASL_RECORD_DYN_VALUE structure.
        values = []
        while tam_fields > 0:
            try:
                raw_field = file_object.read(8)
            except (IOError, construct.FieldError) as exception:
                # Note: {0:s} here; the original {0:d} format specifier raised
                # a ValueError when formatting the exception object.
                logging.warning(
                    u'Unable to parse ASL event with error: {0:s}'.format(
                        exception))
                return None, None
            try:
                # Try to read as a String.
                field = self.ASL_STRING.parse(raw_field)
                values.append(''.join(field.string[0:field.string_length]))
                # Go to parse the next extra field.
                tam_fields -= 8
                continue
            except ValueError:
                pass
            # If it is not a string, it must be a pointer.
            # construct raises FieldError (not ValueError) when the 8-byte
            # buffer is truncated, so catch both to keep the intended
            # log-and-bail behavior.
            try:
                field = self.ASL_POINTER.parse(raw_field)
            except (ValueError, construct.FieldError) as exception:
                logging.warning(
                    u'Unable to parse ASL event with error: {0:s}'.format(
                        exception))
                return None, None
            if field != 0:
                # The next IF ELSE is only for performance issues, avoiding seek.
                # If the pointer points a lower position than where the actual entry
                # starts, it means that it points to a previuos entry.
                pos = field - dynamic_start
                # Bigger or equal 0 means that the data is in the actual entry.
                if pos >= 0:
                    try:
                        values.append((self.ASL_RECORD_DYN_VALUE.parse(
                            dynamic_part[pos:])).value.partition('\x00')[0])
                    except (IOError, construct.FieldError) as exception:
                        logging.warning(
                            u'Unable to parse ASL event with error: {0:s}'.
                            format(exception))
                        return None, None
                else:
                    # Only if it is a pointer that points to the
                    # heap from another entry we use the seek method.
                    main_position = file_object.tell()
                    # If the pointer is in a previous entry.
                    if main_position > field:
                        file_object.seek(field - main_position, os.SEEK_CUR)
                        try:
                            values.append(
                                (self.ASL_RECORD_DYN_VALUE.parse_stream(
                                    file_object)).value.partition('\x00')[0])
                        except (IOError, construct.FieldError):
                            logging.warning((
                                u'The pointer at {0:d} (0x{0:x}) points to invalid '
                                u'information.'
                            ).format(main_position -
                                     self.ASL_POINTER.sizeof()))
                        # Come back to the position in the entry.
                        _ = file_object.read(main_position -
                                             file_object.tell())
                    else:
                        _ = file_object.read(field - main_position)
                        values.append((self.ASL_RECORD_DYN_VALUE.parse_stream(
                            file_object)).value.partition('\x00')[0])
                        # Come back to the position in the entry.
                        file_object.seek(main_position - file_object.tell(),
                                         os.SEEK_CUR)
            # Next extra field: 8 bytes more.
            tam_fields -= 8

        # Read the last 8 bytes of the record that points to the previous entry.
        _ = file_object.read(8)

        # Parsed section, we translate the read data to an appropriate format.
        microsecond = record_header.nanosec // 1000
        timestamp = timelib.Timestamp.FromPosixTimeWithMicrosecond(
            record_header.timestamp, microsecond)
        record_position = offset
        message_id = record_header.asl_message_id
        level = u'{0} ({1})'.format(
            self.ASL_MESSAGE_PRIORITY[record_header.level],
            record_header.level)
        # If the value is -1 (FFFFFFFF), it can be read by everyone.
        if record_header.read_uid != int(self.ASL_NO_RIGHTS, 16):
            read_uid = record_header.read_uid
        else:
            read_uid = 'ALL'
        if record_header.read_gid != int(self.ASL_NO_RIGHTS, 16):
            read_gid = record_header.read_gid
        else:
            read_gid = 'ALL'

        # Parsing the dynamic values (text or pointers to position with text).
        # The first four are always the host, sender, facility, and message.
        computer_name = values[0]
        sender = values[1]
        facility = values[2]
        message = values[3]

        # If the entry has an extra fields, they works as a pairs:
        # The first is the name of the field and the second the value.
        extra_information = ''
        if len(values) > 4:
            values = values[4:]
            for index in xrange(0, len(values) // 2):
                extra_information += (u'[{0}: {1}]'.format(
                    values[index * 2], values[(index * 2) + 1]))

        # Return the event and the offset for the next entry.
        return AslEvent(timestamp, record_position, message_id, level,
                        record_header, read_uid, read_gid, computer_name,
                        sender, facility, message,
                        extra_information), record_header.next_offset
예제 #15
0
                                     construct.UBInt16('flags'),
                                     construct.UBInt32('pid'),
                                     construct.UBInt32('uid'),
                                     construct.UBInt32('gid'),
                                     construct.UBInt32('read_uid'),
                                     construct.UBInt32('read_gid'),
                                     construct.UBInt64('ref_pid'))

# Pointer Values: raw 8-byte field that may hold an address or inline text.
ASL_RECORD_ADDR_TXT = construct.Struct('addr_or_text',
                                       construct.String('addr_txt', 8))

# Pointer Dynamic Value: 2 bytes of padding followed by a 4-byte
# length-prefixed string with the pointed-to text.
ASL_RECORD_DYN_VALUE = construct.Struct(
    'asl_record_text_header', construct.Padding(2),
    construct.PascalString('value', length_field=construct.UBInt32('length')))


# Print the header of the file
def printHeader(header):
    """Print a human-readable summary of an ASL file header.

    Args:
      header: parsed ASL header structure exposing the attributes
          version, timestamp, offset (first record) and last_offset.
    """
    # Use the print() function (valid in Python 2 with a single argument and
    # required in Python 3) instead of the py2-only print statement.
    print("\nASL Header:")
    print(" Version: " + str(header.version))
    print(" Timestamp: " + str(header.timestamp))
    print(" FirstRecord: " + hex(header.offset))
    print(" LastRecord: " + hex(header.last_offset) + "\n")


# Print a record value
#
# Args:
#  record_header: values from the Record_Struct part.
예제 #16
0
 def struct(cls):
     """Build the construct Struct describing this archive's file index.

     Layout: "vf" signature, version, entry count, then per-entry arrays of
     filenames, offsets, compression types, unknown dwords ("unk1s"),
     checksums and encryption flags.
     """
     return construct.Struct(
         "signature" / construct.Const(b"vf"),
         "version" / _LMArchiveVersionValidator(construct.Int32ul),
         "count" / construct.Int32ul,
         # For version >= 100, filenames are length-prefixed and obfuscated:
         # Transformed de-obfuscates on parse and re-obfuscates on build.
         # Older versions store plain length-prefixed cp932 strings.
         "filenames" / construct.Array(
             construct.this.count,
             construct.IfThenElse(
                 construct.this.version >= 100,
                 construct.Prefixed(
                     construct.Int32ul,
                     construct.Transformed(
                         construct.GreedyString('cp932'),
                         LMObfuscator().transform_bytes,
                         None,
                         LMObfuscator().transform_bytes,
                         None,
                     ),
                 ),
                 construct.PascalString(construct.Int32ul, 'cp932'),
             )),
         # count + 1 offsets; presumably the extra entry marks the end of the
         # last file -- TODO confirm against the extraction code.
         "offsets" / construct.Array(
             construct.this.count + 1,
             construct.Struct(
                 "offset_low" / construct.IfThenElse(
                     construct.this._.version >= 100,
                     construct.Transformed(
                         construct.Int32ul,
                         LMObfuscator().transform_int,
                         4,
                         LMObfuscator().transform_int,
                         4,
                     ),
                     construct.Int32ul,
                 ),
                 # offset_high always 0 if ver < 101
                 "offset_high" / construct.IfThenElse(
                     construct.this._.version >= 101,
                     construct.Transformed(
                         construct.Int32ul,
                         LMObfuscator().transform_int,
                         4,
                         LMObfuscator().transform_int_high,
                         4,
                     ),
                     construct.Int32ul,
                 ),
             ),
         ),
         "compress_types" /
         construct.Array(construct.this.count,
                         construct.Enum(construct.Byte, LMCompressType)),
         "unk1s" / construct.Array(
             construct.this.count,
             # construct.Transformed(
             #     construct.Int32ul,
             #     LMObfuscator().transform_int,
             #     4,
             #     LMObfuscator().transform_int,
             #     4,
             # ),
             construct.Int32ul,
         ),
         "checksums" / construct.Array(
             construct.this.count,
             construct.Int32ul,
         ),
         "encrypt_flags" / construct.Array(
             construct.this.count,
             construct.Byte,
         ),
     )
예제 #17
0
from glucometerutils import common
from glucometerutils.support import construct_extras
from glucometerutils.support import lifescan
from glucometerutils.support import lifescan_binary_protocol
from glucometerutils.support import serial

# Packet framing helper from the shared LifeScan binary protocol support.
_PACKET = lifescan_binary_protocol.LifeScanPacket(False)

# Prefix bytes common to every successful command response.
_COMMAND_SUCCESS = construct.Const(b'\x03\x06')

_VERSION_REQUEST = construct.Const(b'\x03\x0d\x01')

_VERSION_RESPONSE = construct.Struct(
    _COMMAND_SUCCESS,
    # Device version as a byte-length-prefixed ASCII string.
    'version' / construct.PascalString(construct.Byte, encoding='ascii'),
    # NULL-termination is not included in string length.
    construct.Const(b'\x00'),
)

_SERIAL_NUMBER_REQUEST = construct.Const(b'\x03\x0b\x01\x02')

_SERIAL_NUMBER_RESPONSE = construct.Struct(
    _COMMAND_SUCCESS,
    # Serial number is NUL-terminated rather than length-prefixed.
    'serial_number' / construct.CString(encoding='ascii'),
)

_READ_RTC_REQUEST = construct.Const(b'\x03\x20\x02')

_READ_RTC_RESPONSE = construct.Struct(
    _COMMAND_SUCCESS,
예제 #18
0
# Vendor header ("TRZV" magic). "_header_len" is written as padding first and
# back-patched by the "header_len" Pointer/Rebuild at the end, once the total
# size (_end_offset - _start_offset) is known.
VendorHeader = c.Struct(
    "_start_offset" / c.Tell,
    "magic" / c.Const(b"TRZV"),
    "_header_len" / c.Padding(4),
    "expiry" / c.Int32ul,
    "version" / c.Struct(
        "major" / c.Int8ul,
        "minor" / c.Int8ul,
    ),
    "vendor_sigs_required" / c.Int8ul,
    # Rebuilt on build from the number of pubkeys supplied.
    "vendor_sigs_n" / c.Rebuild(c.Int8ul, c.len_(c.this.pubkeys)),
    "vendor_trust" / VendorTrust,
    "reserved" / c.Padding(14),
    # vendor_sigs_n public keys of 32 bytes each.
    "pubkeys" / c.Bytes(32)[c.this.vendor_sigs_n],
    "vendor_string" / c.Aligned(4, c.PascalString(c.Int8ul, "utf-8")),
    "vendor_image" / Toif,
    "_data_end_offset" / c.Tell,
    # Pad so data + sigmask (1 byte) + signature (64 bytes) ends on a
    # 512-byte boundary.
    c.Padding(-(c.this._data_end_offset + 65) % 512),
    "sigmask" / c.Byte,
    "signature" / c.Bytes(64),
    "_end_offset" / c.Tell,
    "header_len" /
    c.Pointer(c.this._start_offset + 4,
              c.Rebuild(c.Int32ul, c.this._end_offset - c.this._start_offset)),
)

VersionLong = c.Struct(
    "major" / c.Int8ul,
    "minor" / c.Int8ul,
    "patch" / c.Int8ul,
예제 #19
0
 def _struct(cls):
     """Build the construct Struct for a "LivePrevMenu" button-menu file.

     Many fields are not yet identified ("unkN"); several only exist above
     specific format versions and are gated with construct.If.
     """
     return construct.Struct(
         "signature" / construct.Const(b"LivePrevMenu"),
         "version" /
         LPMVersionValidator(_LPMVersionAdapter(construct.Bytes(3))),
         "unk1" / construct.Bytes(8),
         # Button entries: geometry, image sources, display name, and
         # version-dependent unknown fields.
         "buttons" / construct.PrefixedArray(
             construct.Int32ul,
             construct.Struct(
                 "width" / construct.Int32ul,
                 "height" / construct.Int32ul,
                 "src" / construct.PascalString(construct.Int32ul, "cp932"),
                 "unk2" / construct.Byte,
                 "name" /
                 construct.PascalString(construct.Int32ul, "cp932"),
                 "src_selected" /
                 construct.PascalString(construct.Int32ul, "cp932"),
                 "unk3" /
                 construct.PascalString(construct.Int32ul, "cp932"),
                 "unk4" /
                 construct.PascalString(construct.Int32ul, "cp932"),
                 "unk5" / construct.If(
                     construct.this._._.version > 100,
                     construct.PascalString(construct.Int32ul, "cp932"),
                 ),
                 "unk6" / construct.If(
                     construct.this._._.version > 102,
                     construct.Struct(
                         construct.PascalString(construct.Int32ul, "cp932"),
                         construct.PascalString(construct.Int32ul, "cp932"),
                     ),
                 ),
                 "unk7" /
                 construct.PascalString(construct.Int32ul, "cp932"),
                 "unk8" /
                 construct.PascalString(construct.Int32ul, "cp932"),
                 "unk9" /
                 construct.PascalString(construct.Int32ul, "cp932"),
                 "unk10" / construct.If(
                     construct.this._._.version > 101,
                     construct.Struct(
                         construct.PascalString(construct.Int32ul, "cp932"),
                         construct.PascalString(construct.Int32ul, "cp932"),
                     ),
                 ),
                 "unk15" / construct.Int32ul,
                 "unk16" / construct.Int32ul,
                 "unk17" /
                 construct.PascalString(construct.Int32ul, "cp932"),
                 "unk18" / construct.If(
                     construct.this._._.version > 103,
                     construct.Struct(
                         construct.PascalString(construct.Int32ul, "cp932"),
                         construct.PascalString(construct.Int32ul, "cp932"),
                         construct.PascalString(construct.Int32ul, "cp932"),
                         construct.PascalString(construct.Int32ul, "cp932"),
                         construct.PascalString(construct.Int32ul, "cp932"),
                         construct.Int32ul,
                     ),
                 ),
                 "unk19" / construct.If(
                     construct.this._._.version > 104,
                     construct.PascalString(construct.Int32ul, "cp932"),
                 ),
                 "unk20" / construct.If(
                     construct.this._._.version > 105,
                     construct.PascalString(construct.Int32ul, "cp932"),
                 ),
             ),
         ),
     )
예제 #20
0
class CupsIppParser(interface.FileObjectParser):
    """Parser for CUPS IPP files. """

    NAME = 'cups_ipp'
    DESCRIPTION = 'Parser for CUPS IPP files.'

    # INFO:
    # For each file, we have only one document with three different timestamps:
    # Created, process and finished.
    # Format:
    # [HEADER: MAGIC + KNOWN_TYPE][GROUP A]...[GROUP Z][GROUP_END: 0x03]
    # GROUP: [GROUP ID][PAIR A]...[PAIR Z] where [PAIR: NAME + VALUE]
    #   GROUP ID: [1byte ID]
    #   PAIR: [TagID][\x00][Name][Value])
    #     TagID: 1 byte integer with the type of "Value".
    #     Name: [Length][Text][\00]
    #       Name can be empty when the name has more than one value.
    #       Example: family name "lopez mata" with more than one surname.
    #       Type_Text + [0x06, family, 0x00] + [0x05, lopez, 0x00] +
    #       Type_Text + [0x00, 0x00] + [0x04, mata, 0x00]
    #     Value: can be integer, boolean, or text provided by TagID.
    #       If boolean, Value: [\x01][0x00(False)] or [\x01(True)]
    #       If integer, Value: [\x04][Integer]
    #       If text,    Value: [Length text][Text][\00]

    # Magic number that identify the CUPS IPP supported version.
    IPP_MAJOR_VERSION = 2
    IPP_MINOR_VERSION = 0
    # Supported Operation ID.
    IPP_OP_ID = 5

    # CUPS IPP File header.
    CUPS_IPP_HEADER = construct.Struct('cups_ipp_header_struct',
                                       construct.UBInt8('major_version'),
                                       construct.UBInt8('minor_version'),
                                       construct.UBInt16('operation_id'),
                                       construct.UBInt32('request_id'))

    # Group ID that indicates the end of the IPP Control file.
    GROUP_END = 3
    # Identification Groups.
    GROUP_LIST = [1, 2, 4, 5, 6, 7]

    # Type ID, per cups source file ipp-support.c.
    TYPE_GENERAL_INTEGER = 0x20
    TYPE_INTEGER = 0x21
    TYPE_BOOL = 0x22
    TYPE_ENUMERATION = 0x23
    TYPE_DATETIME = 0x31

    # Type of values that can be extracted.
    INTEGER_8 = construct.UBInt8('integer')
    INTEGER_32 = construct.UBInt32('integer')
    TEXT = construct.PascalString('text',
                                  length_field=construct.UBInt8('length'))
    BOOLEAN = construct.Struct('boolean_value', construct.Padding(1),
                               INTEGER_8)
    INTEGER = construct.Struct('integer_value', construct.Padding(1),
                               INTEGER_32)

    # This is an RFC2579 datetime.
    DATETIME = construct.Struct(
        'datetime',
        construct.Padding(1),
        construct.UBInt16('year'),
        construct.UBInt8('month'),
        construct.UBInt8('day'),
        construct.UBInt8('hour'),
        construct.UBInt8('minutes'),
        construct.UBInt8('seconds'),
        construct.UBInt8('deciseconds'),
        construct.String('direction_from_utc', length=1, encoding='ascii'),
        construct.UBInt8('hours_from_utc'),
        construct.UBInt8('minutes_from_utc'),
    )

    # Name of the pair.
    PAIR_NAME = construct.Struct('pair_name', TEXT, construct.Padding(1))

    # Specific CUPS IPP to generic name.
    _NAME_PAIR_TRANSLATION = {
        'com.apple.print.JobInfo.PMApplicationName': 'application',
        'com.apple.print.JobInfo.PMJobOwner': 'owner',
        'DestinationPrinterID': 'printer_id',
        'document-format': 'doc_type',
        'job-name': 'job_name',
        'job-originating-host-name': 'computer_name',
        'job-originating-user-name': 'user',
        'job-uuid': 'job_id',
        'printer-uri': 'uri'
    }

    _DATE_TIME_VALUES = {
        'date-time-at-creation': definitions.TIME_DESCRIPTION_CREATION,
        'date-time-at-processing': definitions.TIME_DESCRIPTION_START,
        'date-time-at-completed': definitions.TIME_DESCRIPTION_END
    }

    _POSIX_TIME_VALUES = {
        'time-at-creation': definitions.TIME_DESCRIPTION_CREATION,
        'time-at-processing': definitions.TIME_DESCRIPTION_START,
        'time-at-completed': definitions.TIME_DESCRIPTION_END
    }

    # All attribute names that carry timestamps, in either representation.
    _DATE_TIME_VALUE_NAMES = list(_DATE_TIME_VALUES.keys())
    _DATE_TIME_VALUE_NAMES.extend(list(_POSIX_TIME_VALUES.keys()))

    def _GetStringValue(self, data_dict, name, default_value=None):
        """Retrieves a specific string value from the data dict.

    Args:
      data_dict (dict[str, list[str]): values per name.
      name (str): name of the value to retrieve.
      default_value (Optional[object]): value returned when name has no
          values in the data dict. Defaults to None.

    Returns:
      str: values joined as a comma separated string, or default_value when
          name has no values.
    """
        values = data_dict.get(name, None)
        if not values:
            return default_value

        for index, value in enumerate(values):
            # Quote values containing a comma so the joined string stays
            # unambiguous.
            if ',' in value:
                values[index] = '"{0:s}"'.format(value)

        return ', '.join(values)

    def _ReadPair(self, parser_mediator, file_object):
        """Reads an attribute name and value pair from a CUPS IPP event.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      file_object (dfvfs.FileIO): file-like object.

    Returns:
      tuple: contains:

        str: name or None.
        str: value or None.
    """
        # Pair = Type ID + Name + Value.
        try:
            # Can be:
            #   Group ID + IDtag = Group ID (1byte) + Tag ID (1byte) + '0x00'.
            #   IDtag = Tag ID (1byte) + '0x00'.
            type_id = self.INTEGER_8.parse_stream(file_object)
            if type_id == self.GROUP_END:
                return None, None

            elif type_id in self.GROUP_LIST:
                # If it is a group ID we must read the next byte that contains
                # the first TagID.
                type_id = self.INTEGER_8.parse_stream(file_object)

            # 0x00 separator character.
            self.INTEGER_8.parse_stream(file_object)

        except (IOError, construct.FieldError) as exception:
            parser_mediator.ProduceExtractionError(
                'unable to parse pair identifier with error: {0!s}'.format(
                    exception))
            return None, None

        # Name = Length name + name + 0x00
        try:
            pair_name = self.PAIR_NAME.parse_stream(file_object)
        except (IOError, construct.FieldError) as exception:
            parser_mediator.ProduceExtractionError(
                'unable to parse pair name with error: {0!s}'.format(
                    exception))
            return None, None

        try:
            # The name is stored as bytes; decode it explicitly.
            name = pair_name.text.decode('utf-8')
        except UnicodeDecodeError as exception:
            parser_mediator.ProduceExtractionError(
                'unable to decode pair name with error: {0!s}'.format(
                    exception))
            return None, None

        # Value: can be integer, boolean or text select by Type ID.
        if type_id in (self.TYPE_GENERAL_INTEGER, self.TYPE_INTEGER,
                       self.TYPE_ENUMERATION):
            value_structure = self.INTEGER
        elif type_id == self.TYPE_BOOL:
            value_structure = self.BOOLEAN
        elif type_id == self.TYPE_DATETIME:
            value_structure = self.DATETIME
        else:
            value_structure = self.TEXT

        try:
            value = value_structure.parse_stream(file_object)
        except (IOError, construct.FieldError) as exception:
            parser_mediator.ProduceExtractionError(
                'unable to parse value with error: {0!s}'.format(exception))
            return None, None

        # Normalize the parsed structure into a plain Python value.
        if type_id in (self.TYPE_GENERAL_INTEGER, self.TYPE_INTEGER,
                       self.TYPE_ENUMERATION):
            value = value.integer

        elif type_id == self.TYPE_BOOL:
            value = bool(value.integer)

        elif type_id == self.TYPE_DATETIME:
            rfc2579_date_time_tuple = (value.year, value.month, value.day,
                                       value.hour, value.minutes,
                                       value.seconds, value.deciseconds,
                                       value.direction_from_utc,
                                       value.hours_from_utc,
                                       value.minutes_from_utc)
            value = dfdatetime_rfc2579_date_time.RFC2579DateTime(
                rfc2579_date_time_tuple=rfc2579_date_time_tuple)

        else:
            try:
                value = value.decode('utf-8')
            except UnicodeDecodeError as exception:
                parser_mediator.ProduceExtractionError(
                    'unable to decode value with error: {0!s}'.format(
                        exception))
                return None, None

        return name, value

    def _ReadPairs(self, parser_mediator, file_object):
        """Reads the attribute name and value pairs from a CUPS IPP event.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      file_object (dfvfs.FileIO): file-like object.

    Returns:
      dict[str, list[str]]: values per name.
    """
        data_dict = {}

        # Keep reading until _ReadPair signals end-of-data with (None, None).
        name, value = self._ReadPair(parser_mediator, file_object)
        while name or value:
            # Translate the known "name" CUPS IPP to a generic name value.
            pretty_name = self._NAME_PAIR_TRANSLATION.get(name, name)
            data_dict.setdefault(pretty_name, []).append(value)
            name, value = self._ReadPair(parser_mediator, file_object)

        return data_dict

    def ParseFileObject(self, parser_mediator, file_object, **kwargs):
        """Parses a CUPS IPP file-like object.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      file_object (dfvfs.FileIO): file-like object.

    Raises:
      UnableToParseFile: when the file cannot be parsed.
    """
        try:
            header = self.CUPS_IPP_HEADER.parse_stream(file_object)
        except (IOError, construct.FieldError) as exception:
            raise errors.UnableToParseFile(
                'Unable to parse CUPS IPP Header with error: {0!s}'.format(
                    exception))

        if (header.major_version != self.IPP_MAJOR_VERSION
                or header.minor_version != self.IPP_MINOR_VERSION):
            raise errors.UnableToParseFile(
                '[{0:s}] Unsupported version number.'.format(self.NAME))

        if header.operation_id != self.IPP_OP_ID:
            # Warn if the operation ID differs from the standard one. We should be
            # able to parse the file nonetheless.
            logger.debug(
                '[{0:s}] Unsupported operation identifier in file: {1:s}.'.
                format(self.NAME, parser_mediator.GetDisplayName()))

        data_dict = self._ReadPairs(parser_mediator, file_object)

        # Split the timestamp attributes out of the generic data dict.
        time_dict = {}

        for name in self._DATE_TIME_VALUE_NAMES:
            value = data_dict.get(name, None)
            if value is not None:
                time_dict[name] = value
                del data_dict[name]

        event_data = CupsIppEventData()
        event_data.application = self._GetStringValue(data_dict, 'application')
        event_data.computer_name = self._GetStringValue(
            data_dict, 'computer_name')
        event_data.copies = data_dict.get('copies', [0])[0]
        event_data.data_dict = data_dict
        event_data.doc_type = self._GetStringValue(data_dict, 'doc_type')
        event_data.job_id = self._GetStringValue(data_dict, 'job_id')
        event_data.job_name = self._GetStringValue(data_dict, 'job_name')
        event_data.user = self._GetStringValue(data_dict, 'user')
        event_data.owner = self._GetStringValue(data_dict, 'owner')
        event_data.printer_id = self._GetStringValue(data_dict, 'printer_id')
        event_data.uri = self._GetStringValue(data_dict, 'uri')

        # Produce one event per timestamp, in both representations.
        for name, usage in iter(self._DATE_TIME_VALUES.items()):
            time_values = time_dict.get(name, [])
            for date_time in time_values:
                event = time_events.DateTimeValuesEvent(date_time, usage)
                parser_mediator.ProduceEventWithEventData(event, event_data)

        for name, usage in iter(self._POSIX_TIME_VALUES.items()):
            time_values = time_dict.get(name, [])
            for time_value in time_values:
                date_time = dfdatetime_posix_time.PosixTime(
                    timestamp=time_value)
                event = time_events.DateTimeValuesEvent(date_time, usage)
                parser_mediator.ProduceEventWithEventData(event, event_data)
예제 #21
0
        validator=_validator_maybe(value_in_range, validator),
        **kwargs,
    )


# Fixed-width unsigned integer attribute shorthands.
u8 = partial(unsigned, 8)
u16 = partial(unsigned, 16)
u32 = partial(unsigned, 32)
u64 = partial(unsigned, 64)

# Fixed-width signed integer attribute shorthands.
i8 = partial(signed, 8)
i16 = partial(signed, 16)
i32 = partial(signed, 32)
i64 = partial(signed, 64)

# UTF-8 string with a 16-bit little-endian length prefix.
string = partial(attribute, construct.PascalString(construct.Int16ul, "utf8"), str)

# Raw bytes with a 32-bit little-endian length prefix.
data = partial(
    attribute, construct.Prefixed(construct.Int32ul, construct.GreedyBytes), bytes
)


def enum(constructor, type, **kwargs):
    """Declare an enum-typed attribute; the enum class itself converts raw values."""
    enum_class = type
    return attribute(constructor, enum_class, converter=enum_class, **kwargs)


def struct(type, **kwargs):
    """Declare a nested-struct attribute, rebuilt from parsed keyword values."""
    def _rebuild(parsed):
        # Expand the parsed mapping back into the struct type's constructor.
        return type(**parsed)

    return attribute(type._construct, type, converter=_rebuild, **kwargs)


def constructify(class_):
예제 #22
0
# Maps each Opcode value to its textual assembly mnemonic.
OPCODE_TO_MNEMONIC = {
    Opcode.NOP: 'nop',
    Opcode.INVERT: 'not',
    Opcode.BITWISE_OR: 'or',
    Opcode.BITWISE_AND: 'and',
    Opcode.BITWISE_NOR: 'nor',
    Opcode.RETURN: 'ret',
}

# Symbol-table entry: typed argument list, a VarInt-length-prefixed UTF-8
# name, and the symbol's start offset into the instruction stream.
symbol_entry = construct.Struct(
    "argument_count" / construct.Int8ub,
    "arguments" /
    construct.Array(construct.this.argument_count,
                    construct.Enum(construct.Int8ub, ArgumentType)),
    "name" / construct.PascalString(construct.VarInt, "utf8"),
    "start" / construct.Int16ub,  # Offset into "instructions"
    # I don't think Construct's Pointer can be used for this.
    # "start" / Pointer(8, Bytes(1)),
    # For debug purposes this could potentially contain argument names.
    # Or have a separate 'debug' entry for details only needed for debugging
    # or reverse-engineering.
)

# Fixed 4-byte instruction: opcode, two one-byte operands, and a reserved
# byte that must always be 0xFF.
instruction_definition = construct.Struct(
    "opcode" / construct.Enum(construct.Byte, Opcode),
    "operand_a" / construct.Int8ub,
    "operand_b" / construct.Int8ub,
    "reserved" / construct.Const(0xFF, construct.Int8ub),
)
예제 #23
0
class KeychainParser(interface.BaseParser):
  """Parser for Keychain files."""

  NAME = 'mac_keychain'
  DESCRIPTION = u'Parser for Mac OS X Keychain files.'

  # File signature and the only supported format version.
  KEYCHAIN_MAGIC_HEADER = 'kych'
  KEYCHAIN_MAJOR_VERSION = 1
  KEYCHAIN_MINOR_VERSION = 0

  # Table record types for application and Internet password entries.
  RECORD_TYPE_APPLICATION = 0x80000000
  RECORD_TYPE_INTERNET = 0x80000001

  # DB HEADER.
  KEYCHAIN_DB_HEADER = construct.Struct(
      'db_header',
      construct.String('magic', 4),
      construct.UBInt16('major_version'),
      construct.UBInt16('minor_version'),
      construct.UBInt32('header_size'),
      construct.UBInt32('schema_offset'),
      construct.Padding(4))

  # DB SCHEMA.
  KEYCHAIN_DB_SCHEMA = construct.Struct(
      'db_schema',
      construct.UBInt32('size'),
      construct.UBInt32('number_of_tables'))
  # For each number_of_tables, the schema has a TABLE_OFFSET with the
  # offset starting in the DB_SCHEMA.
  TABLE_OFFSET = construct.UBInt32('table_offset')

  TABLE_HEADER = construct.Struct(
      'table_header',
      construct.UBInt32('table_size'),
      construct.UBInt32('record_type'),
      construct.UBInt32('number_of_records'),
      construct.UBInt32('first_record'),
      construct.UBInt32('index_offset'),
      construct.Padding(4),
      construct.UBInt32('recordnumbercount'))

  # Common record header. The UBInt32 fields after ssgp_length are offsets
  # to the attribute values, relative to the record start (see the seek
  # arithmetic in the _ReadEntry* methods).
  RECORD_HEADER = construct.Struct(
      'record_header',
      construct.UBInt32('entry_length'),
      construct.Padding(12),
      construct.UBInt32('ssgp_length'),
      construct.Padding(4),
      construct.UBInt32('creation_time'),
      construct.UBInt32('last_mod_time'),
      construct.UBInt32('text_description'),
      construct.Padding(4),
      construct.UBInt32('comments'),
      construct.Padding(8),
      construct.UBInt32('entry_name'),
      construct.Padding(20),
      construct.UBInt32('account_name'),
      construct.Padding(4))
  RECORD_HEADER_APP = construct.Struct(
      'record_entry_app',
      RECORD_HEADER,
      construct.Padding(4))
  RECORD_HEADER_INET = construct.Struct(
      'record_entry_inet',
      RECORD_HEADER,
      construct.UBInt32('where'),
      construct.UBInt32('protocol'),
      construct.UBInt32('type'),
      construct.Padding(4),
      construct.UBInt32('url'))

  # Variable-length text: 32-bit big-endian length followed by the string.
  TEXT = construct.PascalString(
      'text', length_field = construct.UBInt32('length'))
  # Timestamps are stored as fixed-width ASCII digit strings (parsed with
  # int(x, 10) in _GetTimestampFromEntry).
  TIME = construct.Struct(
      'timestamp',
      construct.String('year', 4),
      construct.String('month', 2),
      construct.String('day', 2),
      construct.String('hour', 2),
      construct.String('minute', 2),
      construct.String('second', 2),
     construct.Padding(2))
  TYPE_TEXT = construct.String('type', 4)

  # TODO: add more protocols.
  _PROTOCOL_TRANSLATION_DICT = {
      u'htps': u'https',
      u'smtp': u'smtp',
      u'imap': u'imap',
      u'http': u'http'}

  def _GetTimestampFromEntry(self, parser_context, file_entry, structure):
    """Convert a keychain TIME structure to microseconds since Epoch in UTC.

    Args:
      parser_context: A parser context object (instance of ParserContext).
      file_entry: A file entry object (instance of dfvfs.FileEntry).
      structure: TIME entry structure whose year, month, day, hour, minute
                 and second members are decimal digit strings.

    Returns:
      Microseconds since Epoch in UTC, or 0 when the structure is invalid.
    """
    raw_parts = (
        structure.year, structure.month, structure.day,
        structure.hour, structure.minute, structure.second)
    try:
      # Both a malformed digit string (int) and an out-of-range date
      # (FromTimeParts) raise ValueError.
      year, month, day, hour, minute, second = [
          int(part, 10) for part in raw_parts]
      return timelib.Timestamp.FromTimeParts(
          year, month, day, hour, minute, second)
    except ValueError:
      logging.warning(
          u'[{0:s}] Invalid keychain time {1!s} in file: {2:s}'.format(
              self.NAME, parser_context.GetDisplayName(file_entry), structure))
      return 0

  def _ReadEntryApplication(self, parser_context, file_object, file_entry=None):
    """Extracts the information from an application password entry.

    Produces a creation time event and, when the modification time differs,
    a second event for the modification.

    Args:
      parser_context: A parser context object (instance of ParserContext).
      file_object: A file-like object that points to an Keychain file.
      file_entry: Optional file entry object (instance of dfvfs.FileEntry).
                  The default is None.
    """
    offset = file_object.tell()
    try:
      record = self.RECORD_HEADER_APP.parse_stream(file_object)
    except (IOError, construct.FieldError):
      logging.warning((
          u'[{0:s}] Unsupported record header at 0x{1:08x} in file: '
          u'{2:s}').format(
              self.NAME, offset, parser_context.GetDisplayName(file_entry)))
      return

    (ssgp_hash, creation_time, last_mod_time, text_description,
     comments, entry_name, account_name) = self._ReadEntryHeader(
         parser_context, file_entry, file_object, record.record_header, offset)

    # Move to the end of the record, and then, prepared for the next record.
    # entry_length is relative to the record start (offset), so the move is
    # computed relative to the current position (os.SEEK_CUR).
    file_object.seek(
        record.record_header.entry_length + offset - file_object.tell(),
        os.SEEK_CUR)
    event_object = KeychainApplicationRecordEvent(
        creation_time, eventdata.EventTimestamp.CREATION_TIME,
        entry_name, account_name, text_description, comments, ssgp_hash)
    parser_context.ProduceEvent(
        event_object, parser_name=self.NAME, file_entry=file_entry)

    if creation_time != last_mod_time:
      event_object = KeychainApplicationRecordEvent(
          last_mod_time, eventdata.EventTimestamp.MODIFICATION_TIME,
          entry_name, account_name, text_description, comments, ssgp_hash)
      parser_context.ProduceEvent(
          event_object, parser_name=self.NAME, file_entry=file_entry)

  def _ReadEntryHeader(
      self, parser_context, file_entry, file_object, record, offset):
    """Read the common record attributes.

    Args:
      parser_context: A parser context object (instance of ParserContext).
      file_entry: A file entry object (instance of dfvfs.FileEntry).
      file_object: A file-like object that points to an Keychain file.
      record: Structure with the header of the record.
      offset: First byte of the record.

    Returns:
      A tuple of:
        ssgp_hash: Hash of the encrypted data (passwd, cert, note).
        creation_time: When the entry was created.
        last_mod_time: Last time the entry was updated.
        text_description: A brief description of the entry.
        comments: Comments attached to the entry.
        entry_name: Name of the entry.
        account_name: Name of the account.
    """
    # Info: The hash header always starts with the string ssgp followed by
    #       the hash. The fields are always a multiple of four bytes; if not,
    #       the value is padded with 0x00.
    ssgp_hash = binascii.hexlify(file_object.read(record.ssgp_length)[4:])

    # Each attribute offset in the record header is relative to the record
    # start (offset) and off by one, hence the "+ offset - 1" corrections in
    # the relative (os.SEEK_CUR) seeks below.
    file_object.seek(
        record.creation_time - file_object.tell() + offset - 1, os.SEEK_CUR)
    creation_time = self._GetTimestampFromEntry(
        parser_context, file_entry, self.TIME.parse_stream(file_object))

    file_object.seek(
        record.last_mod_time - file_object.tell() + offset - 1, os.SEEK_CUR)
    last_mod_time = self._GetTimestampFromEntry(
        parser_context, file_entry, self.TIME.parse_stream(file_object))

    # The text description field does not always contain data.
    if record.text_description:
      file_object.seek(
          record.text_description - file_object.tell() + offset - 1,
          os.SEEK_CUR)
      text_description = self.TEXT.parse_stream(file_object)
    else:
      text_description = u'N/A'

    # The comment field does not always contain data.
    if record.comments:
      # Bug fix: seek using the comments offset; previously this reused
      # record.text_description and read the wrong attribute.
      file_object.seek(
          record.comments - file_object.tell() + offset - 1, os.SEEK_CUR)
      comments = self.TEXT.parse_stream(file_object)
    else:
      comments = u'N/A'

    file_object.seek(
        record.entry_name - file_object.tell() + offset - 1, os.SEEK_CUR)
    entry_name = self.TEXT.parse_stream(file_object)

    file_object.seek(
        record.account_name - file_object.tell() + offset - 1, os.SEEK_CUR)
    account_name = self.TEXT.parse_stream(file_object)

    return (
        ssgp_hash, creation_time, last_mod_time,
        text_description, comments, entry_name, account_name)

  def _ReadEntryInternet(self, parser_context, file_object, file_entry=None):
    """Extracts the information from an Internet password entry.

    Produces a creation time event and, when the modification time differs,
    a second event for the modification.

    Args:
      parser_context: A parser context object (instance of ParserContext).
      file_object: A file-like object that points to an Keychain file.
      file_entry: Optional file entry object (instance of dfvfs.FileEntry).
                  The default is None.
    """
    offset = file_object.tell()
    try:
      record = self.RECORD_HEADER_INET.parse_stream(file_object)
    except (IOError, construct.FieldError):
      logging.warning((
          u'[{0:s}] Unsupported record header at 0x{1:08x} in file: '
          u'{2:s}').format(
              self.NAME, offset, parser_context.GetDisplayName(file_entry)))
      return

    (ssgp_hash, creation_time, last_mod_time, text_description,
     comments, entry_name, account_name) = self._ReadEntryHeader(
         parser_context, file_entry, file_object, record.record_header, offset)

    # A zero "where" offset means no server/location data is stored.
    if not record.where:
      where = u'N/A'
      protocol = u'N/A'
      type_protocol = u'N/A'
    else:
      # Offsets are relative to the record start and off by one, hence the
      # "+ offset - 1" corrections in the relative seeks.
      file_object.seek(
          record.where - file_object.tell() + offset - 1, os.SEEK_CUR)
      where = self.TEXT.parse_stream(file_object)
      file_object.seek(
          record.protocol - file_object.tell() + offset - 1, os.SEEK_CUR)
      protocol = self.TYPE_TEXT.parse_stream(file_object)
      file_object.seek(
          record.type - file_object.tell() + offset - 1, os.SEEK_CUR)
      type_protocol = self.TEXT.parse_stream(file_object)
      type_protocol = self._PROTOCOL_TRANSLATION_DICT.get(
          type_protocol, type_protocol)
      if record.url:
        file_object.seek(
            record.url - file_object.tell() + offset - 1, os.SEEK_CUR)
        url = self.TEXT.parse_stream(file_object)
        where = u'{0:s}{1:s}'.format(where, url)

    # Move to the end of the record, and then, prepared for the next record.
    file_object.seek(
        record.record_header.entry_length + offset - file_object.tell(),
        os.SEEK_CUR)

    event_object = KeychainInternetRecordEvent(
        creation_time, eventdata.EventTimestamp.CREATION_TIME,
        entry_name, account_name, text_description,
        comments, where, protocol, type_protocol, ssgp_hash)
    parser_context.ProduceEvent(
        event_object, parser_name=self.NAME, file_entry=file_entry)

    if creation_time != last_mod_time:
      # Consistency fix: include ssgp_hash like the creation event above and
      # like _ReadEntryApplication; it was previously dropped here.
      event_object = KeychainInternetRecordEvent(
          last_mod_time, eventdata.EventTimestamp.MODIFICATION_TIME,
          entry_name, account_name, text_description,
          comments, where, protocol, type_protocol, ssgp_hash)
      parser_context.ProduceEvent(
          event_object, parser_name=self.NAME, file_entry=file_entry)

  def _VerifyStructure(self, file_object):
    """Verify that we are dealing with an Keychain entry.

    Args:
      file_object: A file-like object that points to an Keychain file.

    Returns:
      A list of table positions if it is a keychain, None otherwise.
    """
    # INFO: The HEADER KEYCHAIN:
    # [DBHEADER] + [DBSCHEMA] + [OFFSET TABLE A] + ... + [OFFSET TABLE Z]
    # Where the table offset is relative to the first byte of the DB Schema,
    # then we must add to this offset the size of the [DBHEADER].
    try:
      header = self.KEYCHAIN_DB_HEADER.parse_stream(file_object)
    except (IOError, construct.FieldError):
      return

    # Reject the file as soon as the magic value or version do not match.
    if header.magic != self.KEYCHAIN_MAGIC_HEADER:
      return
    if header.major_version != self.KEYCHAIN_MAJOR_VERSION:
      return
    if header.minor_version != self.KEYCHAIN_MINOR_VERSION:
      return

    # The database schema is followed by one offset per table, ordered by
    # file position from the top to the bottom of the file.
    try:
      schema = self.KEYCHAIN_DB_SCHEMA.parse_stream(file_object)
    except (IOError, construct.FieldError):
      return

    header_size = self.KEYCHAIN_DB_HEADER.sizeof()
    table_positions = []
    for _ in range(schema.number_of_tables):
      try:
        relative_offset = self.TABLE_OFFSET.parse_stream(file_object)
      except (IOError, construct.FieldError):
        return
      table_positions.append(relative_offset + header_size)

    return table_positions

  def Parse(self, parser_context, file_entry):
    """Extract data from a Keychain file.

    Args:
      parser_context: A parser context object (instance of ParserContext).
      file_entry: A file entry object (instance of dfvfs.FileEntry).
    """
    file_object = file_entry.GetFileObject()
    table_positions = self._VerifyStructure(file_object)
    if not table_positions:
      file_object.close()
      raise errors.UnableToParseFile(u'The file is not a Keychain file.')

    for table_position in table_positions:
      # Skipping X bytes, unknown data at this point.
      file_object.seek(table_position - file_object.tell(), os.SEEK_CUR)
      try:
        table_header = self.TABLE_HEADER.parse_stream(file_object)
      except construct.FieldError as exception:
        logging.warning((
            u'[{0:s}] Unable to parse table header in file: {1:s} '
            u'with error: {2:s}.').format(
                self.NAME, parser_context.GetDisplayName(file_entry),
                exception))
        continue

      # table_position: absolute byte in the file where the table starts.
      # table_header.first_record: first record in the table, relative to
      # the first byte of the table.
      file_object.seek(
          table_position + table_header.first_record - file_object.tell(),
          os.SEEK_CUR)

      # Select the reader for this table's record type; unknown record
      # types are skipped.
      record_reader = None
      if table_header.record_type == self.RECORD_TYPE_INTERNET:
        record_reader = self._ReadEntryInternet
      elif table_header.record_type == self.RECORD_TYPE_APPLICATION:
        record_reader = self._ReadEntryApplication

      if record_reader:
        for _ in range(table_header.number_of_records):
          record_reader(parser_context, file_object, file_entry=file_entry)

    file_object.close()
예제 #24
0
    2147483654: u'Extended attribute',
    2147487744: u'X509 Certificates',
    2147516416: u'Metadata information'
}

# Header of a keychain table: total size, record type, number of records
# and the offsets of the first record and the index.
TABLE_HEADER = construct.Struct(
    'table_header',
    construct.UBInt32('table_size'),
    construct.UBInt32('record_type'),
    construct.UBInt32('number_of_records'),
    construct.UBInt32('first_record'),
    construct.UBInt32('index_offset'),
    construct.Padding(4),
    construct.UBInt32('recordnumbercount'))

# RECORD
# Variable-length string: 32-bit big-endian length prefix followed by text.
TEXT = construct.PascalString(
    'text', length_field=construct.UBInt32('length'))

# Timestamp stored as ASCII digits (year, month, day, hour, minute, second)
# followed by 2 padding bytes.
TIME = construct.Struct(
    'timestamp',
    construct.String('year', 4),
    construct.String('month', 2),
    construct.String('day', 2),
    construct.String('hour', 2),
    construct.String('minute', 2),
    construct.String('second', 2),
    construct.Padding(2))

# Fixed 4-character type identifier.
TYPE_TEXT = construct.String('type', 4)
RECORD_HEADER = construct.Struct('record_entry',
                                 construct.UBInt32('entry_length'),
                                 construct.Padding(12),
                                 construct.UBInt32('ssgp_length'),
                                 construct.Padding(4),
                                 construct.UBInt32('creation_time'),
                                 construct.UBInt32('last_mod_time'),
                                 construct.UBInt32('text_description'),
예제 #25
0
class KeychainParser(interface.FileObjectParser):
  """Parser for Keychain files."""

  NAME = 'mac_keychain'
  DESCRIPTION = 'Parser for MacOS Keychain files.'

  KEYCHAIN_SIGNATURE = b'kych'
  KEYCHAIN_MAJOR_VERSION = 1
  KEYCHAIN_MINOR_VERSION = 0

  RECORD_TYPE_APPLICATION = 0x80000000
  RECORD_TYPE_INTERNET = 0x80000001

  # DB HEADER.
  KEYCHAIN_DB_HEADER = construct.Struct(
      'db_header',
      construct.Bytes('signature', 4),
      construct.UBInt16('major_version'),
      construct.UBInt16('minor_version'),
      construct.UBInt32('header_size'),
      construct.UBInt32('schema_offset'),
      construct.Padding(4))

  # DB SCHEMA.
  KEYCHAIN_DB_SCHEMA = construct.Struct(
      'db_schema',
      construct.UBInt32('size'),
      construct.UBInt32('number_of_tables'))

  # For each number_of_tables, the schema has a TABLE_OFFSET with the
  # offset starting in the DB_SCHEMA.
  TABLE_OFFSET = construct.UBInt32('table_offset')

  TABLE_HEADER = construct.Struct(
      'table_header',
      construct.UBInt32('table_size'),
      construct.UBInt32('record_type'),
      construct.UBInt32('number_of_records'),
      construct.UBInt32('first_record'),
      construct.UBInt32('index_offset'),
      construct.Padding(4),
      construct.UBInt32('recordnumbercount'))

  RECORD_HEADER = construct.Struct(
      'record_header',
      construct.UBInt32('entry_length'),
      construct.Padding(12),
      construct.UBInt32('ssgp_length'),
      construct.Padding(4),
      construct.UBInt32('creation_time'),
      construct.UBInt32('last_modification_time'),
      construct.UBInt32('text_description'),
      construct.Padding(4),
      construct.UBInt32('comments'),
      construct.Padding(8),
      construct.UBInt32('entry_name'),
      construct.Padding(20),
      construct.UBInt32('account_name'),
      construct.Padding(4))

  RECORD_HEADER_APP = construct.Struct(
      'record_entry_app',
      RECORD_HEADER,
      construct.Padding(4))

  RECORD_HEADER_INET = construct.Struct(
      'record_entry_inet',
      RECORD_HEADER,
      construct.UBInt32('where'),
      construct.UBInt32('protocol'),
      construct.UBInt32('type'),
      construct.Padding(4),
      construct.UBInt32('url'))

  # Variable-length string: 32-bit big-endian length prefix followed by text.
  TEXT = construct.PascalString(
      'text', length_field=construct.UBInt32('length'))

  # Timestamp stored as ASCII digits plus 2 padding bytes.
  TIME = construct.Struct(
      'timestamp',
      construct.String('year', 4),
      construct.String('month', 2),
      construct.String('day', 2),
      construct.String('hour', 2),
      construct.String('minute', 2),
      construct.String('second', 2),
      construct.Padding(2))

  TYPE_TEXT = construct.String('type', 4)

  # TODO: add more protocols.
  _PROTOCOL_TRANSLATION_DICT = {
      'htps': 'https',
      'smtp': 'smtp',
      'imap': 'imap',
      'http': 'http'}

  def _ReadEntryApplication(self, parser_mediator, file_object):
    """Extracts the information from an application password entry.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      file_object (dfvfs.FileIO): a file-like object.
    """
    record_offset = file_object.tell()
    try:
      record_struct = self.RECORD_HEADER_APP.parse_stream(file_object)
    except (IOError, construct.FieldError):
      parser_mediator.ProduceExtractionError(
          'unable to parse record structure at offset: 0x{0:08x}'.format(
              record_offset))
      return

    (ssgp_hash, creation_time, last_modification_time, text_description,
     comments, entry_name, account_name) = self._ReadEntryHeader(
         parser_mediator, file_object, record_struct.record_header,
         record_offset)

    # Move to the end of the record.
    next_record_offset = (
        record_offset + record_struct.record_header.entry_length)
    file_object.seek(next_record_offset, os.SEEK_SET)

    event_data = KeychainApplicationRecordEventData()
    event_data.account_name = account_name
    event_data.comments = comments
    event_data.entry_name = entry_name
    event_data.ssgp_hash = ssgp_hash
    event_data.text_description = text_description

    if creation_time:
      event = time_events.DateTimeValuesEvent(
          creation_time, definitions.TIME_DESCRIPTION_CREATION)
      parser_mediator.ProduceEventWithEventData(event, event_data)

    if last_modification_time:
      event = time_events.DateTimeValuesEvent(
          last_modification_time, definitions.TIME_DESCRIPTION_MODIFICATION)
      parser_mediator.ProduceEventWithEventData(event, event_data)

  def _ReadEntryHeader(
      self, parser_mediator, file_object, record, record_offset):
    """Read the common record attributes.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      file_object (dfvfs.FileIO): a file-like object.
      record (construct.Struct): record header structure.
      record_offset (int): offset of the start of the record.

    Returns:
      tuple: containing:
        ssgp_hash (str): hash of the encrypted data (passwd, cert, note).
        creation_time (dfdatetime.TimeElements): entry creation time or None.
        last_modification_time (dfdatetime.TimeElements): entry last
            modification time or None.
        text_description (str): a brief description of the entry.
        comments (str): comments of the entry.
        entry_name (str): name of the entry.
        account_name (str): name of the account.
    """
    # TODO: reduce number of seeks and/or offset calculations needed
    # for parsing.

    # Info: The hash header always start with the string ssgp follow by
    #       the hash. Furthermore The fields are always a multiple of four.
    #       Then if it is not multiple the value is padded by 0x00.
    ssgp_hash = binascii.hexlify(file_object.read(record.ssgp_length)[4:])

    creation_time = None

    # Offsets stored in the record are 1-based relative to the record start.
    structure_offset = record_offset + record.creation_time - 1
    file_object.seek(structure_offset, os.SEEK_SET)

    try:
      time_structure = self.TIME.parse_stream(file_object)
    except construct.FieldError as exception:
      time_structure = None
      parser_mediator.ProduceExtractionError(
          'unable to parse creation time with error: {0!s}'.format(exception))

    if time_structure:
      time_elements_tuple = (
          time_structure.year, time_structure.month, time_structure.day,
          time_structure.hour, time_structure.minute, time_structure.second)

      creation_time = dfdatetime_time_elements.TimeElements()
      try:
        creation_time.CopyFromStringTuple(
            time_elements_tuple=time_elements_tuple)
      except ValueError:
        creation_time = None
        parser_mediator.ProduceExtractionError(
            'invalid creation time value: {0!s}'.format(time_elements_tuple))

    last_modification_time = None

    structure_offset = record_offset + record.last_modification_time - 1
    file_object.seek(structure_offset, os.SEEK_SET)

    try:
      time_structure = self.TIME.parse_stream(file_object)
    except construct.FieldError as exception:
      time_structure = None
      parser_mediator.ProduceExtractionError(
          'unable to parse last modification time with error: {0!s}'.format(
              exception))

    if time_structure:
      time_elements_tuple = (
          time_structure.year, time_structure.month, time_structure.day,
          time_structure.hour, time_structure.minute, time_structure.second)

      last_modification_time = dfdatetime_time_elements.TimeElements()
      try:
        last_modification_time.CopyFromStringTuple(
            time_elements_tuple=time_elements_tuple)
      except ValueError:
        last_modification_time = None
        parser_mediator.ProduceExtractionError(
            'invalid last modification time value: {0!s}'.format(
                time_elements_tuple))

    # The text description field does not always contain data.
    text_description = 'N/A'
    if record.text_description:
      structure_offset = record_offset + record.text_description - 1
      file_object.seek(structure_offset, os.SEEK_SET)

      try:
        text_description = self.TEXT.parse_stream(file_object)
      except construct.FieldError as exception:
        parser_mediator.ProduceExtractionError(
            'unable to parse text description with error: {0!s}'.format(
                exception))

    # The comments field does not always contain data.
    comments = 'N/A'
    if record.comments:
      structure_offset = record_offset + record.comments - 1
      file_object.seek(structure_offset, os.SEEK_SET)

      try:
        comments = self.TEXT.parse_stream(file_object)
      except construct.FieldError as exception:
        parser_mediator.ProduceExtractionError(
            'unable to parse comments with error: {0!s}'.format(exception))

    structure_offset = record_offset + record.entry_name - 1
    file_object.seek(structure_offset, os.SEEK_SET)

    try:
      entry_name = self.TEXT.parse_stream(file_object)
    except construct.FieldError as exception:
      entry_name = 'N/A'
      parser_mediator.ProduceExtractionError(
          'unable to parse entry name with error: {0!s}'.format(exception))

    structure_offset = record_offset + record.account_name - 1
    file_object.seek(structure_offset, os.SEEK_SET)

    try:
      account_name = self.TEXT.parse_stream(file_object)
    except construct.FieldError as exception:
      account_name = 'N/A'
      parser_mediator.ProduceExtractionError(
          'unable to parse account name with error: {0!s}'.format(exception))

    return (
        ssgp_hash, creation_time, last_modification_time,
        text_description, comments, entry_name, account_name)

  def _ReadEntryInternet(self, parser_mediator, file_object):
    """Extracts the information from an Internet password entry.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      file_object (dfvfs.FileIO): a file-like object.
    """
    record_offset = file_object.tell()
    try:
      record_header_struct = self.RECORD_HEADER_INET.parse_stream(file_object)
    except (IOError, construct.FieldError):
      parser_mediator.ProduceExtractionError((
          'unable to parse record header structure at offset: '
          '0x{0:08x}').format(record_offset))
      return

    (ssgp_hash, creation_time, last_modification_time, text_description,
     comments, entry_name, account_name) = self._ReadEntryHeader(
         parser_mediator, file_object, record_header_struct.record_header,
         record_offset)

    if not record_header_struct.where:
      where = 'N/A'
      protocol = 'N/A'
      type_protocol = 'N/A'

    else:
      offset = record_offset + record_header_struct.where - 1
      file_object.seek(offset, os.SEEK_SET)
      where = self.TEXT.parse_stream(file_object)

      offset = record_offset + record_header_struct.protocol - 1
      file_object.seek(offset, os.SEEK_SET)
      protocol = self.TYPE_TEXT.parse_stream(file_object)

      offset = record_offset + record_header_struct.type - 1
      file_object.seek(offset, os.SEEK_SET)
      type_protocol = self.TEXT.parse_stream(file_object)
      type_protocol = self._PROTOCOL_TRANSLATION_DICT.get(
          type_protocol, type_protocol)

      if record_header_struct.url:
        offset = record_offset + record_header_struct.url - 1
        file_object.seek(offset, os.SEEK_SET)
        url = self.TEXT.parse_stream(file_object)
        where = '{0:s}{1:s}'.format(where, url)

    # Move to the end of the record.
    next_record_offset = (
        record_offset + record_header_struct.record_header.entry_length)
    file_object.seek(next_record_offset, os.SEEK_SET)

    event_data = KeychainInternetRecordEventData()
    event_data.account_name = account_name
    event_data.comments = comments
    event_data.entry_name = entry_name
    event_data.protocol = protocol
    event_data.ssgp_hash = ssgp_hash
    event_data.text_description = text_description
    event_data.type_protocol = type_protocol
    event_data.where = where

    if creation_time:
      event = time_events.DateTimeValuesEvent(
          creation_time, definitions.TIME_DESCRIPTION_CREATION)
      parser_mediator.ProduceEventWithEventData(event, event_data)

    if last_modification_time:
      event = time_events.DateTimeValuesEvent(
          last_modification_time, definitions.TIME_DESCRIPTION_MODIFICATION)
      parser_mediator.ProduceEventWithEventData(event, event_data)

  def _ReadTableOffsets(self, parser_mediator, file_object):
    """Reads the table offsets.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      file_object (dfvfs.FileIO): a file-like object.

    Returns:
      list[int]: table offsets, empty on error.
    """
    # INFO: The HEADER KEYCHAIN:
    # [DBHEADER] + [DBSCHEMA] + [OFFSET TABLE A] + ... + [OFFSET TABLE Z]
    # Where the table offset is relative to the first byte of the DB Schema,
    # then we must add to this offset the size of the [DBHEADER].
    # Read the database schema and extract the offset for all the tables.
    # They are ordered by file position from the top to the bottom of the file.
    table_offsets = []

    try:
      db_schema_struct = self.KEYCHAIN_DB_SCHEMA.parse_stream(file_object)
    except (IOError, construct.FieldError):
      parser_mediator.ProduceExtractionError(
          'unable to parse database schema structure')
      return []

    for index in range(db_schema_struct.number_of_tables):
      try:
        table_offset = self.TABLE_OFFSET.parse_stream(file_object)
      except (IOError, construct.FieldError):
        parser_mediator.ProduceExtractionError(
            'unable to parse table offsets: {0:d}'.format(index))
        # Return an empty list, not None, so that the caller can always
        # iterate over the result.
        return []

      table_offsets.append(table_offset + self.KEYCHAIN_DB_HEADER.sizeof())

    return table_offsets

  @classmethod
  def GetFormatSpecification(cls):
    """Retrieves the format specification.

    Returns:
      FormatSpecification: format specification.
    """
    format_specification = specification.FormatSpecification(cls.NAME)
    format_specification.AddNewSignature(
        cls.KEYCHAIN_SIGNATURE, offset=0)
    return format_specification

  def ParseFileObject(self, parser_mediator, file_object, **kwargs):
    """Parses a MacOS keychain file-like object.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      file_object (dfvfs.FileIO): a file-like object.

    Raises:
      UnableToParseFile: when the file cannot be parsed.
    """
    try:
      db_header = self.KEYCHAIN_DB_HEADER.parse_stream(file_object)
    except (IOError, construct.FieldError):
      raise errors.UnableToParseFile('Unable to parse file header.')

    if db_header.signature != self.KEYCHAIN_SIGNATURE:
      raise errors.UnableToParseFile('Not a MacOS keychain file.')

    if (db_header.major_version != self.KEYCHAIN_MAJOR_VERSION or
        db_header.minor_version != self.KEYCHAIN_MINOR_VERSION):
      # The version fields are integers; the previous {0:s} format
      # specifiers raised ValueError when this branch was taken.
      parser_mediator.ProduceExtractionError(
          'unsupported format version: {0:d}.{1:d}'.format(
              db_header.major_version, db_header.minor_version))
      return

    # TODO: document format and determine if -1 offset correction is needed.
    table_offsets = self._ReadTableOffsets(parser_mediator, file_object)
    for table_offset in table_offsets:
      # Skipping X bytes, unknown data at this point.
      file_object.seek(table_offset, os.SEEK_SET)

      try:
        table = self.TABLE_HEADER.parse_stream(file_object)
      except (IOError, construct.FieldError):
        parser_mediator.ProduceExtractionError(
            'unable to parse table structure at offset: 0x{0:08x}'.format(
                table_offset))
        continue

      # Table_offset: absolute byte in the file where the table starts.
      # table.first_record: first record in the table, relative to the
      #                     first byte of the table.
      file_object.seek(table_offset + table.first_record, os.SEEK_SET)

      if table.record_type == self.RECORD_TYPE_INTERNET:
        for _ in range(table.number_of_records):
          self._ReadEntryInternet(parser_mediator, file_object)

      elif table.record_type == self.RECORD_TYPE_APPLICATION:
        for _ in range(table.number_of_records):
          self._ReadEntryApplication(parser_mediator, file_object)
예제 #26
0
# Control message for our protocol; first few bits are special as we have to
# maintain compatibility with LTPv3 in the kernel (first bit must be 1); also
# the packet must be at least 12 bytes in length, otherwise some firewalls
# may filter it when used over port 53


def _control_message_padding(ctx):
    # The fixed part of the message (magic1, magic2, version, type and the
    # one-byte length prefix of "data") is 6 bytes, so padding the data
    # portion up to 6 bytes yields the 12-byte minimum.
    return max(0, 6 - len(ctx["data"]))


ControlMessage = cs.Struct(
    "control",
    # Ensure that the first bit is 1 (L2TP control packet)
    cs.Const(cs.UBInt8("magic1"), 0x80),
    # Reduce conflict matching to other protocols as we run on port 53
    cs.Const(cs.UBInt16("magic2"), 0x73A7),
    # Protocol version to allow future upgrades
    cs.UBInt8("version"),
    # Message type
    cs.UBInt8("type"),
    # Message data (with length prefix)
    cs.PascalString("data"),
    # Pad the message so it is at least 12 bytes long
    cs.Padding(_control_message_padding),
)

# Unreliable messages (0x00 - 0x7F)
# NOTE(review): these constants presumably populate the one-byte 'type'
# field of ControlMessage -- confirm against the message dispatch code.
CONTROL_TYPE_COOKIE = 0x01
CONTROL_TYPE_PREPARE = 0x02
CONTROL_TYPE_ERROR = 0x03
CONTROL_TYPE_TUNNEL = 0x04
CONTROL_TYPE_KEEPALIVE = 0x05
CONTROL_TYPE_PMTUD = 0x06
CONTROL_TYPE_PMTUD_ACK = 0x07
CONTROL_TYPE_REL_ACK = 0x08

# Reliable messages (0x80 - 0xFF)
예제 #27
0
class CupsIppParser(interface.FileObjectParser):
    """Parser for CUPS IPP files. """

    NAME = u'cups_ipp'
    DESCRIPTION = u'Parser for CUPS IPP files.'

    # INFO:
    # For each file, we have only one document with three different timestamps:
    # Created, process and finished.
    # Format:
    # [HEADER: MAGIC + KNOWN_TYPE][GROUP A]...[GROUP Z][GROUP_END: 0x03]
    # GROUP: [GROUP ID][PAIR A]...[PAIR Z] where [PAIR: NAME + VALUE]
    #   GROUP ID: [1byte ID]
    #   PAIR: [TagID][\x00][Name][Value])
    #     TagID: 1 byte integer with the type of "Value".
    #     Name: [Length][Text][\00]
    #       Name can be empty when the name has more than one value.
    #       Example: family name "lopez mata" with more than one surname.
    #       Type_Text + [0x06, family, 0x00] + [0x05, lopez, 0x00] +
    #       Type_Text + [0x00, 0x00] + [0x04, mata, 0x00]
    #     Value: can be integer, boolean, or text provided by TagID.
    #       If boolean, Value: [\x01][0x00(False)] or [\x01(True)]
    #       If integer, Value: [\x04][Integer]
    #       If text,    Value: [Length text][Text][\00]

    # Magic number that identify the CUPS IPP supported version.
    IPP_MAJOR_VERSION = 2
    IPP_MINOR_VERSION = 0
    # Supported Operation ID.
    IPP_OP_ID = 5

    # CUPS IPP File header.
    CUPS_IPP_HEADER = construct.Struct(u'cups_ipp_header_struct',
                                       construct.UBInt8(u'major_version'),
                                       construct.UBInt8(u'minor_version'),
                                       construct.UBInt16(u'operation_id'),
                                       construct.UBInt32(u'request_id'))

    # Group ID that indicates the end of the IPP Control file.
    GROUP_END = 3
    # Identification Groups.
    GROUP_LIST = [1, 2, 4, 5, 6, 7]

    # Type ID, per cups source file ipp-support.c.
    TYPE_GENERAL_INTEGER = 0x20
    TYPE_INTEGER = 0x21
    TYPE_BOOL = 0x22
    TYPE_ENUMERATION = 0x23
    TYPE_DATETIME = 0x31

    # Type of values that can be extracted.
    INTEGER_8 = construct.UBInt8(u'integer')
    INTEGER_32 = construct.UBInt32(u'integer')
    TEXT = construct.PascalString(u'text',
                                  encoding='utf-8',
                                  length_field=construct.UBInt8(u'length'))
    BOOLEAN = construct.Struct(u'boolean_value', construct.Padding(1),
                               INTEGER_8)
    INTEGER = construct.Struct(u'integer_value', construct.Padding(1),
                               INTEGER_32)

    # This is an RFC 2579 datetime.
    DATETIME = construct.Struct(
        u'datetime',
        construct.Padding(1),
        construct.UBInt16(u'year'),
        construct.UBInt8(u'month'),
        construct.UBInt8(u'day'),
        construct.UBInt8(u'hour'),
        construct.UBInt8(u'minutes'),
        construct.UBInt8(u'seconds'),
        construct.UBInt8(u'deciseconds'),
        construct.String(u'direction_from_utc', length=1, encoding='ascii'),
        construct.UBInt8(u'hours_from_utc'),
        construct.UBInt8(u'minutes_from_utc'),
    )

    # Name of the pair.
    PAIR_NAME = construct.Struct(u'pair_name', TEXT, construct.Padding(1))

    # Specific CUPS IPP to generic name.
    NAME_PAIR_TRANSLATION = {
        u'printer-uri': u'uri',
        u'job-uuid': u'job_id',
        u'DestinationPrinterID': u'printer_id',
        u'job-originating-user-name': u'user',
        u'job-name': u'job_name',
        u'document-format': u'doc_type',
        u'job-originating-host-name': u'computer_name',
        u'com.apple.print.JobInfo.PMApplicationName': u'application',
        u'com.apple.print.JobInfo.PMJobOwner': u'owner'
    }

    def ParseFileObject(self, parser_mediator, file_object, **kwargs):
        """Parses a CUPS IPP file-like object.

        Args:
          parser_mediator: A parser mediator object (instance of ParserMediator).
          file_object: A file-like object.

        Raises:
          UnableToParseFile: when the file cannot be parsed.
        """
        try:
            header = self.CUPS_IPP_HEADER.parse_stream(file_object)
        except (IOError, construct.FieldError) as exception:
            # Use !s: formatting an exception object with the :s format code
            # fails on Python 3.
            raise errors.UnableToParseFile(
                u'Unable to parse CUPS IPP Header with error: {0!s}'.format(
                    exception))

        if (header.major_version != self.IPP_MAJOR_VERSION
                or header.minor_version != self.IPP_MINOR_VERSION):
            raise errors.UnableToParseFile(
                u'[{0:s}] Unsupported version number.'.format(self.NAME))

        if header.operation_id != self.IPP_OP_ID:
            # Warn if the operation ID differs from the standard one. We should be
            # able to parse the file nonetheless.
            logging.debug(
                u'[{0:s}] Unsupported operation identifier in file: {1:s}.'.
                format(self.NAME, parser_mediator.GetDisplayName()))

        # Read the pairs extracting the name and the value.
        data_dict = {}
        name, value = self.ReadPair(parser_mediator, file_object)
        while name or value:
            # Translate the known "name" CUPS IPP to a generic name value.
            pretty_name = self.NAME_PAIR_TRANSLATION.get(name, name)
            data_dict.setdefault(pretty_name, []).append(value)
            name, value = self.ReadPair(parser_mediator, file_object)

        # TODO: Refactor to use a lookup table to do event production.
        # Iterate over a snapshot of the items since keys are deleted from
        # data_dict inside the loop; mutating a dict while iterating its
        # items view raises RuntimeError on Python 3.
        time_dict = {}
        for key, value in list(data_dict.items()):
            if key.startswith(u'date-time-') or key.startswith(u'time-'):
                time_dict[key] = value
                del data_dict[key]

        if u'date-time-at-creation' in time_dict:
            event_object = CupsIppEvent(time_dict[u'date-time-at-creation'][0],
                                        eventdata.EventTimestamp.CREATION_TIME,
                                        data_dict)
            parser_mediator.ProduceEvent(event_object)

        if u'date-time-at-processing' in time_dict:
            event_object = CupsIppEvent(
                time_dict[u'date-time-at-processing'][0],
                eventdata.EventTimestamp.START_TIME, data_dict)
            parser_mediator.ProduceEvent(event_object)

        if u'date-time-at-completed' in time_dict:
            event_object = CupsIppEvent(
                time_dict[u'date-time-at-completed'][0],
                eventdata.EventTimestamp.END_TIME, data_dict)
            parser_mediator.ProduceEvent(event_object)

        if u'time-at-creation' in time_dict:
            time_value = time_dict[u'time-at-creation'][0]
            timestamp = timelib.Timestamp.FromPosixTime(time_value)
            event_object = CupsIppEvent(timestamp,
                                        eventdata.EventTimestamp.CREATION_TIME,
                                        data_dict)
            parser_mediator.ProduceEvent(event_object)

        if u'time-at-processing' in time_dict:
            time_value = time_dict[u'time-at-processing'][0]
            timestamp = timelib.Timestamp.FromPosixTime(time_value)
            event_object = CupsIppEvent(timestamp,
                                        eventdata.EventTimestamp.START_TIME,
                                        data_dict)
            parser_mediator.ProduceEvent(event_object)

        if u'time-at-completed' in time_dict:
            time_value = time_dict[u'time-at-completed'][0]
            timestamp = timelib.Timestamp.FromPosixTime(time_value)
            event_object = CupsIppEvent(timestamp,
                                        eventdata.EventTimestamp.END_TIME,
                                        data_dict)
            parser_mediator.ProduceEvent(event_object)

    def ReadPair(self, parser_mediator, file_object):
        """Reads an attribute name and value pair from a CUPS IPP event.

        Args:
          parser_mediator: A parser mediator object (instance of ParserMediator).
          file_object: a file-like object that points to a file.

        Returns:
          A tuple of name and value. If name and value cannot be read both
          are set to None.
        """
        # Pair = Type ID + Name + Value.
        try:
            # Can be:
            #   Group ID + IDtag = Group ID (1byte) + Tag ID (1byte) + '0x00'.
            #   IDtag = Tag ID (1byte) + '0x00'.
            type_id = self.INTEGER_8.parse_stream(file_object)
            if type_id == self.GROUP_END:
                return None, None

            elif type_id in self.GROUP_LIST:
                # If it is a group ID we must read the next byte that contains
                # the first TagID.
                type_id = self.INTEGER_8.parse_stream(file_object)

            # 0x00 separator character.
            _ = self.INTEGER_8.parse_stream(file_object)

        except (IOError, construct.FieldError):
            logging.warning(
                u'[{0:s}] Unsupported identifier in file: {1:s}.'.format(
                    self.NAME, parser_mediator.GetDisplayName()))
            return None, None

        # Name = Length name + name + 0x00
        try:
            name = self.PAIR_NAME.parse_stream(file_object).text
        except (IOError, construct.FieldError):
            logging.warning(u'[{0:s}] Unsupported name in file: {1:s}.'.format(
                self.NAME, parser_mediator.GetDisplayName()))
            return None, None

        # Value: can be integer, boolean or text select by Type ID.
        try:
            if type_id in [
                    self.TYPE_GENERAL_INTEGER, self.TYPE_INTEGER,
                    self.TYPE_ENUMERATION
            ]:
                value = self.INTEGER.parse_stream(file_object).integer

            elif type_id == self.TYPE_BOOL:
                value = bool(self.BOOLEAN.parse_stream(file_object).integer)

            elif type_id == self.TYPE_DATETIME:
                datetime = self.DATETIME.parse_stream(file_object)
                value = timelib.Timestamp.FromRFC2579Datetime(
                    datetime.year, datetime.month, datetime.day, datetime.hour,
                    datetime.minutes, datetime.seconds, datetime.deciseconds,
                    datetime.direction_from_utc, datetime.hours_from_utc,
                    datetime.minutes_from_utc)

            else:
                value = self.TEXT.parse_stream(file_object)

        except (IOError, UnicodeDecodeError, construct.FieldError):
            logging.warning(
                u'[{0:s}] Unsupported value in file: {1:s}.'.format(
                    self.NAME, parser_mediator.GetDisplayName()))
            return None, None

        return name, value
예제 #28
0
class AslParser(interface.FileObjectParser):
    """Parser for ASL (Apple System Log) binary database files."""

    _INITIAL_FILE_OFFSET = None

    NAME = u'asl_log'
    DESCRIPTION = u'Parser for ASL log files.'

    ASL_MAGIC = b'ASL DB\x00\x00\x00\x00\x00\x00'

    # ASL File header.
    # magic: magic number that identify ASL files.
    # version: version of the file.
    # offset: first record in the file.
    # timestamp: time when the first entry was written.
    #     Contains the number of seconds since January 1, 1970 00:00:00 UTC.
    # last_offset: last record in the file.
    ASL_HEADER_STRUCT = construct.Struct(u'asl_header_struct',
                                         construct.String(u'magic', 12),
                                         construct.UBInt32(u'version'),
                                         construct.UBInt64(u'offset'),
                                         construct.UBInt64(u'timestamp'),
                                         construct.UBInt32(u'cache_size'),
                                         construct.UBInt64(u'last_offset'),
                                         construct.Padding(36))

    # The record structure is:
    # [HEAP][STRUCTURE][4xExtraField][2xExtraField]*[PreviousEntry]
    # Record static structure.
    # tam_entry: it contains the number of bytes from this file position
    #            until the end of the record, without counts itself.
    # next_offset: next record. If is equal to 0x00, it is the last record.
    # asl_message_id: integer that has the numeric identification of the event.
    # timestamp: the entry creation date and time.
    #     Contains the number of seconds since January 1, 1970 00:00:00 UTC.
    # nanosecond: nanosecond to add to the timestamp.
    # level: level of priority.
    # pid: process identification that ask to save the record.
    # uid: user identification that has launched the process.
    # gid: group identification that has launched the process.
    # read_uid: identification id of a user. Only applied if is not -1 (all FF).
    #           Only root and this user can read the entry.
    # read_gid: the same than read_uid, but for the group.
    ASL_RECORD_STRUCT = construct.Struct(u'asl_record_struct',
                                         construct.Padding(2),
                                         construct.UBInt32(u'tam_entry'),
                                         construct.UBInt64(u'next_offset'),
                                         construct.UBInt64(u'asl_message_id'),
                                         construct.UBInt64(u'timestamp'),
                                         construct.UBInt32(u'nanosec'),
                                         construct.UBInt16(u'level'),
                                         construct.UBInt16(u'flags'),
                                         construct.UBInt32(u'pid'),
                                         construct.UBInt32(u'uid'),
                                         construct.UBInt32(u'gid'),
                                         construct.UBInt32(u'read_uid'),
                                         construct.UBInt32(u'read_gid'),
                                         construct.UBInt64(u'ref_pid'))

    ASL_RECORD_STRUCT_SIZE = ASL_RECORD_STRUCT.sizeof()

    # 8-byte fields, they can be:
    # - String: [Nibble = 1000 (8)][Nibble = Length][7 Bytes = String].
    # - Integer: integer that has the byte position in the file that points
    #            to an ASL_RECORD_DYN_VALUE struct. If the value of the integer
    #            is equal to 0, it means that it has not data (skip).

    # If the field is a String, we use this structure to decode each
    # integer byte in the corresponding character (ASCII Char).
    ASL_OCTET_STRING = construct.ExprAdapter(construct.Octet(u'string'),
                                             encoder=lambda obj, ctx: ord(obj),
                                             decoder=lambda obj, ctx: chr(obj))

    # Field string structure. If the first bit is 1, it means that it
    # is a String (1000) = 8, then the next nibble has the number of
    # characters. The last 7 bytes are the number of bytes.
    ASL_STRING = construct.BitStruct(
        u'string', construct.Flag(u'type'), construct.Bits(u'filler', 3),
        construct.If(lambda ctx: ctx.type, construct.Nibble(u'string_length')),
        construct.If(lambda ctx: ctx.type,
                     construct.Array(7, ASL_OCTET_STRING)))

    # 8-byte pointer to a byte position in the file.
    ASL_POINTER = construct.UBInt64(u'pointer')

    # Dynamic data structure pointed by a pointer that contains a String:
    # [2 bytes padding][4 bytes length of String][String].
    ASL_RECORD_DYN_VALUE = construct.Struct(
        u'asl_record_dyn_value', construct.Padding(2),
        construct.PascalString(u'value',
                               length_field=construct.UBInt32(u'length')))

    def ParseFileObject(self, parser_mediator, file_object, **kwargs):
        """Parses an ASL file-like object.

    Args:
      parser_mediator: A parser mediator object (instance of ParserMediator).
      file_object: A file-like object.

    Raises:
      UnableToParseFile: when the file cannot be parsed.
    """
        file_object.seek(0, os.SEEK_SET)

        try:
            header = self.ASL_HEADER_STRUCT.parse_stream(file_object)
        except (IOError, construct.FieldError) as exception:
            raise errors.UnableToParseFile(
                u'Unable to parse ASL Header with error: {0:s}.'.format(
                    exception))

        if header.magic != self.ASL_MAGIC:
            raise errors.UnableToParseFile(
                u'Not an ASL Header, unable to parse.')

        # Get the first and the last entry.
        offset = header.offset
        old_offset = header.offset
        last_offset_header = header.last_offset

        # If the ASL file has entries.
        if offset:
            event_object, offset = self.ReadAslEvent(file_object,
                                                     parser_mediator, offset)
            while event_object:
                parser_mediator.ProduceEvent(event_object)

                # Sanity check, the last read element must be the same as
                # indicated by the header.
                if offset == 0 and old_offset != last_offset_header:
                    parser_mediator.ProduceParseError(
                        u'Unable to parse header. Last element header does not match '
                        u'header offset.')
                old_offset = offset
                event_object, offset = self.ReadAslEvent(
                    file_object, parser_mediator, offset)

    def ReadAslEvent(self, file_object, parser_mediator, offset):
        """Reads an ASL record at a specific offset.

    Args:
      file_object: a file-like object that points to an ASL file.
      parser_mediator: A parser mediator object (instance of ParserMediator).
      offset: offset where the static part of the entry starts.

    Returns:
      A tuple of an event object extracted from the ASL record,
      and the offset to the next ASL record in the file.
    """
        # An offset of 0 means there is no record; check before reading the
        # heap, otherwise file_object.read() below would be called with a
        # negative size and slurp the remainder of the file for nothing.
        if not offset:
            return None, None

        # The heap of the entry is saved to try to avoid seek (performance issue).
        # It has the real start position of the entry.
        dynamic_start = file_object.tell()
        dynamic_part = file_object.read(offset - dynamic_start)

        try:
            record_header = self.ASL_RECORD_STRUCT.parse_stream(file_object)
        except (IOError, construct.FieldError) as exception:
            logging.warning(
                u'Unable to parse ASL event with error: {0:s}'.format(
                    exception))
            return None, None

        # Variable tam_fields = is the real length of the dynamic fields.
        # We have this: [Record_Struct] + [Dynamic_Fields] + [Pointer_Entry_Before]
        # In Record_Struct we have a field called tam_entry, where it has the number
        # of bytes until the end of the entry from the position that the field is.
        # The tam_entry is between the 2th and the 6th byte in the [Record_Struct].
        # tam_entry = ([Record_Struct]-6)+[Dynamic_Fields]+[Pointer_Entry_Before]
        # Also, we do not need [Point_Entry_Before] and then we delete the size of
        # [Point_Entry_Before] that it is 8 bytes (8):
        # tam_entry = ([Record_Struct]-6)+[Dynamic_Fields]+[Pointer_Entry_Before]
        # [Dynamic_Fields] = tam_entry - [Record_Struct] + 6 - 8
        # [Dynamic_Fields] = tam_entry - [Record_Struct] - 2
        tam_fields = record_header.tam_entry - self.ASL_RECORD_STRUCT_SIZE - 2

        # Dynamic part of the entry that contains minimal four fields of 8 bytes
        # plus 2x[8bytes] fields for each extra ASL_Field.
        # The four first fields are always the Host, Sender, Facility and Message.
        # After the four first fields, the entry might have extra ASL_Fields.
        # For each extra ASL_field, it has a pair of 8-byte fields where the first
        # 8 bytes contains the name of the extra ASL_field and the second 8 bytes
        # contains the text of the extra field.
        # All of this 8-byte field can be saved using one of these three different
        # types:
        # - Null value ('0000000000000000'): nothing to do.
        # - String: It is string if first bit = 1 or first nibble = 8 (1000).
        #           Second nibble has the length of string.
        #           The next 7 bytes have the text characters of the string
        #           padding the end with null characters: '0x00'.
        #           Example: [8468 6964 6400 0000]
        #                    [8] String, [4] length, value: [68 69 64 64] = hidd.
        # - Pointer: static position in the file to a special struct
        #            implemented as an ASL_RECORD_DYN_VALUE.
        #            Example: [0000 0000 0000 0077]
        #            It points to the file position 0x077 that has a
        #            ASL_RECORD_DYN_VALUE structure.
        values = []
        while tam_fields > 0:
            try:
                raw_field = file_object.read(8)
            except IOError as exception:
                # Note: {0:s} (not {0:d}) -- the exception is not an integer.
                logging.warning(
                    u'Unable to read ASL event with error: {0:s}'.format(
                        exception))
                return None, None

            try:
                # Try to read as a String.
                field = self.ASL_STRING.parse(raw_field)
                values.append(b''.join(field.string[0:field.string_length]))
                # Go to parse the next extra field.
                tam_fields -= 8
                continue
            except (ValueError, TypeError, construct.FieldError):
                # TypeError occurs when the type bit is 0 (a pointer field):
                # the conditional string sub-fields parse as None, so the
                # slice/join above fails. Fall through to the pointer parser.
                pass
            # If it is not a string, it must be a pointer.
            try:
                field = self.ASL_POINTER.parse(raw_field)
            except (ValueError, construct.FieldError) as exception:
                logging.warning(
                    u'Unable to parse ASL event with error: {0:s}'.format(
                        exception))
                return None, None
            if field != 0:
                # The next IF ELSE is only for performance issues, avoiding seek.
                # If the pointer points a lower position than where the actual entry
                # starts, it means that it points to a previous entry.
                pos = field - dynamic_start
                # Bigger or equal 0 means that the data is in the actual entry.
                if pos >= 0:
                    try:
                        values.append((self.ASL_RECORD_DYN_VALUE.parse(
                            dynamic_part[pos:])).value.partition(b'\x00')[0])
                    except (IOError, construct.FieldError) as exception:
                        logging.warning(
                            u'Unable to parse ASL event with error: {0:s}'.
                            format(exception))
                        return None, None
                else:
                    # Only if it is a pointer that points to the
                    # heap from another entry we use the seek method.
                    main_position = file_object.tell()
                    # If the pointer is in a previous entry.
                    if main_position > field:
                        file_object.seek(field - main_position, os.SEEK_CUR)
                        try:
                            values.append(
                                (self.ASL_RECORD_DYN_VALUE.parse_stream(
                                    file_object)).value.partition(b'\x00')[0])
                        except (IOError, construct.FieldError):
                            logging.warning((
                                u'The pointer at {0:d} (0x{0:x}) points to invalid '
                                u'information.'
                            ).format(main_position -
                                     self.ASL_POINTER.sizeof()))
                        # Come back to the position in the entry.
                        _ = file_object.read(main_position -
                                             file_object.tell())
                    else:
                        _ = file_object.read(field - main_position)
                        values.append((self.ASL_RECORD_DYN_VALUE.parse_stream(
                            file_object)).value.partition(b'\x00')[0])
                        # Come back to the position in the entry.
                        file_object.seek(main_position - file_object.tell(),
                                         os.SEEK_CUR)
            # Next extra field: 8 bytes more.
            tam_fields -= 8

        # Read the last 8 bytes of the record that points to the previous entry.
        _ = file_object.read(8)

        # Parsed section, we translate the read data to an appropriate format.
        micro_seconds, _ = divmod(record_header.nanosec, 1000)
        record_position = offset
        message_id = record_header.asl_message_id

        # Parsing the dynamic values (text or pointers to position with text).
        # The first four are always the host, sender, facility, and message.
        number_of_values = len(values)

        if number_of_values < 4:
            parser_mediator.ProduceParseError(
                u'Less than four values read from an ASL event.')

        if number_of_values >= 1:
            computer_name = values[0]
        else:
            computer_name = u'N/A'

        if number_of_values >= 2:
            sender = values[1]
        else:
            sender = u'N/A'

        if number_of_values >= 3:
            facility = values[2]
        else:
            facility = u'N/A'

        if number_of_values >= 4:
            message = values[3]
        else:
            message = u'N/A'

        # If the entry has an extra fields, they works as a pairs:
        # The first is the name of the field and the second the value.
        extra_information = u''
        if number_of_values > 4 and number_of_values % 2 == 0:
            # Taking all the extra attributes and merging them together,
            # eg: a = [1, 2, 3, 4] will look like "1: 2, 3: 4".
            try:
                extra_values = map(unicode, values[4:])
                extra_information = u', '.join(
                    map(u': '.join, zip(extra_values[0::2],
                                        extra_values[1::2])))
            except UnicodeDecodeError as exception:
                parser_mediator.ProduceParseError(
                    u'Unable to decode all ASL values in the extra information fields.'
                )

        # Return the event and the offset for the next entry.
        event_object = AslEvent(record_header.timestamp,
                                record_position,
                                message_id,
                                record_header.level,
                                record_header,
                                record_header.read_uid,
                                record_header.read_gid,
                                computer_name,
                                sender,
                                facility,
                                message,
                                extra_information,
                                micro_seconds=micro_seconds)
        return (event_object, record_header.next_offset)
예제 #29
0
class JavaIDXParser(interface.BaseParser):
    """Parse Java IDX files for download events.

  There are five structures defined. 6.02 files had one generic section
  that retained all data. From 6.03, the file went to a multi-section
  format where later sections were optional and had variable-lengths.
  6.03, 6.04, and 6.05 files all have their main data section (#2)
  begin at offset 128. The short structure is because 6.05 files
  deviate after the 8th byte. So, grab the first 8 bytes to ensure it's
  valid, get the file version, then continue on with the correct
  structures.
  """

    NAME = 'java_idx'
    DESCRIPTION = u'Parser for Java IDX files.'

    # File versions the parser knows how to handle.
    _SUPPORTED_IDX_VERSIONS = (602, 603, 604, 605)

    # First 8 bytes: busy/incomplete download flags and the big-endian
    # file version number.
    IDX_SHORT_STRUCT = construct.Struct('magic', construct.UBInt8('busy'),
                                        construct.UBInt8('incomplete'),
                                        construct.UBInt32('idx_version'))

    # Single generic data section used by 6.02 files.
    IDX_602_STRUCT = construct.Struct(
        'IDX_602_Full', construct.UBInt16('null_space'),
        construct.UBInt8('shortcut'), construct.UBInt32('content_length'),
        construct.UBInt64('last_modified_date'),
        construct.UBInt64('expiration_date'),
        construct.PascalString('version_string',
                               length_field=construct.UBInt16('length')),
        construct.PascalString('url',
                               length_field=construct.UBInt16('length')),
        construct.PascalString('namespace',
                               length_field=construct.UBInt16('length')),
        construct.UBInt32('FieldCount'))

    # Section 1 shared by 6.03, 6.04 and 6.05 files.
    IDX_605_SECTION_ONE_STRUCT = construct.Struct(
        'IDX_605_Section1', construct.UBInt8('shortcut'),
        construct.UBInt32('content_length'),
        construct.UBInt64('last_modified_date'),
        construct.UBInt64('expiration_date'),
        construct.UBInt64('validation_date'), construct.UBInt8('signed'),
        construct.UBInt32('sec2len'), construct.UBInt32('sec3len'),
        construct.UBInt32('sec4len'))

    # Section 2, at a static offset of 128 in 6.03+ files.
    IDX_605_SECTION_TWO_STRUCT = construct.Struct(
        'IDX_605_Section2',
        construct.PascalString('version',
                               length_field=construct.UBInt16('length')),
        construct.PascalString('url',
                               length_field=construct.UBInt16('length')),
        construct.PascalString('namespec',
                               length_field=construct.UBInt16('length')),
        construct.PascalString('ip_address',
                               length_field=construct.UBInt16('length')),
        construct.UBInt32('FieldCount'))

    # Java uses Pascal-style strings, but with a 2-byte length field.
    JAVA_READUTF_STRING = construct.Struct(
        'Java.ReadUTF',
        construct.PascalString('string',
                               length_field=construct.UBInt16('length')))

    def Parse(self, parser_context, file_entry):
        """Extract data from a Java cache IDX file.

    This is the main parsing engine for the parser. It determines if
    the selected file is a proper IDX file. It then checks the file
    version to determine the correct structure to apply to extract
    data.

    Args:
      parser_context: A parser context object (instance of ParserContext).
      file_entry: A file entry object (instance of dfvfs.FileEntry).

    Raises:
      UnableToParseFile: when the file is not a valid or supported IDX file.
    """
        # NOTE(review): file_object is never closed here; confirm the dfvfs
        # file object lifetime contract before adding an explicit close.
        file_object = file_entry.GetFileObject()
        try:
            magic = self.IDX_SHORT_STRUCT.parse_stream(file_object)
        except (IOError, construct.FieldError) as exception:
            raise errors.UnableToParseFile(
                u'Unable to parse Java IDX file with error: {0:s}.'.format(
                    exception))

        # Fields magic.busy and magic.incomplete are normally 0x00. They
        # are set to 0x01 if the file is currently being downloaded. Logic
        # checks for > 1 to avoid a race condition and still reject any
        # file with other data.
        # Field magic.idx_version is the file version, of which only
        # certain versions are supported.
        if magic.busy > 1 or magic.incomplete > 1:
            raise errors.UnableToParseFile(u'Not a valid Java IDX file')

        if magic.idx_version not in self._SUPPORTED_IDX_VERSIONS:
            raise errors.UnableToParseFile(u'Not a valid Java IDX file')

        # Obtain the relevant values from the file. The last modified date
        # denotes when the file was last modified on the HOST. For example,
        # when the file was uploaded to a web server.
        if magic.idx_version == 602:
            section_one = self.IDX_602_STRUCT.parse_stream(file_object)
            last_modified_date = section_one.last_modified_date
            url = section_one.url
            ip_address = 'Unknown'
            http_header_count = section_one.FieldCount
        else:
            # IDX 6.03 and 6.04 have two unused bytes before the structure.
            if magic.idx_version in (603, 604):
                file_object.read(2)

            # IDX 6.03, 6.04, and 6.05 files use the same structures for the
            # remaining data.
            section_one = self.IDX_605_SECTION_ONE_STRUCT.parse_stream(
                file_object)
            last_modified_date = section_one.last_modified_date
            if file_object.get_size() > 128:
                file_object.seek(128)  # Static offset for section 2.
                section_two = self.IDX_605_SECTION_TWO_STRUCT.parse_stream(
                    file_object)
                url = section_two.url
                ip_address = section_two.ip_address
                http_header_count = section_two.FieldCount
            else:
                url = 'Unknown'
                ip_address = 'Unknown'
                http_header_count = 0

        # File offset is now just prior to HTTP headers. Make sure there
        # are headers, and then parse them to retrieve the download date.
        # Headers are stored as name/value pairs of length-prefixed strings.
        download_date = None
        for _ in range(http_header_count):
            field = self.JAVA_READUTF_STRING.parse_stream(file_object)
            value = self.JAVA_READUTF_STRING.parse_stream(file_object)
            if field.string == 'date':
                # Time string "should" be in UTC or have an associated time zone
                # information in the string itself. If that is not the case then
                # there is no reliable method for plaso to determine the proper
                # timezone, so the assumption is that it is UTC.
                download_date = timelib.Timestamp.FromTimeString(
                    value.string, gmt_as_timezone=False)

        if not url or not ip_address:
            raise errors.UnableToParseFile(
                u'Unexpected Error: URL or IP address not found in file.')

        last_modified_timestamp = timelib.Timestamp.FromJavaTime(
            last_modified_date)
        # TODO: Move the timestamp description fields into eventdata.
        event_object = JavaIDXEvent(last_modified_timestamp,
                                    'File Hosted Date', magic.idx_version, url,
                                    ip_address)
        parser_context.ProduceEvent(event_object,
                                    parser_name=self.NAME,
                                    file_entry=file_entry)

        if section_one:
            expiration_date = section_one.get('expiration_date', None)
            if expiration_date:
                expiration_timestamp = timelib.Timestamp.FromJavaTime(
                    expiration_date)
                event_object = JavaIDXEvent(expiration_timestamp,
                                            'File Expiration Date',
                                            magic.idx_version, url, ip_address)
                parser_context.ProduceEvent(event_object,
                                            parser_name=self.NAME,
                                            file_entry=file_entry)

        if download_date:
            event_object = JavaIDXEvent(
                download_date, eventdata.EventTimestamp.FILE_DOWNLOADED,
                magic.idx_version, url, ip_address)
            parser_context.ProduceEvent(event_object,
                                        parser_name=self.NAME,
                                        file_entry=file_entry)
예제 #30
0
# Generic framed packet: 2-byte header magic, little-endian length and
# opcode, the payload, then a 2-byte trailer magic.
packet = cs.Struct(
    "head" / cs.Const(b"\x99\xAA"),
    "len" / cs.Int32sl,
    "opcode" / cs.Int32sl,
    # Subtract 8 bytes from "len": 4 for the opcode plus 4 more already
    # accounted for inside "len" (presumably a trailing id/len field --
    # TODO confirm against the sender's framing code).
    "rest_data" / cs.Bytes(cs.this.len - 8),
    "end" / cs.Const(b"\xAA\x99"),
)

# 3-D vector payload: three little-endian 32-bit floats.
vector_packet = cs.Struct("X" / cs.Float32l, "Y" / cs.Float32l,
                          "Z" / cs.Float32l)

# Authentication payload: a 32-bit-length-prefixed UTF-8 username.
authentication_packet = cs.Struct(
    # "id" / cs.Int32sl,
    "username" / cs.PascalString(cs.Int32sl, "utf8"), )
"""
    TODO: find a way to re user packet parts
"""
# Fully framed vector message: same head/len/opcode/end framing as
# `packet` above, with an id plus an X/Y/Z float triple as the payload.
send_vector_packet = cs.Struct(
    "head" / cs.Const(b"\x99\xAA"),
    "len" / cs.Int32sl,
    "opcode" / cs.Int32sl,  # 4
    "id" / cs.Int32sl,  # 4
    "X" / cs.Float32l,  # 4
    "Y" / cs.Float32l,  # 4
    "Z" / cs.Float32l,  # 4
    "end" / cs.Const(b"\xAA\x99"),
)

send_authentication_packet = cs.Struct(