Example #1
 def __init__(self):
     """Initializes the front-end object."""
     super(ImageExportFrontend, self).__init__()
     self._filter_collection = FileEntryFilterCollection()
     self._knowledge_base = None
     self._resolver_context = context.Context()
Example #2
 def setUp(self):
     """Sets up the needed objects used throughout the test."""
     self._resolver_context = context.Context()
     self._fake_path_spec = fake_path_spec.FakePathSpec(location=u'/')
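
The fake path specification and resolver context set up above are typically paired with dfVFS's in-memory fake file implementation when a test needs file content without touching disk. A minimal, hedged sketch based on the calls used in the later examples; the module paths and the sample location are assumptions:

from dfvfs.file_io import fake_file_io
from dfvfs.path import fake_path_spec
from dfvfs.resolver import context

resolver_context = context.Context()

# Back an in-memory file with a bytes buffer and open it through a fake
# path specification, mirroring the setUp above.
fake_file = fake_file_io.FakeFile(resolver_context, b'test data')
fake_file.open(path_spec=fake_path_spec.FakePathSpec(location=u'/test_data'))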
Example #3
class Resolver(object):
    """Path specification resolver."""

    _resolver_context = context.Context()
    _resolver_helpers_manager = None

    key_chain = keychain.KeyChain()

    @classmethod
    def _GetResolverHelper(cls, type_indicator):
        """Retrieves the path specification resolver helper for the specified type.

    Args:
      type_indicator (str): type indicator.

    Returns:
      ResolverHelper: a resolver helper.
    """
        if not cls._resolver_helpers_manager:
            # Delay the import of the resolver helpers manager to prevent circular
            # imports.
            from dfvfs.resolver_helpers import manager  # pylint: disable=import-outside-toplevel

            cls._resolver_helpers_manager = manager.ResolverHelperManager

        return cls._resolver_helpers_manager.GetHelper(type_indicator)

    @classmethod
    def OpenFileEntry(cls, path_spec_object, resolver_context=None):
        """Opens a file entry object defined by path specification.

    Args:
      path_spec_object (PathSpec): path specification.
      resolver_context (Optional[Context]): resolver context, where None
          represents the built in context which is not multi process safe.

    Returns:
      FileEntry: file entry or None if the path specification could not be
          resolved.
    """
        file_system = cls.OpenFileSystem(path_spec_object,
                                         resolver_context=resolver_context)

        if resolver_context is None:
            resolver_context = cls._resolver_context

        file_entry = file_system.GetFileEntryByPathSpec(path_spec_object)

        # Release the file system so it will be removed from the cache
        # when the file entry is destroyed.
        resolver_context.ReleaseFileSystem(file_system)

        return file_entry

    @classmethod
    def OpenFileObject(cls, path_spec_object, resolver_context=None):
        """Opens a file-like object defined by path specification.

    Args:
      path_spec_object (PathSpec): path specification.
      resolver_context (Optional[Context]): resolver context, where None
          represents the built in context which is not multi process safe.

    Returns:
      FileIO: file-like object or None if the path specification could not
          be resolved.

    Raises:
      MountPointError: if the mount point specified in the path specification
          does not exist.
      PathSpecError: if the path specification is incorrect.
      TypeError: if the path specification type is unsupported.
    """
        if not isinstance(path_spec_object, path_spec.PathSpec):
            raise TypeError('Unsupported path specification type.')

        if resolver_context is None:
            resolver_context = cls._resolver_context

        if path_spec_object.type_indicator == definitions.TYPE_INDICATOR_MOUNT:
            if path_spec_object.HasParent():
                raise errors.PathSpecError(
                    'Unsupported mount path specification with parent.')

            mount_point = getattr(path_spec_object, 'identifier', None)
            if not mount_point:
                raise errors.PathSpecError(
                    'Unsupported path specification without mount point identifier.'
                )

            path_spec_object = mount_manager.MountPointManager.GetMountPoint(
                mount_point)
            if not path_spec_object:
                raise errors.MountPointError(
                    'No such mount point: {0:s}'.format(mount_point))

        file_object = resolver_context.GetFileObject(path_spec_object)
        if not file_object:
            resolver_helper = cls._GetResolverHelper(
                path_spec_object.type_indicator)
            file_object = resolver_helper.NewFileObject(resolver_context)

        file_object.open(path_spec=path_spec_object)
        return file_object

    @classmethod
    def OpenFileSystem(cls, path_spec_object, resolver_context=None):
        """Opens a file system object defined by path specification.

    Args:
      path_spec_object (PathSpec): path specification.
      resolver_context (Optional[Context]): resolver context, where None
          represents the built in context which is not multi process safe.

    Returns:
      FileSystem: file system or None if the path specification could not
          be resolved or has no file system object.

    Raises:
      AccessError: if the access to open the file system was denied.
      BackEndError: if the file system cannot be opened.
      MountPointError: if the mount point specified in the path specification
          does not exist.
      PathSpecError: if the path specification is incorrect.
      TypeError: if the path specification type is unsupported.
    """
        if not isinstance(path_spec_object, path_spec.PathSpec):
            raise TypeError('Unsupported path specification type.')

        if resolver_context is None:
            resolver_context = cls._resolver_context

        if path_spec_object.type_indicator == definitions.TYPE_INDICATOR_MOUNT:
            if path_spec_object.HasParent():
                raise errors.PathSpecError(
                    'Unsupported mount path specification with parent.')

            mount_point = getattr(path_spec_object, 'identifier', None)
            if not mount_point:
                raise errors.PathSpecError(
                    'Unsupported path specification without mount point identifier.'
                )

            path_spec_object = mount_manager.MountPointManager.GetMountPoint(
                mount_point)
            if not path_spec_object:
                raise errors.MountPointError(
                    'No such mount point: {0:s}'.format(mount_point))

        file_system = resolver_context.GetFileSystem(path_spec_object)
        if not file_system:
            resolver_helper = cls._GetResolverHelper(
                path_spec_object.type_indicator)
            file_system = resolver_helper.NewFileSystem(resolver_context)

        try:
            file_system.Open(path_spec_object)
        except (IOError, ValueError) as exception:
            raise errors.BackEndError(
                'Unable to open file system with error: {0!s}'.format(
                    exception))

        return file_system
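
The Resolver class above is used through its class methods: callers hand in a path specification and get back a file entry, file-like object or file system. A minimal, hedged usage sketch; the module paths follow the layout implied by these examples and the location is hypothetical:

from dfvfs.path import os_path_spec
from dfvfs.resolver import resolver

# Build a path specification for a file on the host operating system and
# resolve it into a file entry using the built in (shared) context.
path_spec = os_path_spec.OSPathSpec(location='/tmp/testfile')
file_entry = resolver.Resolver.OpenFileEntry(path_spec)
if file_entry is None:
    print('Unable to resolve the path specification.')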
Example #4
 def setUp(self):
     """Sets up the needed objects used throughout the test."""
     self._resolver_context = context.Context()
     test_file = os.path.join(u'test_data', u'image.qcow2')
     path_spec = os_path_spec.OSPathSpec(location=test_file)
     self._qcow_path_spec = qcow_path_spec.QCOWPathSpec(parent=path_spec)
Example #5
  def __init__(
      self, maximum_number_of_tasks=None, number_of_worker_processes=0,
      worker_memory_limit=None, worker_timeout=None):
    """Initializes an engine.

    Args:
      maximum_number_of_tasks (Optional[int]): maximum number of concurrent
          tasks, where 0 represents no limit.
      number_of_worker_processes (Optional[int]): number of worker processes.
      worker_memory_limit (Optional[int]): maximum amount of memory a worker is
          allowed to consume, where None represents the default memory limit
          and 0 represents no limit.
      worker_timeout (Optional[float]): number of minutes before a worker
          process that is not providing status updates is considered inactive,
          where None or 0.0 represents the default timeout.
    """
    if maximum_number_of_tasks is None:
      maximum_number_of_tasks = self._MAXIMUM_NUMBER_OF_TASKS

    if number_of_worker_processes < 1:
      # One worker for each "available" CPU (minus other processes).
      # The number here is derived from the fact that the engine starts up:
      # * A main process.
      #
      # If we want to utilize all CPUs on the system we therefore need to
      # start up a number of workers that amounts to the total number of
      # CPUs minus the other processes.
      try:
        cpu_count = multiprocessing.cpu_count() - 1

        if cpu_count <= self._WORKER_PROCESSES_MINIMUM:
          cpu_count = self._WORKER_PROCESSES_MINIMUM

        elif cpu_count >= self._WORKER_PROCESSES_MAXIMUM:
          cpu_count = self._WORKER_PROCESSES_MAXIMUM

      except NotImplementedError:
        logger.error((
            'Unable to determine number of CPUs, defaulting to {0:d} worker '
            'processes.').format(self._WORKER_PROCESSES_MINIMUM))
        cpu_count = self._WORKER_PROCESSES_MINIMUM

      number_of_worker_processes = cpu_count

    if worker_memory_limit is None:
      worker_memory_limit = definitions.DEFAULT_WORKER_MEMORY_LIMIT

    if not worker_timeout:
      worker_timeout = definitions.DEFAULT_WORKER_TIMEOUT

    super(TaskMultiProcessEngine, self).__init__()
    self._enable_sigsegv_handler = False
    self._last_worker_number = 0
    self._maximum_number_of_tasks = maximum_number_of_tasks
    self._merge_task = None
    self._merge_task_on_hold = None
    self._number_of_consumed_event_tags = 0
    self._number_of_consumed_events = 0
    self._number_of_consumed_reports = 0
    self._number_of_consumed_sources = 0
    self._number_of_consumed_warnings = 0
    self._number_of_produced_event_tags = 0
    self._number_of_produced_events = 0
    self._number_of_produced_reports = 0
    self._number_of_produced_sources = 0
    self._number_of_produced_warnings = 0
    self._number_of_worker_processes = number_of_worker_processes
    self._path_spec_extractor = extractors.PathSpecExtractor()
    self._processing_configuration = None
    self._redis_client = None
    self._resolver_context = context.Context()
    self._session_identifier = None
    self._status = definitions.STATUS_INDICATOR_IDLE
    self._storage_merge_reader = None
    self._storage_merge_reader_on_hold = None
    self._task_queue = None
    self._task_queue_port = None
    self._task_manager = task_manager.TaskManager()
    self._worker_memory_limit = worker_memory_limit
    self._worker_timeout = worker_timeout
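
The worker-count selection in the constructor above is essentially a clamp of the available CPU count. The same logic in isolation; the minimum and maximum constants here are illustrative stand-ins, not plaso's actual values:

import multiprocessing

_WORKER_PROCESSES_MINIMUM = 2
_WORKER_PROCESSES_MAXIMUM = 15


def _DetermineNumberOfWorkerProcesses():
    """Determines a worker count clamped between the minimum and maximum."""
    try:
        # Reserve one CPU for the main (foreman) process.
        cpu_count = multiprocessing.cpu_count() - 1
    except NotImplementedError:
        return _WORKER_PROCESSES_MINIMUM

    return max(_WORKER_PROCESSES_MINIMUM,
               min(cpu_count, _WORKER_PROCESSES_MAXIMUM))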
Example #6
 def setUp(self):
     """Sets up the needed objects used throughout the test."""
     self._resolver_context = context.Context()
     test_file = self._GetTestFilePath(['lvmtest.qcow2'])
     path_spec = os_path_spec.OSPathSpec(location=test_file)
     self._qcow_path_spec = qcow_path_spec.QCOWPathSpec(parent=path_spec)
Example #7
    def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
        """Extracts relevant user timestamp entries.

    Args:
      parser_mediator: A parser mediator object (instance of ParserMediator).
      match: Optional dictionary containing keys extracted from PLIST_KEYS.
    """
        if u'name' not in match or u'uid' not in match:
            return

        account = match[u'name'][0]
        uid = match[u'uid'][0]
        cocoa_zero = (timelib.Timestamp.COCOA_TIME_TO_POSIX_BASE *
                      timelib.Timestamp.MICRO_SECONDS_PER_SECOND)

        # INFO: binplist returns a string with the plist XML.
        for policy in match.get(u'passwordpolicyoptions', []):
            try:
                xml_policy = ElementTree.fromstring(policy)
            except (ElementTree.ParseError, LookupError) as exception:
                logging.error((
                    u'Unable to parse XML structure for a user policy, account: '
                    u'{0:s} and uid: {1!s}, with error: {2:s}').format(
                        account, uid, exception))
                continue

            for dict_elements in xml_policy.iterfind(u'dict'):
                key_values = [
                    value.text for value in dict_elements.getchildren()
                ]
                # Taking a list and converting it to a dict, using every other
                # item as a key and the following item as its value.
                policy_dict = dict(zip(key_values[0::2], key_values[1::2]))

            time_string = policy_dict.get(u'passwordLastSetTime', None)
            if time_string:
                try:
                    timestamp = timelib.Timestamp.FromTimeString(time_string)
                except errors.TimestampError:
                    parser_mediator.ProduceParseError(
                        u'Unable to parse time string: {0:s}'.format(
                            time_string))
                    timestamp = 0

                shadow_hash_data = match.get(u'ShadowHashData', None)
                if timestamp > cocoa_zero and isinstance(
                        shadow_hash_data, (list, tuple)):
                    # Extract the password hash information.
                    # It is stored in the attribute ShadowHashData, which is
                    # binary plist data; however binplist only extracts one
                    # level of binary plist and returns this information
                    # as a string.

                    # TODO: change this into a DataRange instead. For this we
                    # need the file offset and size of the ShadowHashData value data.
                    shadow_hash_data = shadow_hash_data[0]

                    resolver_context = context.Context()
                    fake_file = fake_file_io.FakeFile(resolver_context,
                                                      shadow_hash_data)
                    fake_file.open(path_spec=fake_path_spec.FakePathSpec(
                        location=u'ShadowHashData'))

                    try:
                        plist_file = binplist.BinaryPlist(file_obj=fake_file)
                        top_level = plist_file.Parse()
                    except binplist.FormatError:
                        top_level = dict()
                    salted_hash = top_level.get(u'SALTED-SHA512-PBKDF2', None)
                    if salted_hash:
                        password_hash = u'$ml${0:d}${1:s}${2:s}'.format(
                            salted_hash[u'iterations'],
                            binascii.hexlify(salted_hash[u'salt']),
                            binascii.hexlify(salted_hash[u'entropy']))
                    else:
                        password_hash = u'N/A'
                    description = (
                        u'Last time {0:s} ({1!s}) changed the password: {2!s}'
                    ).format(account, uid, password_hash)
                    event_object = plist_event.PlistTimeEvent(
                        self._ROOT, u'passwordLastSetTime', timestamp,
                        description)
                    parser_mediator.ProduceEvent(event_object)

            time_string = policy_dict.get(u'lastLoginTimestamp', None)
            if time_string:
                try:
                    timestamp = timelib.Timestamp.FromTimeString(time_string)
                except errors.TimestampError:
                    parser_mediator.ProduceParseError(
                        u'Unable to parse time string: {0:s}'.format(
                            time_string))
                    timestamp = 0

                description = u'Last login from {0:s} ({1!s})'.format(
                    account, uid)
                if timestamp > cocoa_zero:
                    event_object = plist_event.PlistTimeEvent(
                        self._ROOT, u'lastLoginTimestamp', timestamp,
                        description)
                    parser_mediator.ProduceEvent(event_object)

            time_string = policy_dict.get(u'failedLoginTimestamp', None)
            if time_string:
                try:
                    timestamp = timelib.Timestamp.FromTimeString(time_string)
                except errors.TimestampError:
                    parser_mediator.ProduceParseError(
                        u'Unable to parse time string: {0:s}'.format(
                            time_string))
                    timestamp = 0

                description = (
                    u'Last failed login from {0:s} ({1!s}) ({2!s} times)'
                ).format(account, uid, policy_dict.get(u'failedLoginCount', 0))
                if timestamp > cocoa_zero:
                    event_object = plist_event.PlistTimeEvent(
                        self._ROOT, u'failedLoginTimestamp', timestamp,
                        description)
                    parser_mediator.ProduceEvent(event_object)
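
The policy parsing above hinges on a small idiom: the flat list of alternating keys and values read from the plist XML is turned into a dictionary by slicing and zipping. The same idiom in isolation (plain Python; the sample values are made up):

key_values = [
    u'failedLoginCount', u'0',
    u'passwordLastSetTime', u'2013-12-28T00:44:32Z']

# Every other item starting at index 0 is a key, every other item starting
# at index 1 is the matching value.
policy_dict = dict(zip(key_values[0::2], key_values[1::2]))
# {u'failedLoginCount': u'0',
#  u'passwordLastSetTime': u'2013-12-28T00:44:32Z'}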
Example #8
class Resolver(object):
    """Class that implements the path specification resolver."""

    _resolver_context = context.Context()
    _resolver_helpers = {}

    key_chain = keychain.KeyChain()

    @classmethod
    def DeregisterHelper(cls, resolver_helper):
        """Deregisters a path specification resolver helper.

    Args:
      resolver_helper: the resolver helper object (instance of
                       ResolverHelper).

    Raises:
      KeyError: if resolver helper object is not set for the corresponding
                type indicator.
    """
        if resolver_helper.type_indicator not in cls._resolver_helpers:
            raise KeyError(
                u'Resolver helper object not set for type indicator: {0:s}.'.
                format(resolver_helper.type_indicator))

        del cls._resolver_helpers[resolver_helper.type_indicator]

    @classmethod
    def OpenFileEntry(cls, path_spec_object, resolver_context=None):
        """Opens a file entry object defined by path specification.

    Args:
      path_spec_object: the path specification (instance of PathSpec).
      resolver_context: the optional resolver context (instance of
                        resolver.Context). The default is None which will use
                        the built in context which is not multi process safe.

    Returns:
      The file entry object (instance of vfs.FileEntry) or None if the path
      specification could not be resolved.
    """
        file_system = cls.OpenFileSystem(path_spec_object,
                                         resolver_context=resolver_context)

        if resolver_context is None:
            resolver_context = cls._resolver_context

        file_entry = file_system.GetFileEntryByPathSpec(path_spec_object)

        # Release the file system so it will be removed from the cache
        # when the file entry is destroyed.
        resolver_context.ReleaseFileSystem(file_system)

        return file_entry

    @classmethod
    def OpenFileObject(cls, path_spec_object, resolver_context=None):
        """Opens a file-like object defined by path specification.

    Args:
      path_spec_object: the path specification (instance of PathSpec).
      resolver_context: the optional resolver context (instance of
                        resolver.Context). The default is None which will use
                        the built in context which is not multi process safe.

    Returns:
      The file-like object (instance of file.FileIO) or None if the path
      specification could not be resolved.

    Raises:
      KeyError: if resolver helper object is not set for the corresponding
                type indicator.
      PathSpecError: if the path specification is incorrect.
      TypeError: if the path specification type is unsupported.
    """
        if not isinstance(path_spec_object, path_spec.PathSpec):
            raise TypeError(u'Unsupported path specification type.')

        if resolver_context is None:
            resolver_context = cls._resolver_context

        if path_spec_object.type_indicator == definitions.TYPE_INDICATOR_MOUNT:
            if path_spec_object.HasParent():
                raise errors.PathSpecError(
                    u'Unsupported mount path specification with parent.')

            mount_point = getattr(path_spec_object, u'identifier', None)
            if not mount_point:
                raise errors.PathSpecError(
                    u'Unsupported path specification without mount point identifier.'
                )

            path_spec_object = mount_manager.MountPointManager.GetMountPoint(
                mount_point)
            if not path_spec_object:
                raise errors.MountPointError(
                    u'No such mount point: {0:s}'.format(mount_point))

        file_object = resolver_context.GetFileObject(path_spec_object)
        if not file_object:
            if path_spec_object.type_indicator not in cls._resolver_helpers:
                raise KeyError(
                    (u'Resolver helper object not set for type indicator: '
                     u'{0:s}.').format(path_spec_object.type_indicator))

            resolver_helper = cls._resolver_helpers[
                path_spec_object.type_indicator]
            file_object = resolver_helper.NewFileObject(resolver_context)

        file_object.open(path_spec=path_spec_object)
        return file_object

    @classmethod
    def OpenFileSystem(cls, path_spec_object, resolver_context=None):
        """Opens a file system object defined by path specification.

    Args:
      path_spec_object: the path specification (instance of PathSpec).
      resolver_context: the optional resolver context (instance of
                        resolver.Context). The default is None which will use
                        the built in context which is not multi process safe.

    Returns:
      The file system object (instance of vfs.FileSystem) or None if the path
      specification could not be resolved or has no file system object.

    Raises:
      AccessError: if the access to open the file system was denied.
      BackEndError: if the file system cannot be opened.
      KeyError: if resolver helper object is not set for the corresponding
                type indicator.
      PathSpecError: if the path specification is incorrect.
      TypeError: if the path specification type is unsupported.
    """
        if not isinstance(path_spec_object, path_spec.PathSpec):
            raise TypeError(u'Unsupported path specification type.')

        if resolver_context is None:
            resolver_context = cls._resolver_context

        if path_spec_object.type_indicator == definitions.TYPE_INDICATOR_MOUNT:
            if path_spec_object.HasParent():
                raise errors.PathSpecError(
                    u'Unsupported mount path specification with parent.')

            mount_point = getattr(path_spec_object, u'identifier', None)
            if not mount_point:
                raise errors.PathSpecError(
                    u'Unsupported path specification without mount point identifier.'
                )

            path_spec_object = mount_manager.MountPointManager.GetMountPoint(
                mount_point)
            if not path_spec_object:
                raise errors.MountPointError(
                    u'No such mount point: {0:s}'.format(mount_point))

        file_system = resolver_context.GetFileSystem(path_spec_object)
        if not file_system:
            if path_spec_object.type_indicator not in cls._resolver_helpers:
                raise KeyError(
                    (u'Resolver helper object not set for type indicator: '
                     u'{0:s}.').format(path_spec_object.type_indicator))

            resolver_helper = cls._resolver_helpers[
                path_spec_object.type_indicator]
            file_system = resolver_helper.NewFileSystem(resolver_context)

        try:
            file_system.Open(path_spec_object)
        except (errors.AccessError, errors.PathSpecError):
            raise
        except (IOError, ValueError) as exception:
            raise errors.BackEndError(
                u'Unable to open file system with error: {0:s}'.format(
                    exception))

        return file_system

    @classmethod
    def RegisterHelper(cls, resolver_helper):
        """Registers a path specification resolver helper.

    Args:
      resolver_helper: the resolver helper object (instance of
                       ResolverHelper).

    Raises:
      KeyError: if resolver helper object is already set for the corresponding
                type indicator.
    """
        if resolver_helper.type_indicator in cls._resolver_helpers:
            raise KeyError(
                (u'Resolver helper object already set for type indicator: '
                 u'{0!s}.').format(resolver_helper.type_indicator))

        cls._resolver_helpers[resolver_helper.type_indicator] = resolver_helper
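
RegisterHelper and DeregisterHelper above implement a simple registry keyed on the helper's type indicator. A hedged sketch of how a helper could be registered with this Resolver; ExampleResolverHelper and its type indicator are hypothetical and only illustrate the expected interface:

class ExampleResolverHelper(object):
    """Hypothetical resolver helper, used only to illustrate registration."""

    type_indicator = u'EXAMPLE'

    def NewFileObject(self, resolver_context):
        """Would return a file-like object (instance of file.FileIO)."""
        return None

    def NewFileSystem(self, resolver_context):
        """Would return a file system object (instance of vfs.FileSystem)."""
        return None


helper = ExampleResolverHelper()
Resolver.RegisterHelper(helper)    # Raises KeyError if already registered.
Resolver.DeregisterHelper(helper)  # Raises KeyError if not registered.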
Example #9
    def _TestImageWithPartitionsCollections(self, collect_directory_metadata):
        """Test collection on a storage media image with multiple partitions.

    The image contains 2 partitions (p1 and p2) with NTFS file systems.

    Args:
      collect_directory_metadata: boolean value that indicates whether
                                  directory metadata should be collected.
    """
        # Note that the source file is a RAW (VMDK flat) image.
        test_file = self._GetTestFilePath([u'multi_partition_image.vmdk'])

        image_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_OS, location=test_file)

        p1_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION,
            location=u'/p1',
            part_index=2,
            start_offset=0x00010000,
            parent=image_path_spec)
        p1_file_system_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_TSK,
            location=u'/',
            parent=p1_path_spec)

        p2_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION,
            location=u'/p2',
            part_index=3,
            start_offset=0x00510000,
            parent=image_path_spec)
        p2_file_system_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_TSK,
            location=u'/',
            parent=p2_path_spec)

        test_path_spec_queue = single_process.SingleProcessQueue()
        resolver_context = context.Context()
        test_collector = collector.Collector(test_path_spec_queue,
                                             resolver_context=resolver_context)
        test_collector.SetCollectDirectoryMetadata(collect_directory_metadata)

        test_collector.Collect(
            [p1_file_system_path_spec, p2_file_system_path_spec])

        test_collector_queue_consumer = TestCollectorQueueConsumer(
            test_path_spec_queue)
        test_collector_queue_consumer.ConsumeItems()

        paths = test_collector_queue_consumer.GetFilePaths()

        expected_paths_p1 = [
            u'/$AttrDef', u'/$BadClus', u'/$BadClus:$Bad', u'/$Bitmap',
            u'/$Boot', u'/$Extend/$ObjId', u'/$Extend/$Quota',
            u'/$Extend/$Reparse', u'/$Extend/$RmMetadata/$Repair',
            u'/$Extend/$RmMetadata/$Repair:$Config', u'/$LogFile', u'/$MFT',
            u'/$MFTMirr', u'/$Secure', u'/$Secure:$SDS', u'/$UpCase',
            u'/$Volume', u'/file1.txt', u'/file2.txt'
        ]

        if collect_directory_metadata:
            expected_directory_metadata_paths_p1 = [
                u'/$Extend',
                u'/$Extend/$RmMetadata',
                u'/$Extend/$RmMetadata/$TxfLog',
            ]

        expected_paths_p2 = [
            u'/$AttrDef', u'/$BadClus', u'/$BadClus:$Bad', u'/$Bitmap',
            u'/$Boot', u'/$Extend/$ObjId', u'/$Extend/$Quota',
            u'/$Extend/$Reparse', u'/$Extend/$RmMetadata/$Repair',
            u'/$Extend/$RmMetadata/$Repair:$Config', u'/$LogFile', u'/$MFT',
            u'/$MFTMirr', u'/$Secure', u'/$Secure:$SDS', u'/$UpCase',
            u'/$Volume', u'/file1_on_part_2.txt', u'/file2_on_part_2.txt'
        ]

        if collect_directory_metadata:
            expected_directory_metadata_paths_p2 = [
                u'/$Extend',
                u'/$Extend/$RmMetadata',
                u'/$Extend/$RmMetadata/$TxfLog',
            ]

        expected_paths = []
        expected_paths.extend(expected_paths_p1)
        expected_paths.extend(expected_paths_p2)

        if collect_directory_metadata:
            expected_paths.extend(expected_directory_metadata_paths_p1)
            expected_paths.extend(expected_directory_metadata_paths_p2)

        self.assertEqual(test_collector_queue_consumer.number_of_path_specs,
                         len(expected_paths))

        self.assertEqual(sorted(paths), sorted(expected_paths))
Example #10
 def setUp(self):
     """Sets up the needed objects used throughout the test."""
     self._resolver_context = context.Context()
     test_file = os.path.join(u'test_data', u'syslog.gz')
     path_spec = os_path_spec.OSPathSpec(location=test_file)
     self._gzip_path_spec = gzip_path_spec.GzipPathSpec(parent=path_spec)
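
A layered path specification like the gzip-over-OS chain in this setUp is normally resolved through the Resolver shown in the earlier examples. A minimal, hedged sketch; the module paths are assumed and reading a few bytes is only for illustration:

import os

from dfvfs.path import gzip_path_spec
from dfvfs.path import os_path_spec
from dfvfs.resolver import resolver

test_file = os.path.join(u'test_data', u'syslog.gz')
path_spec = os_path_spec.OSPathSpec(location=test_file)
gzip_spec = gzip_path_spec.GzipPathSpec(parent=path_spec)

# The resolver walks the parent chain: the OS layer opens the file on disk
# and the gzip layer exposes the decompressed stream.
file_object = resolver.Resolver.OpenFileObject(gzip_spec)
data = file_object.read(16)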
Example #11
    def testExtractionWorker(self):
        """Tests the extraction worker functionality."""
        collection_queue = single_process.SingleProcessQueue()
        storage_queue = single_process.SingleProcessQueue()
        parse_error_queue = single_process.SingleProcessQueue()

        event_queue_producer = single_process.SingleProcessItemQueueProducer(
            storage_queue)
        parse_error_queue_producer = single_process.SingleProcessItemQueueProducer(
            parse_error_queue)

        knowledge_base_object = knowledge_base.KnowledgeBase()

        parser_mediator = parsers_mediator.ParserMediator(
            event_queue_producer, parse_error_queue_producer,
            knowledge_base_object)

        resolver_context = context.Context()

        extraction_worker = worker.BaseEventExtractionWorker(
            0,
            collection_queue,
            event_queue_producer,
            parse_error_queue_producer,
            parser_mediator,
            resolver_context=resolver_context)

        self.assertNotEqual(extraction_worker, None)

        extraction_worker.InitializeParserObjects()

        # Process a file.
        source_path = self._GetTestFilePath([u'syslog'])
        path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_OS, location=source_path)

        collection_queue.PushItem(path_spec)
        extraction_worker.Run()

        test_queue_consumer = test_lib.TestQueueConsumer(storage_queue)
        test_queue_consumer.ConsumeItems()

        self.assertEqual(test_queue_consumer.number_of_items, 16)

        # Process a compressed file.
        source_path = self._GetTestFilePath([u'syslog.gz'])
        path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_OS, location=source_path)

        collection_queue.PushItem(path_spec)
        extraction_worker.Run()

        test_queue_consumer = test_lib.TestQueueConsumer(storage_queue)
        test_queue_consumer.ConsumeItems()

        self.assertEqual(test_queue_consumer.number_of_items, 16)

        source_path = self._GetTestFilePath([u'syslog.bz2'])
        path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_OS, location=source_path)

        collection_queue.PushItem(path_spec)
        extraction_worker.Run()

        test_queue_consumer = test_lib.TestQueueConsumer(storage_queue)
        test_queue_consumer.ConsumeItems()

        self.assertEqual(test_queue_consumer.number_of_items, 15)

        # Process a file in an archive.
        source_path = self._GetTestFilePath([u'syslog.tar'])
        path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_OS, location=source_path)
        path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_TAR,
            location=u'/syslog',
            parent=path_spec)

        collection_queue.PushItem(path_spec)
        extraction_worker.Run()

        test_queue_consumer = test_lib.TestQueueConsumer(storage_queue)
        test_queue_consumer.ConsumeItems()

        self.assertEqual(test_queue_consumer.number_of_items, 13)

        # Process an archive file without "process archive files" mode.
        extraction_worker.SetProcessArchiveFiles(False)

        source_path = self._GetTestFilePath([u'syslog.tar'])
        path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_OS, location=source_path)

        collection_queue.PushItem(path_spec)
        extraction_worker.Run()

        test_queue_consumer = test_lib.TestQueueConsumer(storage_queue)
        test_queue_consumer.ConsumeItems()

        self.assertEqual(test_queue_consumer.number_of_items, 3)

        # Process an archive file with "process archive files" mode.
        extraction_worker.SetProcessArchiveFiles(True)

        source_path = self._GetTestFilePath([u'syslog.tar'])
        path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_OS, location=source_path)

        collection_queue.PushItem(path_spec)
        extraction_worker.Run()

        test_queue_consumer = test_lib.TestQueueConsumer(storage_queue)
        test_queue_consumer.ConsumeItems()

        self.assertEqual(test_queue_consumer.number_of_items, 16)

        # Process a file in a compressed archive.
        source_path = self._GetTestFilePath([u'syslog.tgz'])
        path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_OS, location=source_path)
        path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_GZIP, parent=path_spec)
        path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_TAR,
            location=u'/syslog',
            parent=path_spec)

        collection_queue.PushItem(path_spec)
        extraction_worker.Run()

        test_queue_consumer = test_lib.TestQueueConsumer(storage_queue)
        test_queue_consumer.ConsumeItems()

        self.assertEqual(test_queue_consumer.number_of_items, 13)

        # Process an archive file with "process archive files" mode.
        extraction_worker.SetProcessArchiveFiles(True)

        source_path = self._GetTestFilePath([u'syslog.tgz'])
        path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_OS, location=source_path)

        collection_queue.PushItem(path_spec)
        extraction_worker.Run()

        test_queue_consumer = test_lib.TestQueueConsumer(storage_queue)
        test_queue_consumer.ConsumeItems()

        self.assertEqual(test_queue_consumer.number_of_items, 17)
Example #12
 def __init__(self):
     """Initializes a fake file system builder."""
     super(FakeFileSystemBuilder, self).__init__()
     resolver_context = context.Context()
     self.file_system = fake_file_system.FakeFileSystem(resolver_context)
Example #13
    def _Main(self):
        """The main loop."""
        self._event_queue_producer = queue.ItemQueueProducer(
            self._event_object_queue)
        self._parse_error_queue_producer = queue.ItemQueueProducer(
            self._parse_error_queue)

        parser_mediator = parsers_mediator.ParserMediator(
            self._event_queue_producer, self._parse_error_queue_producer,
            self._knowledge_base)

        # We need a resolver context per process to prevent multi processing
        # issues with file objects stored in images.
        resolver_context = context.Context()

        self._extraction_worker = worker.BaseEventExtractionWorker(
            self._worker_number,
            self._path_spec_queue,
            self._event_queue_producer,
            self._parse_error_queue_producer,
            parser_mediator,
            resolver_context=resolver_context)

        self._extraction_worker.SetEnableDebugOutput(self._enable_debug_output)

        self._extraction_worker.SetEnableProfiling(
            self._enable_profiling,
            profiling_sample_rate=self._profiling_sample_rate,
            profiling_type=self._profiling_type)

        self._extraction_worker.SetProcessArchiveFiles(
            self._process_archive_files)

        if self._filter_object:
            self._extraction_worker.SetFilterObject(self._filter_object)

        if self._mount_path:
            self._extraction_worker.SetMountPath(self._mount_path)

        if self._text_prepend:
            self._extraction_worker.SetTextPrepend(self._text_prepend)

        # We need to initialize the parser and hasher objects after the process
        # has forked otherwise on Windows the "fork" will fail with
        # a PickleError for Python modules that cannot be pickled.
        self._extraction_worker.InitializeParserObjects(
            parser_filter_string=self._parser_filter_string)

        if self._hasher_names_string:
            self._extraction_worker.SetHashers(self._hasher_names_string)

        logging.debug(u'Extraction worker: {0!s} (PID: {1:d}) started'.format(
            self._name, self._pid))

        try:
            self._extraction_worker.Run()

        except Exception as exception:
            logging.warning((u'Unhandled exception in extraction worker {0!s} '
                             u'(PID: {1:d}).').format(self._name, self._pid))
            logging.exception(exception)

        logging.debug(u'Extraction worker: {0!s} (PID: {1:d}) stopped'.format(
            self._name, self._pid))

        self._path_spec_queue.Close(abort=True)
        self._event_object_queue.Close(abort=True)
        self._parse_error_queue.Close(abort=True)
Example #14
  def _ProcessSourceMultiProcessMode(self, options):
    """Processes the source in a multiple process.

    Multiprocessing is used to start up separate processes.

    Args:
      options: the command line arguments (instance of argparse.Namespace).
    """
    # TODO: replace by an option.
    start_collection_process = True

    self._number_of_worker_processes = getattr(options, 'workers', 0)

    logging.info(u'Starting extraction in multi process mode.')

    self._engine = multi_process.MultiProcessEngine(
        maximum_number_of_queued_items=self._queue_size)

    self._engine.SetEnableDebugOutput(self._debug_mode)
    self._engine.SetEnableProfiling(
        self._enable_profiling,
        profiling_sample_rate=self._profiling_sample_rate)
    self._engine.SetProcessArchiveFiles(self._process_archive_files)

    if self._filter_object:
      self._engine.SetFilterObject(self._filter_object)

    if self._mount_path:
      self._engine.SetMountPath(self._mount_path)

    if self._text_prepend:
      self._engine.SetTextPrepend(self._text_prepend)
    # TODO: add support to handle multiple partitions.
    self._engine.SetSource(
        self.GetSourcePathSpec(), resolver_context=self._resolver_context)

    logging.debug(u'Starting preprocessing.')
    pre_obj = self.PreprocessSource(options)
    logging.debug(u'Preprocessing done.')

    # TODO: make sure parsers option is not set by preprocessing.
    parser_filter_string = getattr(options, 'parsers', '')

    self._parser_names = []
    for _, parser_class in parsers_manager.ParsersManager.GetParsers(
        parser_filter_string=parser_filter_string):
      self._parser_names.append(parser_class.NAME)

    hasher_names_string = getattr(options, u'hashers', u'')

    self._hasher_names = []
    hasher_manager = hashers_manager.HashersManager
    for hasher_name in hasher_manager.GetHasherNamesFromString(
        hasher_names_string=hasher_names_string):
      self._hasher_names.append(hasher_name)

    self._PreprocessSetCollectionInformation(options, pre_obj)

    if 'filestat' in self._parser_names:
      include_directory_stat = True
    else:
      include_directory_stat = False

    filter_file = getattr(options, 'file_filter', None)
    if filter_file:
      filter_find_specs = engine_utils.BuildFindSpecsFromFile(
          filter_file, pre_obj=pre_obj)
    else:
      filter_find_specs = None

    if start_collection_process:
      resolver_context = context.Context()
    else:
      resolver_context = self._resolver_context

    # TODO: create multi process collector.
    self._collector = self._engine.CreateCollector(
        include_directory_stat, vss_stores=self._vss_stores,
        filter_find_specs=filter_find_specs, resolver_context=resolver_context)

    self._DebugPrintCollector(options)

    if self._output_module:
      storage_writer = storage.BypassStorageWriter(
          self._engine.storage_queue, self._storage_file_path,
          output_module_string=self._output_module, pre_obj=pre_obj)
    else:
      storage_writer = storage.StorageFileWriter(
          self._engine.storage_queue, self._storage_file_path,
          buffer_size=self._buffer_size, pre_obj=pre_obj,
          serializer_format=self._storage_serializer_format)

    try:
      self._engine.ProcessSource(
          self._collector, storage_writer,
          parser_filter_string=parser_filter_string,
          hasher_names_string=hasher_names_string,
          number_of_extraction_workers=self._number_of_worker_processes,
          have_collection_process=start_collection_process,
          have_foreman_process=self._run_foreman,
          show_memory_usage=self._show_worker_memory_information)

    except KeyboardInterrupt:
      self._CleanUpAfterAbort()
      raise errors.UserAbort(u'Process source aborted.')
Example #15
 def setUp(self):
   """Sets up the needed objects used throughout the test."""
   self._resolver_context = context.Context()
   test_file = self._GetTestFilePath(['syslog.gz'])
   path_spec = os_path_spec.OSPathSpec(location=test_file)
   self._gzip_path_spec = gzip_path_spec.GzipPathSpec(parent=path_spec)
Example #16
    def _Main(self):
        """The main loop."""
        self._parser_mediator = parsers_mediator.ParserMediator(
            None,
            self._knowledge_base,
            preferred_year=self._preferred_year,
            temporary_directory=self._temporary_directory)

        if self._filter_object:
            self._parser_mediator.SetFilterObject(self._filter_object)

        if self._mount_path:
            self._parser_mediator.SetMountPath(self._mount_path)

        if self._text_prepend:
            self._parser_mediator.SetTextPrepend(self._text_prepend)

        # We need a resolver context per process to prevent multi processing
        # issues with file objects stored in images.
        resolver_context = context.Context()

        # We need to initialize the parser and hasher objects after the process
        # has forked otherwise on Windows the "fork" will fail with
        # a PickleError for Python modules that cannot be pickled.
        self._extraction_worker = worker.EventExtractionWorker(
            resolver_context,
            parser_filter_expression=self._parser_filter_expression,
            process_archives=self._process_archives,
            process_compressed_streams=self._process_compressed_streams)

        if self._hasher_names_string:
            self._extraction_worker.SetHashers(self._hasher_names_string)

        if self._yara_rules_string:
            self._extraction_worker.SetYaraRules(self._yara_rules_string)

        self._StartProfiling()

        logging.debug(u'Worker: {0!s} (PID: {1:d}) started'.format(
            self._name, self._pid))

        self._status = definitions.PROCESSING_STATUS_RUNNING

        try:
            logging.debug(
                u'{0!s} (PID: {1:d}) started monitoring task queue.'.format(
                    self._name, self._pid))

            while not self._abort:
                try:
                    task = self._task_queue.PopItem()
                except (errors.QueueClose, errors.QueueEmpty) as exception:
                    logging.debug(
                        u'ConsumeItems exiting with exception {0:s}.'.format(
                            type(exception)))
                    break

                if isinstance(task, plaso_queue.QueueAbort):
                    logging.debug(
                        u'ConsumeItems exiting, dequeued QueueAbort object.')
                    break

                self._ProcessTask(task)

            logging.debug(
                u'{0!s} (PID: {1:d}) stopped monitoring task queue.'.format(
                    self._name, self._pid))

        # All exceptions need to be caught here to prevent the process
        # from being killed by an uncaught exception.
        except Exception as exception:  # pylint: disable=broad-except
            logging.warning(
                u'Unhandled exception in process: {0!s} (PID: {1:d}).'.format(
                    self._name, self._pid))
            logging.exception(exception)

            self._abort = True

        self._StopProfiling()
        self._extraction_worker = None
        self._parser_mediator = None
        self._storage_writer = None

        if self._abort:
            self._status = definitions.PROCESSING_STATUS_ABORTED
        else:
            self._status = definitions.PROCESSING_STATUS_COMPLETED

        logging.debug(u'Worker: {0!s} (PID: {1:d}) stopped'.format(
            self._name, self._pid))

        try:
            self._task_queue.Close(abort=self._abort)
        except errors.QueueAlreadyClosed:
            logging.error(u'Queue for {0:s} was already closed.'.format(
                self.name))
Example #17
    def testExtractPathSpecsStorageMediaImageWithPartitions(self):
        """Tests the ExtractPathSpecs function an image file with partitions.

    The image file contains 2 partitions, p1 and p2, both with NTFS
    file systems.
    """
        # Note that the source file is a RAW (VMDK flat) image.
        test_file = self._GetTestFilePath(['multi_partition_image.vmdk'])

        image_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_OS, location=test_file)

        p1_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION,
            location='/p1',
            part_index=2,
            start_offset=0x00010000,
            parent=image_path_spec)
        p1_file_system_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_TSK,
            location='/',
            parent=p1_path_spec)

        p2_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION,
            location='/p2',
            part_index=3,
            start_offset=0x00510000,
            parent=image_path_spec)
        p2_file_system_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_TSK,
            location='/',
            parent=p2_path_spec)

        resolver_context = context.Context()
        test_extractor = extractors.PathSpecExtractor(resolver_context)

        path_specs = list(
            test_extractor.ExtractPathSpecs(
                [p1_file_system_path_spec, p2_file_system_path_spec],
                resolver_context=resolver_context))

        expected_paths_p1 = [
            '/$AttrDef', '/$BadClus', '/$BadClus:$Bad', '/$Bitmap', '/$Boot',
            '/$Extend', '/$Extend/$ObjId', '/$Extend/$Quota',
            '/$Extend/$Reparse', '/$Extend/$RmMetadata',
            '/$Extend/$RmMetadata/$Repair',
            '/$Extend/$RmMetadata/$Repair:$Config',
            '/$Extend/$RmMetadata/$TxfLog', '/$LogFile', '/$MFT', '/$MFTMirr',
            '/$Secure', '/$Secure:$SDS', '/$UpCase', '/$Volume', '/file1.txt',
            '/file2.txt'
        ]

        expected_paths_p2 = [
            '/$AttrDef', '/$BadClus', '/$BadClus:$Bad', '/$Bitmap', '/$Boot',
            '/$Extend', '/$Extend/$ObjId', '/$Extend/$Quota',
            '/$Extend/$Reparse', '/$Extend/$RmMetadata',
            '/$Extend/$RmMetadata/$Repair',
            '/$Extend/$RmMetadata/$Repair:$Config',
            '/$Extend/$RmMetadata/$TxfLog', '/$LogFile', '/$MFT', '/$MFTMirr',
            '/$Secure', '/$Secure:$SDS', '/$UpCase', '/$Volume',
            '/file1_on_part_2.txt', '/file2_on_part_2.txt'
        ]

        paths = self._GetFilePaths(path_specs)
        expected_paths = expected_paths_p1
        expected_paths.extend(expected_paths_p2)

        self.assertEqual(len(path_specs), len(expected_paths))
        self.assertEqual(sorted(paths), sorted(expected_paths))
Example #18
  def GetEntries(self, parser_context, match=None, **unused_kwargs):
    """Extracts relevant user timestamp entries.

    Args:
      parser_context: A parser context object (instance of ParserContext).
      match: Optional dictionary containing keys extracted from PLIST_KEYS.
             The default is None.
    """
    account = match['name'][0]
    uid = match['uid'][0]
    cocoa_zero = (
        timelib.Timestamp.COCOA_TIME_TO_POSIX_BASE *
        timelib.Timestamp.MICRO_SECONDS_PER_SECOND)
    # INFO: binplist returns a string with the plist XML.
    for policy in match['passwordpolicyoptions']:
      xml_policy = ElementTree.fromstring(policy)
      for dict_elements in xml_policy.iterfind('dict'):
        key_values = [value.text for value in dict_elements.getchildren()]
        policy_dict = dict(zip(key_values[0::2], key_values[1::2]))

      if policy_dict.get('passwordLastSetTime', 0):
        timestamp = timelib.Timestamp.FromTimeString(
             policy_dict.get('passwordLastSetTime', '0'))
        if timestamp > cocoa_zero:
          # Extract the password hash information.
          # It is stored in the attribute ShadowHashData, which is
          # binary plist data; however binplist only extracts one
          # level of binary plist and returns this information
          # as a string.

          # TODO: change this into a DataRange instead. For this we
          # need the file offset and size of the ShadowHashData value data.
          resolver_context = context.Context()
          fake_file = fake_file_io.FakeFile(
              resolver_context, match['ShadowHashData'][0])
          fake_file.open(path_spec=fake_path_spec.FakePathSpec(
              location=u'ShadowHashData'))

          try:
            plist_file = binplist.BinaryPlist(file_obj=fake_file)
            top_level = plist_file.Parse()
          except binplist.FormatError:
            top_level = dict()
          salted_hash = top_level.get('SALTED-SHA512-PBKDF2', None)
          if salted_hash:
            password_hash = u'$ml${0:d}${1:s}${2:s}'.format(
                salted_hash['iterations'],
                binascii.hexlify(salted_hash['salt']),
                binascii.hexlify(salted_hash['entropy']))
          else:
            password_hash = u'N/A'
          description = (
              u'Last time {0:s} ({1!s}) changed the password: {2!s}').format(
                  account, uid, password_hash)
          event_object = plist_event.PlistTimeEvent(
              self._ROOT, u'passwordLastSetTime', timestamp, description)
          parser_context.ProduceEvent(event_object, plugin_name=self.NAME)

      if policy_dict.get('lastLoginTimestamp', 0):
        timestamp = timelib.Timestamp.FromTimeString(
             policy_dict.get('lastLoginTimestamp', '0'))
        description = u'Last login from {0:s} ({1!s})'.format(account, uid)
        if timestamp > cocoa_zero:
          event_object = plist_event.PlistTimeEvent(
              self._ROOT, u'lastLoginTimestamp', timestamp, description)
          parser_context.ProduceEvent(event_object, plugin_name=self.NAME)

      if policy_dict.get('failedLoginTimestamp', 0):
        timestamp = timelib.Timestamp.FromTimeString(
             policy_dict.get('failedLoginTimestamp', '0'))
        description = (
            u'Last failed login from {0:s} ({1!s}) ({2!s} times)').format(
                account, uid, policy_dict['failedLoginCount'])
        if timestamp > cocoa_zero:
          event_object = plist_event.PlistTimeEvent(
              self._ROOT, u'failedLoginTimestamp', timestamp, description)
          parser_context.ProduceEvent(event_object, plugin_name=self.NAME)
Example #19
  def _Main(self):
    """The main loop."""
    # We need a resolver context per process to prevent multi processing
    # issues with file objects stored in images.
    resolver_context = context.Context()

    for credential_configuration in self._processing_configuration.credentials:
      resolver.Resolver.key_chain.SetCredential(
          credential_configuration.path_spec,
          credential_configuration.credential_type,
          credential_configuration.credential_data)

    self._parser_mediator = parsers_mediator.ParserMediator(
        None, self._knowledge_base,
        preferred_year=self._processing_configuration.preferred_year,
        resolver_context=resolver_context,
        temporary_directory=self._processing_configuration.temporary_directory)

    self._parser_mediator.SetEventExtractionConfiguration(
        self._processing_configuration.event_extraction)

    self._parser_mediator.SetInputSourceConfiguration(
        self._processing_configuration.input_source)

    # We need to initialize the parser and hasher objects after the process
    # has forked otherwise on Windows the "fork" will fail with
    # a PickleError for Python modules that cannot be pickled.
    self._extraction_worker = worker.EventExtractionWorker(
        parser_filter_expression=(
            self._processing_configuration.parser_filter_expression))

    self._extraction_worker.SetExtractionConfiguration(
        self._processing_configuration.extraction)

    self._StartProfiling()

    logging.debug('Worker: {0!s} (PID: {1:d}) started'.format(
        self._name, self._pid))

    self._status = definitions.PROCESSING_STATUS_RUNNING

    try:
      logging.debug('{0!s} (PID: {1:d}) started monitoring task queue.'.format(
          self._name, self._pid))

      while not self._abort:
        try:
          task = self._task_queue.PopItem()
        except (errors.QueueClose, errors.QueueEmpty) as exception:
          logging.debug('ConsumeItems exiting with exception {0:s}.'.format(
              type(exception)))
          break

        if isinstance(task, plaso_queue.QueueAbort):
          logging.debug('ConsumeItems exiting, dequeued QueueAbort object.')
          break

        self._ProcessTask(task)

      logging.debug('{0!s} (PID: {1:d}) stopped monitoring task queue.'.format(
          self._name, self._pid))

    # All exceptions need to be caught here to prevent the process
    # from being killed by an uncaught exception.
    except Exception as exception:  # pylint: disable=broad-except
      logging.warning(
          'Unhandled exception in process: {0!s} (PID: {1:d}).'.format(
              self._name, self._pid))
      logging.exception(exception)

      self._abort = True

    self._StopProfiling()
    self._extraction_worker = None
    self._parser_mediator = None
    self._storage_writer = None

    if self._abort:
      self._status = definitions.PROCESSING_STATUS_ABORTED
    else:
      self._status = definitions.PROCESSING_STATUS_COMPLETED

    logging.debug('Worker: {0!s} (PID: {1:d}) stopped'.format(
        self._name, self._pid))

    try:
      self._task_queue.Close(abort=self._abort)
    except errors.QueueAlreadyClosed:
      logging.error('Queue for {0:s} was already closed.'.format(self.name))
Example #20
  def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
    """Extracts relevant user timestamp entries.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
    """
    if 'name' not in match or 'uid' not in match:
      return

    account = match['name'][0]
    uid = match['uid'][0]

    # INFO: binplist returns a string with the plist XML.
    for policy in match.get('passwordpolicyoptions', []):
      try:
        xml_policy = ElementTree.fromstring(policy)
      except (ElementTree.ParseError, LookupError) as exception:
        logging.error((
            'Unable to parse XML structure for a user policy, account: '
            '{0:s} and uid: {1!s}, with error: {2!s}').format(
                account, uid, exception))
        continue

      for dict_elements in xml_policy.iterfind('dict'):
        key_values = [value.text for value in dict_elements]
        # Convert the list into a dict, pairing every other item as the key
        # and the item that follows it as the value.
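        # For example, ['key1', 'value1', 'key2', 'value2'] becomes
        # {'key1': 'value1', 'key2': 'value2'}.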
        policy_dict = dict(zip(key_values[0::2], key_values[1::2]))

      time_string = policy_dict.get('passwordLastSetTime', None)
      if time_string and time_string != '2001-01-01T00:00:00Z':
        try:
          date_time = dfdatetime_time_elements.TimeElements()
          date_time.CopyFromStringISO8601(time_string)
        except ValueError:
          date_time = None
          parser_mediator.ProduceExtractionError(
              'unable to parse password last set time string: {0:s}'.format(
                  time_string))

        shadow_hash_data = match.get('ShadowHashData', None)
        if date_time and isinstance(shadow_hash_data, (list, tuple)):
          # Extract the password hash information.
          # It is stored in the attribute ShadowHashData, which is
          # binary plist data; however, binplist only extracts one
          # level of a binary plist and returns this information
          # as a string.

          # TODO: change this into a DataRange instead. For this we
          # need the file offset and size of the ShadowHashData value data.
          shadow_hash_data = shadow_hash_data[0]

          resolver_context = context.Context()
          fake_file = fake_file_io.FakeFile(
              resolver_context, shadow_hash_data)
          shadow_hash_data_path_spec = fake_path_spec.FakePathSpec(
              location='ShadowHashData')
          fake_file.open(path_spec=shadow_hash_data_path_spec)

          try:
            plist_file = binplist.BinaryPlist(file_obj=fake_file)
            top_level = plist_file.Parse()
          except binplist.FormatError:
            top_level = dict()
          salted_hash = top_level.get('SALTED-SHA512-PBKDF2', None)
          if salted_hash:
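            # Note: the '$ml$<iterations>$<salt>$<entropy>' layout appears to
            # match the representation commonly used for SALTED-SHA512-PBKDF2
            # (macOS 10.8+) hashes by password auditing tools.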
            password_hash = '$ml${0:d}${1:s}${2:s}'.format(
                salted_hash['iterations'],
                binascii.hexlify(salted_hash['salt']),
                binascii.hexlify(salted_hash['entropy']))
          else:
            password_hash = 'N/A'

          event_data = plist_event.PlistTimeEventData()
          event_data.desc = (
              'Last time {0:s} ({1!s}) changed the password: {2!s}').format(
                  account, uid, password_hash)
          event_data.key = 'passwordLastSetTime'
          event_data.root = self._ROOT

          event = time_events.DateTimeValuesEvent(
              date_time, definitions.TIME_DESCRIPTION_WRITTEN)
          parser_mediator.ProduceEventWithEventData(event, event_data)

      time_string = policy_dict.get('lastLoginTimestamp', None)
      if time_string and time_string != '2001-01-01T00:00:00Z':
        try:
          date_time = dfdatetime_time_elements.TimeElements()
          date_time.CopyFromStringISO8601(time_string)
        except ValueError:
          date_time = None
          parser_mediator.ProduceExtractionError(
              'unable to parse last login time string: {0:s}'.format(
                  time_string))

        if date_time:
          event_data = plist_event.PlistTimeEventData()
          event_data.desc = 'Last login from {0:s} ({1!s})'.format(
              account, uid)
          event_data.key = 'lastLoginTimestamp'
          event_data.root = self._ROOT

          event = time_events.DateTimeValuesEvent(
              date_time, definitions.TIME_DESCRIPTION_WRITTEN)
          parser_mediator.ProduceEventWithEventData(event, event_data)

      time_string = policy_dict.get('failedLoginTimestamp', None)
      if time_string and time_string != '2001-01-01T00:00:00Z':
        try:
          date_time = dfdatetime_time_elements.TimeElements()
          date_time.CopyFromStringISO8601(time_string)
        except ValueError:
          date_time = None
          parser_mediator.ProduceExtractionError(
              'unable to parse failed login time string: {0:s}'.format(
                  time_string))

        if date_time:
          event_data = plist_event.PlistTimeEventData()
          event_data.desc = (
              'Last failed login from {0:s} ({1!s}) ({2!s} times)').format(
                  account, uid, policy_dict.get('failedLoginCount', 0))
          event_data.key = 'failedLoginTimestamp'
          event_data.root = self._ROOT

          event = time_events.DateTimeValuesEvent(
              date_time, definitions.TIME_DESCRIPTION_WRITTEN)
          parser_mediator.ProduceEventWithEventData(event, event_data)
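
For reference, a minimal stdlib-only sketch of the policy parsing step used above: an XML plist fragment is walked with ElementTree and the children of each <dict> are paired into a dictionary, just as GetEntries does. The sample XML and its values are illustrative, not taken from a real user plist.

from xml.etree import ElementTree

policy_xml = (
    '<plist><dict>'
    '<key>passwordLastSetTime</key><string>2013-12-28T00:44:32Z</string>'
    '<key>failedLoginCount</key><integer>0</integer>'
    '</dict></plist>')

xml_policy = ElementTree.fromstring(policy_xml)
for dict_element in xml_policy.iterfind('dict'):
  key_values = [element.text for element in dict_element]
  # Pair every other item as the key and the item that follows as the value.
  policy_dict = dict(zip(key_values[0::2], key_values[1::2]))

print(policy_dict.get('passwordLastSetTime'))  # 2013-12-28T00:44:32Z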
Exemple #21
  def setUp(self):
    """Sets up the needed objects used throughout the test."""
    self._resolver_context = context.Context()
Exemple #22
  def __init__(
      self, debug_output=False, enable_profiling=False,
      maximum_number_of_tasks=_MAXIMUM_NUMBER_OF_TASKS,
      profiling_directory=None, profiling_sample_rate=1000,
      profiling_type=u'all', use_zeromq=True):
    """Initializes an engine object.

    Args:
      debug_output (Optional[bool]): True if debug output should be enabled.
      enable_profiling (Optional[bool]): True if profiling should be enabled.
      maximum_number_of_tasks (Optional[int]): maximum number of concurrent
          tasks, where 0 represents no limit.
      profiling_directory (Optional[str]): path to the directory where
          the profiling sample files should be stored.
      profiling_sample_rate (Optional[int]): the profiling sample rate,
          in number of event sources processed between samples.
      profiling_type (Optional[str]): type of profiling.
          Supported types are:

          * 'memory' to profile memory usage;
          * 'parsers' to profile CPU time consumed by individual parsers;
          * 'processing' to profile CPU time consumed by different parts of
            the processing;
          * 'serializers' to profile CPU time consumed by individual
            serializers.
      use_zeromq (Optional[bool]): True if ZeroMQ should be used for queuing
          instead of Python's multiprocessing queue.
    """
    super(TaskMultiProcessEngine, self).__init__(
        debug_output=debug_output, enable_profiling=enable_profiling,
        profiling_directory=profiling_directory,
        profiling_sample_rate=profiling_sample_rate,
        profiling_type=profiling_type)
    self._enable_sigsegv_handler = False
    self._filter_find_specs = None
    self._filter_object = None
    self._hasher_names_string = None
    self._last_worker_number = 0
    self._maximum_number_of_tasks = maximum_number_of_tasks
    self._memory_profiler = None
    self._merge_task = None
    self._merge_task_on_hold = None
    self._mount_path = None
    self._number_of_consumed_errors = 0
    self._number_of_consumed_event_tags = 0
    self._number_of_consumed_events = 0
    self._number_of_consumed_reports = 0
    self._number_of_consumed_sources = 0
    self._number_of_produced_errors = 0
    self._number_of_produced_event_tags = 0
    self._number_of_produced_events = 0
    self._number_of_produced_reports = 0
    self._number_of_produced_sources = 0
    self._number_of_worker_processes = 0
    self._parser_filter_expression = None
    self._preferred_year = None
    self._process_archives = False
    self._process_compressed_streams = True
    self._processing_profiler = None
    self._resolver_context = context.Context()
    self._serializers_profiler = None
    self._session_identifier = None
    self._status = definitions.PROCESSING_STATUS_IDLE
    self._storage_merge_reader = None
    self._storage_merge_reader_on_hold = None
    self._storage_writer = None
    self._task_queue = None
    self._task_queue_port = None
    self._task_manager = task_manager.TaskManager(
        maximum_number_of_tasks=maximum_number_of_tasks)
    self._temporary_directory = None
    self._text_prepend = None
    self._use_zeromq = use_zeromq
    self._yara_rules_string = None
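
A hedged usage sketch showing construction of the engine only, with the keyword arguments documented above; the profiling directory path is illustrative, and source scanning, storage and processing are omitted.

engine = TaskMultiProcessEngine(
    debug_output=True,
    enable_profiling=True,
    profiling_directory=u'/tmp/plaso_profiles',  # illustrative path
    profiling_type=u'processing',
    use_zeromq=True)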