Example #1
    def __init__(self, addresses=None, preamble=32, **kwargs):
        super(ShowAllocation, self).__init__(**kwargs)
        if isinstance(addresses, int):
            addresses = [addresses]

        self.addresses = addresses
        self.offset = None
        self.preamble = preamble
        self.allocations = getattr(self.session.address_resolver,
                                   "heap_allocations", None)

        if self.allocations is None:
            allocations = utils.RangedCollection()
            inspect_heap = self.session.plugins.inspect_heap()
            for allocation in inspect_heap.enumerate_heap_allocations(
                    self.session.GetParameter("process_context")):

                # Include the header in the allocation.
                allocations.insert(
                    allocation.obj_offset - 16,
                    allocation.obj_offset + allocation.length + 16,
                    (allocation.obj_offset, allocation.length))

                self.session.report_progress(
                    "Enumerating allocation: %#x",
                    lambda allocation=allocation: allocation.obj_offset)

            # Cache the completed map so later lookups reuse it.
            self.session.address_resolver.heap_allocations = allocations
            self.allocations = allocations
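Each allocation is inserted with 16 bytes of slack on either side, so a lookup on any byte of the block, its heap entry header included, resolves back to the real (obj_offset, length) pair stored as the range data. A tiny worked sketch with illustrative numbers (not values taken from the code above):

    # Illustrative numbers only.
    obj_offset, length = 0x1000, 0x40

    # The range actually inserted for this allocation:
    start = obj_offset - 16               # 0x0ff0
    end = obj_offset + length + 16        # 0x1050

    # Any query address in [0x0ff0, 0x1050), header bytes included, maps
    # back to the stored payload (0x1000, 0x40).
    assert (start, end) == (0x0ff0, 0x1050)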
Example #2
    def DecodeFromJsonSafe(self, state, options):
        result = utils.RangedCollection()
        for start, end, encoded_data in state["data"]:
            result.insert(start, end,
                          self._decode_value(encoded_data, options))

        return result
Example #3
    def BuildAllocationMap(self):
        """Build a map of all allocations for fast looksup."""
        allocations = utils.RangedCollection()
        inspect_heap = self.session.plugins.inspect_heap()
        for heap in inspect_heap.GenerateHeaps():
            # First do the backend allocations.
            for allocation in inspect_heap.enumerate_backend_heap_allocations(
                    heap):

                # Include the header in the allocation.
                allocations.insert(
                    allocation.obj_offset - 16,
                    allocation.obj_offset + allocation.length + 16,
                    (allocation.obj_offset, allocation.length, "B"))

                self.session.report_progress(
                    "Enumerating backend allocation: %#x",
                    lambda allocation=allocation: allocation.obj_offset)

            # Now do the LFH allocations (These will mask the subsegments in the
            # RangedCollection).
            for allocation, allocation_length in (
                    inspect_heap.enumerate_lfh_heap_allocations(
                        heap, skip_freed=False)):
                self.session.report_progress(
                    "Enumerating frontend allocation: %#x",
                    lambda allocation=allocation: allocation.obj_offset)

                # Front end allocations do not have their own headers.
                allocations.insert(
                    allocation.obj_offset,
                    allocation.obj_offset + allocation_length,
                    (allocation.obj_offset, allocation_length, "F"))

        return allocations
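The comment about LFH allocations masking the subsegments relies on lookups preferring the range inserted last when ranges overlap. The snippet below is a minimal, hypothetical stand-in (not Rekall's utils.RangedCollection) that shows one way to get that build-then-query behaviour; the class name and the masking semantics are assumptions for illustration only.

    # A hypothetical, minimal stand-in for a ranged lookup table. It is NOT
    # Rekall's utils.RangedCollection; it only illustrates the usage pattern
    # in these examples: insert (start, end, data) ranges, then ask which
    # range contains an address. Here a later insert wins on overlap, which
    # is the masking behaviour the LFH comment above describes.
    class ToyRangedCollection(object):
        def __init__(self):
            # (start, end, data) tuples in insertion order.
            self._ranges = []

        def insert(self, start, end, data):
            self._ranges.append((start, end, data))

        def get_containing_range(self, address):
            # Scan newest-first so an overlapping later insert masks an
            # earlier one.
            for start, end, data in reversed(self._ranges):
                if start <= address < end:
                    return start, end, data
            return None

    toy = ToyRangedCollection()
    toy.insert(0x1000, 0x9000, ("backend subsegment", "B"))   # backend range
    toy.insert(0x2000, 0x2040, (0x2000, 0x40, "F"))           # LFH allocation

    # 0x2010 lies in both ranges; the frontend insert masks the backend one.
    assert toy.get_containing_range(0x2010)[2] == (0x2000, 0x40, "F")
    # 0x5000 lies only inside the backend subsegment range.
    assert toy.get_containing_range(0x5000)[2] == ("backend subsegment", "B")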
Example #4
    def reset(self):
        # A ranged collection of Module() objects.
        self._address_ranges = utils.RangedCollection()

        # A lookup between module names and the Module object itself.
        self._modules_by_name = {}

        self._initialized = False
Example #5
    def is_address_in_pool(self, address):
        if self._pool_lookup is None:
            self._pool_lookup = utils.RangedCollection()
            for descriptor in self.find_all_pool_descriptors():
                self._pool_lookup.insert(descriptor.PoolStart,
                                         descriptor.PoolEnd, descriptor)

        return self._pool_lookup.get_containing_range(address)
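The same lazy build-and-cache idea appears again in the vad() example further down: the expensive enumeration runs once, on the first query, and every later query is answered from the cached range index. Below is a hedged, self-contained sketch of that pattern using bisect over sorted, non-overlapping ranges; none of these names come from Rekall.

    import bisect

    class LazyPoolIndex(object):
        """Hypothetical sketch: build the range index on first use only."""

        def __init__(self, enumerate_ranges):
            # A callable yielding (start, end, data) tuples, assumed to be
            # expensive, so we only want to run it once.
            self._enumerate = enumerate_ranges
            self._ranges = None
            self._starts = None

        def find(self, address):
            if self._ranges is None:
                # First query: build and cache the sorted index.
                self._ranges = sorted(self._enumerate())
                self._starts = [start for start, _, _ in self._ranges]

            # Locate the last range that starts at or before the address.
            i = bisect.bisect_right(self._starts, address) - 1
            if i >= 0 and self._ranges[i][0] <= address < self._ranges[i][1]:
                return self._ranges[i][2]
            return None

    index = LazyPoolIndex(lambda: [(0x8000, 0x9000, "NonPagedPool"),
                                   (0xa000, 0xc000, "PagedPool")])
    assert index.find(0xa100) == "PagedPool"   # inside the second range
    assert index.find(0x9800) is None          # in the gap between ranges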
Example #6
    def _make_cache(self, task):
        result = utils.RangedCollection()
        self.session.report_progress(" Enumerating VADs in %s (%s)", task.name,
                                     task.pid)

        for vad in task.RealVadRoot.traverse():
            result.insert(vad.Start, vad.End, (self._get_filename(vad), vad))

        return result
Example #7
    def _verify_hash_table(self, allocation, heap):
        """Verify the allocation between start and end for a hash table.

        We have observed that the hash table often contains corrupt data due to
        paging smear during acquisition, so more rigorous checks might fail to
        find the correct hash table because the corrupted data confuses the
        sanity checks here. It is always better to detect the correct version
        using the profile repository.
        """
        self.session.logging.debug("Verifying hash table at %#x",
                                   allocation.obj_offset)

        if (self.plugin_args.hashtable
                and allocation.obj_offset != self.plugin_args.hashtable):
            return False

        # Find all segments in this heap.
        segments = utils.RangedCollection()
        for seg in heap.Segments:
            segments.insert(seg.FirstEntry, seg.LastValidEntry, seg)

        # We usually observe the hash table to be about 1600 bytes, but it might
        # grow.
        if allocation.length > 1600 * 3 or allocation.length < 1600:
            return False

        # Cast the allocation into a hash table.
        cache_hash_table = allocation.cast("Array",
                                           target="Pointer",
                                           target_args=dict(
                                               target="DNS_HASHTABLE_ENTRY", ),
                                           profile=self.profile,
                                           size=allocation.length)

        count = 0
        for entry in cache_hash_table:
            # If the hashtable entry is null, keep searching.
            entry = entry.v()
            if entry == 0:
                continue

            # ALL entry pointers must point back into one of the other segments
            # in this heap (since DNS_HASHTABLE_ENTRY entries are allocated
            # from this heap).
            dest_segment = segments.get_range(entry)
            if dest_segment is None:
                return False

            count += 1

        # The hash table could legitimately be all empty, but without at least
        # one valid entry we would match any zero-filled allocation.
        if count == 0:
            return False

        return cache_hash_table
Example #8
    def __init__(self, **kwargs):
        super(AddressResolverMixin, self).__init__(**kwargs)

        # A ranged collection of Module() objects.
        self._address_ranges = utils.RangedCollection()

        # A lookup between module names and the Module object itself.
        self._modules_by_name = {}

        self._initialized = False
Example #9
    def calculate(self):
        result = utils.RangedCollection()
        for subsection_offset in self.session.GetParameter("subsections"):
            subsection = self.session.profile._SUBSECTION(subsection_offset)
            start = subsection.SubsectionBase.v()

            # Pte Arrays are always allocated from kernel pools.
            if start < self.session.GetParameter("highest_usermode_address"):
                continue

            end = start + (subsection.PtesInSubsection *
                           subsection.SubsectionBase[0].obj_size)
            result.insert(start, end, subsection_offset)

        return result
Example #10
    def vad(self):
        """Returns a cached RangedCollection() of vad ranges."""

        # If this dtb is the same as the kernel dtb - there are no vads.
        if self.dtb == self.session.GetParameter("dtb"):
            return

        # If it is already cached, just return that.
        if self._vad is not None:
            return self._vad

        # We cannot run plugins in a recursive context.
        if not self._resolve_vads:
            return obj.NoneObject("vads not available right now")

        try:
            # Prevent recursively calling ourselves. We might resolve Prototype
            # PTEs which end up calling plugins (like the VAD plugin) which
            # might recursively translate another Vad Prototype address. This
            # safety check below ensures we can't get into infinite recursion
            # failing more complex PTE resolution on recursive calls.
            self._resolve_vads = False

            # Try to map the dtb to a task struct so we can look up the vads.
            if self.task == None:
                # Find the _EPROCESS for this dtb - we need to consult the VAD
                # for some of the address transition.
                self.task = self.session.GetParameter("dtb2task").get(self.dtb)

            self._vad = utils.RangedCollection()
            task = self.session.profile._EPROCESS(self.task)
            for vad in task.RealVadRoot.traverse():
                self._vad.insert(vad.Start, vad.End, vad)

            return self._vad
        finally:
            self._resolve_vads = True
Example #11
 def __init__(self):
     self.collection = utils.RangedCollection()
     self.idx = 0
     self.label_color_map = {}
Example #12
 def __init__(self, **kwargs):
     super(RunBasedAddressSpace, self).__init__(**kwargs)
     self.runs = utils.RangedCollection()
Example #13
    def DecodeFromJsonSafe(self, state, _):
        result = utils.RangedCollection()
        result.collection = utils.SortedCollection(state["data"])

        return result