Example No. 1
0
    def collect(self):
        """Yield one row per unique kernel timer.

        Reads _KUSER_SHARED_DATA so timer due times (expressed in
        InterruptTime, 100ns units) can be rendered as wall-clock
        timestamps.
        """
        shared_data = self.profile.get_constant_object(
            "KI_USER_SHARED_DATA", "_KUSER_SHARED_DATA")

        # InterruptTime is stored as a split 64-bit counter.
        interrupt_time = (
            (shared_data.InterruptTime.High1Time << 32) +
            shared_data.InterruptTime.LowPart)

        # Offset that maps an InterruptTime value onto SystemTime.
        boot_offset = (shared_data.SystemTime.as_windows_timestamp() -
                       interrupt_time)

        for table_index, timer in utils.Deduplicate(
                self.timers(), key=lambda item: item[1]):
            signaled = "Yes" if timer.Header.SignalState.v() else "-"

            yield dict(
                Tbl=table_index,
                _KTIMER=timer,
                # Due time in InterruptTime (100ns).
                due="0x%0.20x" % timer.DueTime.QuadPart,
                due_time=self.profile.WinFileTime(
                    value=boot_offset + timer.DueTime.QuadPart,
                    is_utc=True),
                period=timer.Period,
                sig=signaled,
                routine=timer.Dpc.DeferredRoutine,
                symbol=utils.FormattedAddress(
                    self.session.address_resolver,
                    timer.Dpc.DeferredRoutine))
Example No. 2
0
    def collect(self):
        """Dump cached file data for each (deduplicated) file object.

        Writes recovered pages into files under self.dump_dir and yields
        one row per page whose virtual address could be resolved to a
        physical address.
        """

        self.file_objects = set()
        self.vacb_by_cache_map = {}

        renderer = self.session.GetRenderer()
        # Either enumerate the file objects ourselves, or instantiate
        # _FILE_OBJECTs from the explicit addresses given as arguments.
        if not self.plugin_args.file_objects:
            self.CollectFileObject()
        else:
            self.file_objects = set(
                [self.session.profile._FILE_OBJECT(int(x))
                 for x in self.plugin_args.file_objects])

        for file_object in utils.Deduplicate(
                self.file_objects, key=self._sanitized_filename):
            filename = self._sanitized_filename(file_object)

            self.session.report_progress(" Dumping %s", filename)
            with renderer.open(directory=self.dump_dir,
                               filename=filename, mode="wb") as out_fd:
                # The renderer may have adjusted the name; use whatever
                # file it actually opened.
                filename = out_fd.name

                # Sometimes we get both subsections.
                ca = file_object.SectionObjectPointer.ImageSectionObject
                if ca:
                    self._dump_ca(ca, out_fd, "ImageSectionObject",
                                  filename, renderer)

                ca = file_object.SectionObjectPointer.DataSectionObject
                if ca:
                    self._dump_ca(ca, out_fd, "DataSectionObject",
                                  filename, renderer)

                scm = file_object.SectionObjectPointer.SharedCacheMap.v()
                # NOTE(review): `== None` rather than `is None` may be
                # deliberate here if .v() can return a NoneObject-style
                # wrapper that compares equal to None — confirm before
                # changing this to an identity test.
                if scm == None:
                    continue

                # Augment the data with the cache manager.
                for vacb in self.vacb_by_cache_map.get(scm, []):
                    base_address = vacb.BaseAddress.v()
                    file_offset = vacb.Overlay.FileOffset.QuadPart.v()

                    # Each VACB controls a 256k buffer.
                    for offset in utils.xrange(0, 0x40000, 0x1000):
                        phys_address = self.kernel_address_space.vtop(
                            base_address + offset)

                        if phys_address:
                            yield dict(type="VACB",
                                       p_offset=phys_address,
                                       f_offset=file_offset+offset,
                                       f_length=0x1000,
                                       filename=filename)

                            # This writes a sparse file.
                            out_fd.seek(file_offset + offset)
                            out_fd.write(self.physical_address_space.read(
                                phys_address, 0x1000))
Example No. 3
0
 def collect(self):
     """Yield deduplicated virtual-address-range rows, counting them.

     The running count is kept in self.rows as rows are produced.
     """
     self.rows = 0
     combined = itertools.chain(
         self.collect_from_MiSystemVaType(),
         self.collect_from_MiVisibleState(),
         self.collect_from_pools())
     # Two ranges are duplicates when their (start, end) pair matches.
     for row in utils.Deduplicate(
             combined,
             key=lambda row: (int(row["virt_start"]),
                              int(row["virt_end"]))):
         self.rows += 1
         yield row
Example No. 4
0
    def _collect_directory(self, directory, seen, depth=0):
        """Recursively walk an object directory, yielding matching entries.

        Entries whose type matches self.plugin_args.type_regex are
        yielded as dicts; "Directory" entries are descended into with an
        incremented depth.
        """
        for obj_header in utils.Deduplicate(directory.list()):
            obj_type = str(obj_header.get_object_type())
            name = str(obj_header.NameInfo.Name)

            # Symbolic links are shown with their target and timestamp.
            if obj_type == "SymbolicLink":
                name += u"-> %s (%s)" % (obj_header.Object.LinkTarget,
                                         obj_header.Object.CreationTime)

            if self.plugin_args.type_regex.search(obj_type):
                yield dict(_OBJECT_HEADER=obj_header, type=obj_type,
                           name=name, depth=depth)

            # Recurse into nested directories, one level deeper.
            if obj_type != "Directory":
                continue
            for entry in self._collect_directory(
                    obj_header.Object, seen, depth=depth + 1):
                yield entry
Example No. 5
0
    def session_spaces(self):
        """Generates unique _MM_SESSION_SPACE objects.

        Generates unique _MM_SESSION_SPACE objects referenced by active
        processes.

        Yields:
          _MM_SESSION_SPACE instantiated from the session space's address space.
        """
        # Visiting one process per distinct Session pointer is enough.
        for task in utils.Deduplicate(self.filter_processes(),
                                      key=lambda p: p.Session):
            task_as = task.get_process_address_space()

            session_space = task.Session.deref(vm=task_as)
            # Skip invalid session pointers (e.g. the System process).
            if not session_space:
                continue
            yield session_space
Example No. 6
0
    def _locate_heap(self, task, vad):
        """Locate the correct heap by scanning for its reference.

        Find the references into the heap from the dnsrslvr.dll vad. This will
        normally be stored in dnsrslvr.dll's global variable called g_CacheHeap.
        """
        pointer_scanner = scan.PointerScanner(
            pointers=task.Peb.ProcessHeaps,
            session=self.session,
            address_space=self.session.GetParameter("default_address_space"))

        for hit in utils.Deduplicate(
                pointer_scanner.scan(vad.Start, maxlen=vad.Length)):
            candidate = self.heap_profile.Pointer(
                hit, target="_HEAP").deref()

            # Return the first entry that verifies as the hash table.
            for entry in candidate.Entries:
                table = self._verify_hash_table(entry.Allocation, candidate)
                if table:
                    return table
Example No. 7
0
    def enumerate_lfh_heap_allocations(self, heap, skip_freed=False):
        """Dump the low fragmentation heap.

        Walks every _LFH_BLOCK_ZONE in the heap's front end and yields a
        (String over the user data, bin allocation length) tuple for each
        user block entry.

        Args:
          heap: the _HEAP object whose FrontEndHeap is enumerated.
          skip_freed: if True, entries whose UnusedBytes flags (0x38 bits)
            mark them as freed are skipped.
        """
        for lfh_block in heap.FrontEndHeap.SubSegmentZones.list_of_type(
                "_LFH_BLOCK_ZONE", "ListEntry"):
            # The subsegment array occupies the space between the end of
            # the _LFH_BLOCK_ZONE header and its FreePointer.
            block_length = lfh_block.FreePointer.v() - lfh_block.obj_end
            segments = heap.obj_profile.Array(target="_HEAP_SUBSEGMENT",
                                              offset=lfh_block.obj_end,
                                              size=block_length)

            # Several subsegments may reference the same UserBlocks
            # buffer; visit each buffer only once.
            for segment in utils.Deduplicate(
                    segments, key=lambda segment: segment.UserBlocks.v()):
                # BlockSize is expressed in 16-byte allocation units.
                allocation_length = segment.BlockSize * 16

                for entry in segment.UserBlocks.Entries:
                    # http://www.leviathansecurity.com/blog/understanding-the-windows-allocator-a-redux/
                    # Skip freed blocks if requested.
                    if skip_freed and entry.UnusedBytes & 0x38:
                        continue

                    # NOTE(review): Python precedence makes this
                    # `UnusedBytes & (0x3f - 0x8)`, i.e. `& 0x37`.  If
                    # the intent was `(UnusedBytes & 0x3f) - 0x8` (mask
                    # the unused-byte field, then subtract the 8-byte
                    # header) the parenthesization is wrong — confirm
                    # against the _HEAP_ENTRY layout before changing.
                    UnusedBytes = entry.UnusedBytes & 0x3f - 0x8

                    # The actual length of user allocation is the difference
                    # between the HEAP allocation bin size and the unused bytes
                    # at the end of the allocation.
                    data_len = allocation_length - UnusedBytes

                    # The data length can not be larger than the allocation
                    # minus the critical parts of _HEAP_ENTRY. Sometimes,
                    # allocations overrun into the next element's _HEAP_ENTRY so
                    # they can store data in the next entry's
                    # entry.PreviousBlockPrivateData. In this case the
                    # allocation length seems to be larger by 8 bytes.
                    if data_len > allocation_length - 0x8:
                        data_len -= 0x8

                    yield (heap.obj_profile.String(entry.obj_end,
                                                   term=None,
                                                   length=data_len),
                           allocation_length)