Exemple #1
0
    def collect(self):
        """Hex-dump the files matched by the parent collector.

        For each hit that carries a "path", open the file and emit a
        divider row followed by one hexdump row per line of output,
        bounded by the configured start/length/width/rows arguments.

        Yields:
          dicts for the table renderer: either a "divider" entry or a
          row with offset/FileSpec/hexdump columns.
        """
        for hit in super(IRDump, self).collect():
            path = hit.get("path")
            if not path:
                continue

            fd = path.open()
            if not fd:
                continue

            try:
                yield dict(divider=path.filename)

                # Read no more than requested, and no more than fits in
                # the visible window (width * rows).
                to_read = min(
                    self.plugin_args.length,
                    self.plugin_args.width * self.plugin_args.rows)
                for offset in utils.xrange(
                        self.plugin_args.start,
                        self.plugin_args.start + to_read,
                        self.plugin_args.width):

                    fd.seek(offset)
                    data = fd.read(self.plugin_args.width)
                    if not data:
                        break

                    yield dict(
                        offset=offset,
                        FileSpec=path.filename,
                        hexdump=utils.HexDumpedString(data),
                        nowrap=True,
                        hex_width=self.plugin_args.width)
            finally:
                # Bug fix: the handle was never closed. The finally block
                # also runs when the consumer abandons the generator.
                fd.close()
Exemple #2
0
    def collect(self):
        """Yield hexdump rows for every file found by the base collector.

        Opens each hit's "path", emits a divider, then reads at most
        ``length`` bytes (capped at ``width * rows``) starting from
        ``start``, one row of ``width`` bytes at a time.
        """
        for hit in super(IRDump, self).collect():
            path = hit.get("path")
            if path:
                fd = path.open()
                if fd:
                    try:
                        yield dict(divider=path.filename)

                        # Cap the read at both the requested length and
                        # the visible window (width * rows).
                        to_read = min(
                            self.plugin_args.length,
                            self.plugin_args.width * self.plugin_args.rows)
                        for offset in utils.xrange(
                                self.plugin_args.start,
                                self.plugin_args.start + to_read,
                                self.plugin_args.width):

                            fd.seek(offset)
                            data = fd.read(self.plugin_args.width)
                            if not data:
                                break

                            yield dict(
                                offset=offset,
                                FileSpec=path.filename,
                                hexdump=utils.HexDumpedString(data),
                                nowrap=True,
                                hex_width=self.plugin_args.width)
                    finally:
                        # Bug fix: the file handle was leaked before.
                        fd.close()
Exemple #3
0
    def render(self, renderer):
        """Renders the file to disk"""
        output = self.output_image
        if output is None:
            raise plugin.PluginError("Please provide an output-image filename")

        # Never clobber a pre-existing (non-trivial) output file.
        if os.path.exists(output) and os.path.getsize(output) > 1:
            raise plugin.PluginError("Refusing to overwrite an existing file, "
                                     "please remove it before continuing")

        chunk_size = 5 * 1024 * 1024
        with renderer.open(filename=output, mode="wb") as fd:
            for run in self.address_space.get_mappings():
                renderer.format("Range {0:#x} - {1:#x}\n", run.start,
                                run.length)

                offset = run.start
                while offset < run.end:
                    data = self.address_space.read(
                        offset, min(chunk_size, run.end - offset))

                    # Seek explicitly so gaps between runs stay sparse.
                    fd.seek(offset)
                    fd.write(data)

                    renderer.RenderProgress(
                        "Writing offset %s" % self.human_readable(offset))

                    offset += chunk_size
Exemple #4
0
    def dump_process(self, eprocess, fd, index_fd):
        """Write the process's userspace memory to fd, with an index file.

        Each chunk written to fd gets one row in index_fd mapping the
        dump-file offset and length back to the virtual address.
        """
        task_as = eprocess.get_process_address_space()

        index_renderer = text.TextRenderer(session=self.session, fd=index_fd)
        with index_renderer.start():
            index_renderer.table_header([
                ("File Address", "file_addr", "[addrpad]"),
                ("Length", "length", "[addrpad]"),
                ("Virtual Addr", "virtual", "[addrpad]")])

            # Only dump the userspace portion of addressable memory.
            max_memory = self.session.GetParameter("highest_usermode_address")
            chunk_size = 1024 * 1024

            for run in task_as.get_address_ranges(end=max_memory):
                for vaddr in utils.xrange(run.start, run.end, chunk_size):
                    read_len = min(chunk_size, run.end - vaddr)
                    if read_len == 0:
                        break

                    buf = task_as.read(vaddr, read_len)
                    file_offset = fd.tell()
                    fd.write(buf)

                    # Record where in the dump file this chunk landed.
                    index_renderer.table_row(file_offset, read_len, vaddr)
Exemple #5
0
    def dump_process(self, eprocess, fd, index_fd):
        """Dump userspace memory of eprocess to fd; index rows go to index_fd."""
        task_as = eprocess.get_process_address_space()
        renderer = text.TextRenderer(session=self.session, fd=index_fd)
        with renderer.start():
            columns = [
                ("File Address", "file_addr", "[addrpad]"),
                ("Length", "length", "[addrpad]"),
                ("Virtual Addr", "virtual", "[addrpad]"),
            ]
            renderer.table_header(columns)

            # Only dump the userspace portion of addressable memory.
            max_memory = self.session.GetParameter("highest_usermode_address")
            blocksize = 1024 * 1024

            for run in task_as.get_address_ranges(end=max_memory):
                offset = run.start
                while offset < run.end:
                    to_read = min(blocksize, run.end - offset)
                    if not to_read:
                        break

                    data = task_as.read(offset, to_read)
                    file_pos = fd.tell()
                    fd.write(data)

                    # Map dump-file offset back to the virtual address.
                    renderer.table_row(file_pos, to_read, offset)

                    offset += blocksize
Exemple #6
0
    def render(self, renderer):
        """Dump the cached data of each collected _FILE_OBJECT to disk.

        For every unique filename the section data (ImageSectionObject /
        DataSectionObject) is written via _dump_ca(), then pages still
        resident in the cache manager (VACBs) are overlaid, producing a
        sparse output file.  One table row is emitted per dumped page.
        """
        renderer.table_header([
            ("Type", "type", "20"),
            ("Phys Offset", "POffset", "[addrpad]"),
            ("File Offset", "FOffset", "[addrpad]"),
            ("File Length", "Flength", ">#05x"),
            ("Filename", "filename", "")
            ])

        self.CollectFileObject()
        # The same file may be reachable through several _FILE_OBJECTs;
        # dump each distinct name only once.
        seen_filenames = set()
        for file_object in self.file_objects:
            # Replace backslashes so the device path becomes a flat,
            # filesystem-safe output name.
            filename = unicode(
                file_object.file_name_with_device()).replace("\\", "_")

            if filename in seen_filenames:
                continue

            seen_filenames.add(filename)

            self.session.report_progress(" Dumping %s", filename)
            with renderer.open(directory=self.dump_dir,
                               filename=filename, mode="w") as out_fd:

                # Use the name the renderer actually chose - it may
                # differ from the requested one.
                filename = out_fd.name

                # Sometimes we get both subsections.
                ca = file_object.SectionObjectPointer.ImageSectionObject
                if ca:
                    self._dump_ca(ca, out_fd, "ImageSectionObject",
                                  filename, renderer)

                ca = file_object.SectionObjectPointer.DataSectionObject
                if ca:
                    self._dump_ca(ca, out_fd, "DataSectionObject",
                                  filename, renderer)

                scm = file_object.SectionObjectPointer.SharedCacheMap.v()

                # Augment the data with the cache manager.
                for vacb in self.vacb_by_cache_map.get(scm, []):
                    base_address = vacb.BaseAddress.v()
                    file_offset = vacb.Overlay.FileOffset.QuadPart.v()

                    # Each VACB controls a 256k buffer.
                    for offset in utils.xrange(0, 0x40000, 0x1000):
                        phys_address = self.kernel_address_space.vtop(
                            base_address + offset)

                        # Only pages currently backed by physical memory
                        # can be recovered.
                        if phys_address:
                            renderer.table_row(
                                "VACB", phys_address, file_offset+offset,
                                0x1000, filename)

                            # This writes a sparse file.
                            out_fd.seek(file_offset + offset)
                            out_fd.write(self.physical_address_space.read(
                                phys_address, 0x1000))
Exemple #7
0
    def render(self, renderer):
        """Dump cached file data for every collected _FILE_OBJECT.

        Writes each file's section data, then overlays any pages still
        held by the cache manager (VACBs), producing a sparse file and
        one table row per recovered page.
        """
        renderer.table_header([
            ("Type", "type", "20"),
            ("Phys Offset", "POffset", "[addrpad]"),
            ("File Offset", "FOffset", "[addrpad]"),
            ("File Length", "Flength", ">#05x"),
            ("Filename", "filename", "")
            ])

        self.CollectFileObject()
        dumped = set()
        for file_object in self.file_objects:
            filename = unicode(
                file_object.file_name_with_device()).replace("\\", "_")

            # Each distinct filename is dumped once only.
            if filename in dumped:
                continue
            dumped.add(filename)

            self.session.report_progress(" Dumping %s", filename)
            with renderer.open(directory=self.dump_dir,
                               filename=filename, mode="w") as out_fd:

                filename = out_fd.name

                # Sometimes we get both subsections.
                for section in ("ImageSectionObject", "DataSectionObject"):
                    control_area = getattr(
                        file_object.SectionObjectPointer, section)
                    if control_area:
                        self._dump_ca(control_area, out_fd, section,
                                      filename, renderer)

                shared_map = (
                    file_object.SectionObjectPointer.SharedCacheMap.v())

                # Augment the data with the cache manager.
                for vacb in self.vacb_by_cache_map.get(shared_map, []):
                    base_address = vacb.BaseAddress.v()
                    file_offset = vacb.Overlay.FileOffset.QuadPart.v()

                    # Each VACB controls a 256k buffer.
                    for page in utils.xrange(0, 0x40000, 0x1000):
                        phys_address = self.kernel_address_space.vtop(
                            base_address + page)
                        if not phys_address:
                            continue

                        renderer.table_row(
                            "VACB", phys_address, file_offset + page,
                            0x1000, filename)

                        # This writes a sparse file.
                        out_fd.seek(file_offset + page)
                        out_fd.write(self.physical_address_space.read(
                            phys_address, 0x1000))
Exemple #8
0
    def collect(self):
        """Dump cached file data, yielding one table row per VACB page.

        If explicit file object addresses were passed via plugin args
        they are used directly; otherwise all file objects are collected
        first.  Output files are written sparsely under self.dump_dir.
        """
        renderer = self.session.GetRenderer()
        if not self.plugin_args.file_objects:
            self.CollectFileObject()
        else:
            # Instantiate _FILE_OBJECTs directly from the user-supplied
            # addresses.
            self.file_objects = set(
                [self.session.profile._FILE_OBJECT(int(x))
                 for x in self.plugin_args.file_objects])

        # Dump each distinct filename only once.
        seen_filenames = set()
        for file_object in self.file_objects:
            # Backslashes are flattened so the device path is a safe
            # on-disk filename.
            filename = unicode(
                file_object.file_name_with_device()).replace("\\", "_")

            if filename in seen_filenames:
                continue

            seen_filenames.add(filename)

            self.session.report_progress(" Dumping %s", filename)
            with renderer.open(directory=self.dump_dir,
                               filename=filename, mode="w") as out_fd:
                # Use the name the renderer actually chose.
                filename = out_fd.name

                # Sometimes we get both subsections.
                ca = file_object.SectionObjectPointer.ImageSectionObject
                if ca:
                    self._dump_ca(ca, out_fd, "ImageSectionObject",
                                  filename, renderer)

                ca = file_object.SectionObjectPointer.DataSectionObject
                if ca:
                    self._dump_ca(ca, out_fd, "DataSectionObject",
                                  filename, renderer)

                scm = file_object.SectionObjectPointer.SharedCacheMap.v()

                # Augment the data with the cache manager.
                for vacb in self.vacb_by_cache_map.get(scm, []):
                    base_address = vacb.BaseAddress.v()
                    file_offset = vacb.Overlay.FileOffset.QuadPart.v()

                    # Each VACB controls a 256k buffer.
                    for offset in utils.xrange(0, 0x40000, 0x1000):
                        phys_address = self.kernel_address_space.vtop(
                            base_address + offset)

                        # Only pages currently mapped to physical memory
                        # can be recovered.
                        if phys_address:
                            yield ("VACB", phys_address, file_offset+offset,
                                   0x1000, filename)

                            # This writes a sparse file.
                            out_fd.seek(file_offset + offset)
                            out_fd.write(self.physical_address_space.read(
                                phys_address, 0x1000))
Exemple #9
0
    def collect(self):
        """Yield descriptions for each physical page in [start, end).

        The plugin is resumable: after a pass, ``plugin_args.start`` is
        moved to the last page visited so the next invocation continues
        from there.
        """
        page = self.plugin_args.start

        # When no end is given (or it lies before the start), scan a
        # default window of ten pages.
        limit = self.plugin_args.end
        if limit is None or limit < page:
            limit = page + 10 * 0x1000

        for page in utils.xrange(self.plugin_args.start, limit, 0x1000):
            for description in self.describe_phys_addr(page):
                yield description

        # Re-run from here next invocation.
        self.plugin_args.start = page
Exemple #10
0
    def collect(self):
        """Describe physical pages starting at plugin_args.start.

        Scans up to plugin_args.end or, when that is missing or invalid,
        a default window of ten 4k pages.  Resumable: start is advanced
        to the last page visited.
        """
        phys_off = self.plugin_args.start

        end = self.plugin_args.end
        # Fall back to a ten-page window when no (sane) end was given.
        if end is None or end < phys_off:
            end = phys_off + 10 * 0x1000

        for phys_off in utils.xrange(self.plugin_args.start, end, 0x1000):
            for result in self.describe_phys_addr(phys_off):
                yield result

        # Re-run from here next invocation.  NOTE(review): phys_off holds
        # the final loop value, so the last page is revisited on resume -
        # confirm this is intended.
        self.plugin_args.start = phys_off
Exemple #11
0
    def extract_mft_entries_from_vacb(self, vacb):
        """Scan one VACB-mapped buffer for MFT entries and register them."""
        base = vacb.BaseAddress.v()
        for offset in utils.xrange(base, base + self.vacb_size,
                                   self.mft_size):
            # Fixups are not applied in memory.
            entry = self.ntfs_profile.MFT_ENTRY(
                offset, context=dict(mft=self.mfts, ApplyFixup=False))
            if entry.magic != "FILE":
                continue

            entry_id = entry.mft_entry
            self.mfts[entry_id] = entry
            self.session.report_progress(
                "Added: %s", lambda entry=entry: entry.filename.name)

            # Record the parent -> child relationship in the dir tree.
            parent_id = entry.filename.mftReference.v()
            self.dir_tree.setdefault(parent_id, set()).add(entry_id)
Exemple #12
0
    def GeneratePageMetatadata(self, task):
        """Yield (vaddr, metadata) for each page of task inside the window."""
        address_space = self.session.GetParameter("default_address_space")

        for vma in task.mm.mmap.walk_list("vm_next"):
            region_start = vma.vm_start
            region_end = vma.vm_end

            # Region lies entirely below the requested window.
            if region_end < self.plugin_args.start:
                continue

            # Past the window - nothing more to do.
            if region_start > self.plugin_args.end:
                break

            for vaddr in utils.xrange(region_start, region_end, 0x1000):
                if self.plugin_args.start <= vaddr <= self.plugin_args.end:
                    yield vaddr, self._CreateMetadata(
                        address_space.describe_vtop(vaddr))
Exemple #13
0
    def extract_mft_entries_from_vacb(self, vacb):
        """Scan one VACB-mapped buffer for MFT entries.

        Valid entries (magic "FILE") are registered in self.mfts and the
        parent/child relationship is recorded in self.dir_tree.
        """
        base = vacb.BaseAddress.v()
        for offset in utils.xrange(base, base + self.vacb_size, self.mft_size):
            # Fixups are not applied in memory.
            mft = self.ntfs_profile.MFT_ENTRY(
                offset, context=dict(mft=self.mfts, ApplyFixup=False))
            if mft.magic != "FILE":
                continue

            mft_id = mft.mft_entry
            self.mfts[mft_id] = mft
            # The lambda defers the name lookup until progress is
            # actually reported.
            self.session.report_progress(
                "Added: %s", lambda mft=mft: mft.filename.name)

            # Record this entry under its parent directory.
            parent_id = mft.filename.mftReference.v()
            if parent_id not in self.dir_tree:
                self.dir_tree[parent_id] = set()

            self.dir_tree[parent_id].add(mft_id)
Exemple #14
0
    def GeneratePageMetatadata(self, proc):
        """Yield (vaddr, metadata) for each page of proc inside the
        requested [plugin_args.start, plugin_args.end] window.

        Args:
          proc: the process whose task map entries are walked.

        Yields:
          (virtual address, metadata) tuples, one per 4k page.
        """
        address_space = self.session.GetParameter("default_address_space")

        # Renamed the loop variable from "map", which shadowed the
        # builtin of the same name.
        for vm_map in proc.task.map.hdr.walk_list(
                "links.next", include_current=False):

            start = vm_map.links.start
            end = vm_map.links.end

            # Skip the entire region.
            if end < self.plugin_args.start:
                continue

            # Done.
            if start > self.plugin_args.end:
                break

            for vaddr in utils.xrange(start, end, 0x1000):
                if self.plugin_args.start <= vaddr <= self.plugin_args.end:
                    yield vaddr, self._CreateMetadata(
                        address_space.describe_vtop(vaddr))
Exemple #15
0
    def GeneratePageMetatadata(self, proc):
        address_space = self.session.GetParameter("default_address_space")

        for map in proc.task.map.hdr.walk_list(
            "links.next", include_current=False):

            start = map.links.start
            end = map.links.end

            # Skip the entire region.
            if end < self.start:
                continue

            # Done.
            if start > self.end:
                break

            for vaddr in utils.xrange(start, end, 0x1000):
                if self.start <= vaddr <= self.end:
                    yield vaddr, self._CreateMetadata(
                        address_space.describe_vtop(vaddr))
Exemple #16
0
    def IterObject(self, type=None, freed=True):
        """Generates possible _OBJECT_HEADER accounting for optional headers.

        Note that not all pool allocations have an _OBJECT_HEADER - only ones
        allocated from the the object manager. This means calling this method
        depends on which pool allocation you are after.

        On windows 8, pool allocations are done from preset sizes. This means
        that the allocation is never exactly the same size and we can not use
        the bottom up method like before.

        We therefore, have to build the headers forward by checking the preamble
        size and validity of each object. This is a little slower than with
        earlier versions of windows.

        Args:
          type: The object type name. If not specified we return all objects.
          freed: If True, also yield headers whose TypeIndex marks them
            as freed (index 2).

        Yields:
          _OBJECT_HEADER instances that validate at some aligned position
          inside this pool allocation.
        """
        pool_align = self.obj_profile.get_constant("PoolAlignment")
        # BlockSize is expressed in units of the pool alignment.
        allocation_size = self.BlockSize * pool_align

        # Operate on a cached version of the next page.
        # We use a temporary buffer for the object to save reads of the image.
        start = self.obj_end
        cached_data = self.obj_vm.read(start, allocation_size)
        cached_vm = addrspace.BufferAddressSpace(
            base_offset=start,
            data=cached_data,
            session=self.obj_session,
            metadata=dict(image=self.obj_vm.metadata("image")))

        # We search for the _OBJECT_HEADER.InfoMask in close proximity to our
        # object. We build a lookup table between the values in the InfoMask and
        # the minimum distance there is between the start of _OBJECT_HEADER and
        # the end of _POOL_HEADER. This way we can quickly skip unreasonable
        # values.

        # This is the offset within _OBJECT_HEADER of InfoMask.
        info_mask_offset = self.obj_profile.get_obj_offset(
            "_OBJECT_HEADER", "InfoMask")

        # Build the cache if needed.
        if not self.lookup:
            self._BuildLookupTable()

        # Walk over all positions in the address space and try to fit an object
        # header there.
        for i in utils.xrange(start,
                              start + allocation_size - info_mask_offset,
                              pool_align):
            # NOTE(review): on Python 2 indexing a str yields a one-char
            # string; self.lookup must be keyed the same way - confirm.
            possible_info_mask = cached_data[i - start + info_mask_offset]
            #if possible_info_mask > '\x7f':
            #    continue

            # The minimum amount of space needed before the object header to
            # hold all the optional headers.
            minimum_offset = self.lookup[possible_info_mask]

            # Obviously wrong because we need more space than we have.
            if minimum_offset > i - start:
                continue

            # Create a test object header from the cached vm to test for
            # validity.
            test_object = self.obj_profile._OBJECT_HEADER(offset=i,
                                                          vm=cached_vm)

            if test_object.is_valid():
                if (type is None or test_object.get_object_type() == type or
                        # Freed objects point to index 2
                        #(which is also 0xbad0b0b0).
                    (freed and test_object.TypeIndex == 2)):
                    yield test_object
Exemple #17
0
    def IterObject(self, type=None, freed=True):
        """Generates possible _OBJECT_HEADER accounting for optional headers.

        Note that not all pool allocations have an _OBJECT_HEADER - only ones
        allocated from the the object manager. This means calling this method
        depends on which pool allocation you are after.

        On windows 8, pool allocations are done from preset sizes. This means
        that the allocation is never exactly the same size and we can not use
        the bottom up method like before.

        We therefore, have to build the headers forward by checking the preamble
        size and validity of each object. This is a little slower than with
        earlier versions of windows.

        Args:
          type: The object type name. If not specified we return all objects.
          freed: If True, also yield headers whose TypeIndex marks them
            as freed (index 2).

        Yields:
          _OBJECT_HEADER instances that validate at some aligned position
          inside this pool allocation.
        """
        pool_align = self.obj_profile.get_constant("PoolAlignment")
        # BlockSize is expressed in units of the pool alignment.
        allocation_size = self.BlockSize * pool_align

        # Operate on a cached version of the next page.
        # We use a temporary buffer for the object to save reads of the image.
        start = self.obj_end
        cached_data = self.obj_vm.read(start, allocation_size)
        cached_vm = addrspace.BufferAddressSpace(
            base_offset=start, data=cached_data, session=self.obj_session,
            metadata=dict(image=self.obj_vm.metadata("image")))

        # We search for the _OBJECT_HEADER.InfoMask in close proximity to our
        # object. We build a lookup table between the values in the InfoMask and
        # the minimum distance there is between the start of _OBJECT_HEADER and
        # the end of _POOL_HEADER. This way we can quickly skip unreasonable
        # values.

        # This is the offset within _OBJECT_HEADER of InfoMask.
        info_mask_offset = self.obj_profile.get_obj_offset(
            "_OBJECT_HEADER", "InfoMask")

        # Build the cache if needed.
        if not self.lookup:
            self._BuildLookupTable()

        # Walk over all positions in the address space and try to fit an object
        # header there.
        for i in utils.xrange(start,
                              start + allocation_size - info_mask_offset,
                              pool_align):
            # NOTE(review): on Python 2 indexing a str yields a one-char
            # string; self.lookup must be keyed the same way - confirm.
            possible_info_mask = cached_data[i - start + info_mask_offset]
            #if possible_info_mask > '\x7f':
            #    continue

            # The minimum amount of space needed before the object header to
            # hold all the optional headers.
            minimum_offset = self.lookup[possible_info_mask]

            # Obviously wrong because we need more space than we have.
            if minimum_offset > i - start:
                continue

            # Create a test object header from the cached vm to test for
            # validity.
            test_object = self.obj_profile._OBJECT_HEADER(
                offset=i, vm=cached_vm)

            if test_object.is_valid():
                if (type is None or
                        test_object.get_object_type() == type or
                        # Freed objects point to index 2
                        #(which is also 0xbad0b0b0).
                        (freed and test_object.TypeIndex == 2)):
                    yield test_object