Example #1
0
 def testLegacyDataMigration(self):
     """Legacy dump_files entries are migrated in place to memory_regions.

     Region start/size are recovered from the hex offsets encoded in each
     dump file's name, and the Windows path is canonicalized to a
     forward-slash form with a leading "/".
     """
     win_path = "C:\\Foo\\Bar\\%s_%d_%x_%x.tmp" % ("my_proc", 123, 111, 222)
     unix_path = "/foo/bar/%s_%d_%x_%x.tmp" % ("my_proc", 123, 456, 789)
     res = rdf_memory.YaraProcessDumpResponse(dumped_processes=[
         rdf_memory.YaraProcessDumpInformation(dump_files=[
             rdf_paths.PathSpec(path=win_path, pathtype="TMPFILE"),
             rdf_paths.PathSpec(path=unix_path, pathtype="TMPFILE"),
         ])
     ])

     memory._MigrateLegacyDumpFilesToMemoryAreas(res)

     expected_regions = [
         rdf_memory.ProcessMemoryRegion(
             start=111,
             size=111,
             file=rdf_paths.PathSpec(
                 path="/C:/Foo/Bar/%s_%d_%x_%x.tmp" %
                 ("my_proc", 123, 111, 222),
                 pathtype="TMPFILE")),
         rdf_memory.ProcessMemoryRegion(
             start=456,
             size=333,
             # The Unix path is already canonical, so it is unchanged.
             file=rdf_paths.PathSpec(path=unix_path, pathtype="TMPFILE")),
     ]
     self.assertEqual(
         res,
         rdf_memory.YaraProcessDumpResponse(dumped_processes=[
             rdf_memory.YaraProcessDumpInformation(
                 memory_regions=expected_regions)
         ]))
  def testMultipleClientsWithIdenticalPathsYieldDifferentPMIs(self):
    """Identical dump paths reported by different clients stay separate."""
    client1 = self.SetupClient(1)
    client2 = self.SetupClient(2)

    # Both clients report exactly the same dump-file path.
    def MakeDump():
      return rdf_memory.YaraProcessDumpResponse(dumped_processes=[
          rdf_memory.YaraProcessDumpInformation(dump_files=[
              rdf_paths.PathSpec(
                  path="my_proc_123_f0_fa.tmp", pathtype=TMPFILE),
          ])
      ])

    def MakeStatEntry():
      return rdf_client_fs.StatEntry(
          pathspec=rdf_paths.PathSpec(
              path="my_proc_123_f0_fa.tmp", pathtype=TMPFILE))

    messages = [
        rdf_flows.GrrMessage(source=client1, payload=MakeDump()),
        rdf_flows.GrrMessage(source=client2, payload=MakeDump()),
        rdf_flows.GrrMessage(source=client1, payload=MakeStatEntry()),
        rdf_flows.GrrMessage(source=client2, payload=MakeStatEntry()),
    ]
    self._ProcessValuesWithPlugin(messages)

    # One OutputMemoryDump call per client, each attributed correctly.
    self.assertEqual(self.plugin.OutputMemoryDump.call_count, 2)
    for client in (client1, client2):
      self.plugin.OutputMemoryDump.assert_any_call(
          rdf_memory.YaraProcessDumpInformation(dump_files=[
              rdf_paths.PathSpec(
                  path="my_proc_123_f0_fa.tmp", pathtype=TMPFILE),
          ]), client)
Example #3
0
    def testPathSpecCasingIsCorrected(self):
        """Replies carry the path casing actually observed on disk ("Grr")."""
        flow = memory.DumpProcessMemory(rdf_flow_objects.Flow())
        flow.SendReply = mock.Mock(spec=flow.SendReply)

        def Region(path):
            # All regions in this test share start=1/size=1; only the path
            # casing differs.
            return rdf_memory.ProcessMemoryRegion(
                start=1, size=1, file=rdf_paths.PathSpec.Temp(path=path))

        def Dump(*paths):
            return rdf_memory.YaraProcessDumpResponse(dumped_processes=[
                rdf_memory.YaraProcessDumpInformation(
                    memory_regions=[Region(p) for p in paths])
            ])

        # The client-reported regions use inconsistent casing ("grr"/"GRR").
        request = rdf_flow_objects.FlowRequest(
            request_data={
                "YaraProcessDumpResponse":
                    Dump("/C:/grr/x_1_0_1.tmp", "/C:/GRR/x_1_1_2.tmp")
            })
        # The collected files report the canonical casing ("Grr").
        collected = [
            rdf_paths.PathSpec.Temp(path="/C:/Grr/x_1_0_1.tmp"),
            rdf_paths.PathSpec.Temp(path="/C:/Grr/x_1_1_2.tmp"),
        ]
        responses = flow_responses.Responses.FromResponses(request, [
            rdf_flow_objects.FlowResponse(
                payload=rdf_client_fs.StatEntry(pathspec=pathspec))
            for pathspec in collected
        ])

        flow.ProcessMemoryRegions(responses)
        flow.SendReply.assert_any_call(
            Dump("/C:/Grr/x_1_0_1.tmp", "/C:/Grr/x_1_1_2.tmp"))
Example #4
0
    def DumpProcess(self, psutil_process, args):
        """Dumps a process's memory regions to files in a temp directory.

        The region list may be re-prioritized (args.prioritize_offsets) and
        truncated to a byte budget (args.size_limit) before the surviving
        regions are written out in ascending start-address order.

        Args:
          psutil_process: psutil.Process to dump.
          args: flow arguments providing chunk_size, prioritize_offsets and
            size_limit.

        Returns:
          rdf_memory.YaraProcessDumpInformation whose memory_regions contain
          only the regions that were actually saved to disk.
        """
        response = rdf_memory.YaraProcessDumpInformation()
        response.process = rdf_client.Process.FromPsutilProcess(psutil_process)
        streamer = streaming.Streamer(chunk_size=args.chunk_size)

        with client_utils.OpenProcessForMemoryAccess(
                psutil_process.pid) as process:
            regions = list(client_utils.MemoryRegions(process, args))

            if args.prioritize_offsets:
                regions = _PrioritizeRegions(regions, args.prioritize_offsets)

            if args.size_limit:
                total_regions = len(regions)
                regions = _ApplySizeLimit(regions, args.size_limit)
                # Surface that some regions were dropped by the size limit.
                if len(regions) < total_regions:
                    response.error = ("Byte limit exceeded. Writing {} of {} "
                                      "regions.").format(
                                          len(regions), total_regions)
            else:
                # No byte limit: every region is dumped in full.
                for region in regions:
                    region.dumped_size = region.size

            # Write regions in ascending start-address order.
            regions = sorted(regions, key=lambda r: r.start)

            # cleanup=False: the dump files must remain on disk after this
            # call returns.
            with tempfiles.TemporaryDirectory(cleanup=False) as tmp_dir:
                for region in regions:
                    # NOTE(review): Progress() looks like a per-region
                    # liveness heartbeat — confirm against the base class.
                    self.Progress()
                    pathspec = self._SaveRegionToDirectory(
                        psutil_process, process, region, tmp_dir, streamer)
                    # A None pathspec means the region was not saved; it is
                    # omitted from the response.
                    if pathspec is not None:
                        region.file = pathspec
                        response.memory_regions.Append(region)

        return response
  def testOutputPluginPersistsStateCorrectly(self):
    """Dumps are matched to collected files across plugin re-creation.

    NOTE(review): self.plugin is reset to None between batches and appears to
    be rebuilt by _ProcessValuesWithPlugin, so the matching asserted below
    must rely on state the plugin persisted, not on a single live instance —
    confirm against the test harness.
    """
    # Dump 0 references two files; dump 1 references a single file.
    yara_dump_0 = rdf_memory.YaraProcessDumpResponse(dumped_processes=[
        rdf_memory.YaraProcessDumpInformation(dump_files=[
            rdf_paths.PathSpec(path="my_proc_123_f0_fa.tmp", pathtype=TMPFILE),
            rdf_paths.PathSpec(path="my_proc_123_fa_104.tmp", pathtype=TMPFILE)
        ])
    ])
    yara_dump_1 = rdf_memory.YaraProcessDumpResponse(dumped_processes=[
        rdf_memory.YaraProcessDumpInformation(dump_files=[
            rdf_paths.PathSpec(path="foobar_456_f0_fa.tmp", pathtype=TMPFILE),
        ])
    ])
    # StatEntries for the collected files matching the dumps above.
    stat_entry_00 = rdf_client_fs.StatEntry(
        pathspec=rdf_paths.PathSpec(
            path="my_proc_123_f0_fa.tmp", pathtype=TMPFILE))
    stat_entry_01 = rdf_client_fs.StatEntry(
        pathspec=rdf_paths.PathSpec(
            path="my_proc_123_fa_104.tmp", pathtype=TMPFILE))
    stat_entry_10 = rdf_client_fs.StatEntry(
        pathspec=rdf_paths.PathSpec(
            path="foobar_456_f0_fa.tmp", pathtype=TMPFILE))

    # Feed each value in its own batch, discarding the plugin in between.
    self._ProcessValuesWithPlugin(
        [rdf_flows.GrrMessage(source=self.client_id, payload=yara_dump_0)])
    self.plugin = None

    self._ProcessValuesWithPlugin(
        [rdf_flows.GrrMessage(source=self.client_id, payload=yara_dump_1)])
    self.plugin = None

    self._ProcessValuesWithPlugin(
        [rdf_flows.GrrMessage(source=self.client_id, payload=stat_entry_00)])
    self.plugin = None
    # Only after BOTH of dump 0's files have arrived is the dump output.
    self._ProcessValuesWithPlugin(
        [rdf_flows.GrrMessage(source=self.client_id, payload=stat_entry_01)])
    self.plugin.OutputMemoryDump.assert_called_once_with(
        rdf_memory.YaraProcessDumpInformation(dump_files=[
            rdf_paths.PathSpec(path="my_proc_123_f0_fa.tmp", pathtype=TMPFILE),
            rdf_paths.PathSpec(path="my_proc_123_fa_104.tmp", pathtype=TMPFILE)
        ]), self.client_id)
    self.plugin = None
    # Dump 1 completes independently once its single file arrives.
    self._ProcessValuesWithPlugin(
        [rdf_flows.GrrMessage(source=self.client_id, payload=stat_entry_10)])
    self.plugin.OutputMemoryDump.assert_called_once_with(
        rdf_memory.YaraProcessDumpInformation(dump_files=[
            rdf_paths.PathSpec(path="foobar_456_f0_fa.tmp", pathtype=TMPFILE),
        ]), self.client_id)
  def testCallsOutputMemoryDumpWithSingleBlob(self):
    """A dump whose only file has been collected is output exactly once."""

    # Fresh objects each call, so the expected value in the assertion cannot
    # alias anything the plugin may have touched.
    def SingleFileDumpInfo():
      return rdf_memory.YaraProcessDumpInformation(dump_files=[
          rdf_paths.PathSpec(path="my_proc_123_f0_fa.tmp", pathtype=TMPFILE),
      ])

    yara_dump = rdf_memory.YaraProcessDumpResponse(
        dumped_processes=[SingleFileDumpInfo()])
    stat_entry = rdf_client_fs.StatEntry(
        pathspec=rdf_paths.PathSpec(
            path="my_proc_123_f0_fa.tmp", pathtype=TMPFILE))
    messages = [
        rdf_flows.GrrMessage(source=self.client_id, payload=yara_dump),
        rdf_flows.GrrMessage(source=self.client_id, payload=stat_entry),
    ]
    self._ProcessValuesWithPlugin(messages)

    self.plugin.OutputMemoryDump.assert_called_once_with(
        SingleFileDumpInfo(), self.client_id)
Example #7
0
    def DumpProcess(self, psutil_process, args):
        """Streams a process's memory regions into temporary dump files.

        Each region is written to its own file; the region, with the file's
        pathspec attached, is appended to the response. Dumping aborts with
        response.error set as soon as writing the next region would push the
        running total (self.bytes_written) past args.size_limit.

        Args:
          psutil_process: psutil.Process whose memory is dumped.
          args: flow arguments providing chunk_size and size_limit.

        Returns:
          rdf_memory.YaraProcessDumpInformation with memory_regions populated
          (possibly partially, if the byte limit was hit).
        """
        response = rdf_memory.YaraProcessDumpInformation()
        response.process = rdf_client.Process.FromPsutilProcess(psutil_process)

        process = client_utils.OpenProcessForMemoryAccess(
            pid=psutil_process.pid)

        # A falsy size_limit disables the byte limit entirely.
        bytes_limit = args.size_limit

        with process:
            streamer = streaming.Streamer(chunk_size=args.chunk_size)

            # cleanup=False: the dump files must remain on disk after this
            # call returns.
            with tempfiles.TemporaryDirectory(cleanup=False) as tmp_dir:
                for region in client_utils.MemoryRegions(process, args):

                    if bytes_limit and self.bytes_written + region.size > bytes_limit:
                        response.error = (
                            "Byte limit exceeded. Wrote %d bytes, "
                            "next block is %d bytes, limit is %d." %
                            (self.bytes_written, region.size, bytes_limit))
                        return response

                    end = region.start + region.size

                    # _ReplaceDumpPathspecsWithMultiGetFilePathspec in DumpProcessMemory
                    # flow asserts that MemoryRegions can be uniquely identified by their
                    # file's basename.
                    filename = "%s_%d_%x_%x.tmp" % (psutil_process.name(),
                                                    psutil_process.pid,
                                                    region.start, end)
                    filepath = os.path.join(tmp_dir.path, filename)

                    chunks = streamer.StreamMemory(process,
                                                   offset=region.start,
                                                   amount=region.size)
                    bytes_written = self._SaveMemDumpToFilePath(
                        filepath, chunks)

                    # Nothing was written for this region; don't record it.
                    if not bytes_written:
                        continue

                    self.bytes_written += bytes_written

                    # TODO: Remove workaround after client_utils are fixed.
                    canonical_path = client_utils.LocalPathToCanonicalPath(
                        filepath)
                    if not canonical_path.startswith("/"):
                        canonical_path = "/" + canonical_path

                    region.file = rdf_paths.PathSpec(
                        path=canonical_path,
                        pathtype=rdf_paths.PathSpec.PathType.TMPFILE)

                    response.memory_regions.Append(region)

        return response
  def testProcessesMultipleYaraProcessDumpResponsesCorrectly(self):
    """Collected files are matched to the dump that referenced them."""

    def Dump(*paths):
      return rdf_memory.YaraProcessDumpResponse(dumped_processes=[
          rdf_memory.YaraProcessDumpInformation(dump_files=[
              rdf_paths.PathSpec(path=p, pathtype=TMPFILE) for p in paths
          ])
      ])

    def Stat(path):
      return rdf_client_fs.StatEntry(
          pathspec=rdf_paths.PathSpec(path=path, pathtype=TMPFILE))

    # One two-file dump and one single-file dump, followed by the three
    # collected files.
    payloads = [
        Dump("my_proc_123_f0_fa.tmp", "my_proc_123_fa_104.tmp"),
        Dump("foobar_456_f0_fa.tmp"),
        Stat("my_proc_123_f0_fa.tmp"),
        Stat("my_proc_123_fa_104.tmp"),
        Stat("foobar_456_f0_fa.tmp"),
    ]
    self._ProcessValuesWithPlugin([
        rdf_flows.GrrMessage(source=self.client_id, payload=p)
        for p in payloads
    ])

    self.assertEqual(self.plugin.OutputMemoryDump.call_count, 2)
    self.plugin.OutputMemoryDump.assert_any_call(
        rdf_memory.YaraProcessDumpInformation(dump_files=[
            rdf_paths.PathSpec(path="my_proc_123_f0_fa.tmp", pathtype=TMPFILE),
            rdf_paths.PathSpec(path="my_proc_123_fa_104.tmp", pathtype=TMPFILE)
        ]), self.client_id)
    self.plugin.OutputMemoryDump.assert_any_call(
        rdf_memory.YaraProcessDumpInformation(dump_files=[
            rdf_paths.PathSpec(path="foobar_456_f0_fa.tmp", pathtype=TMPFILE),
        ]), self.client_id)
  def testIsResilientToTemporaryPathSpecRegression(self):
    """A Windows-style dump path still matches its canonical collected path."""
    # The dump reports the raw Windows path; the collected StatEntry carries
    # the canonical form with CASE_LITERAL options.
    reported_dump = rdf_memory.YaraProcessDumpResponse(dumped_processes=[
        rdf_memory.YaraProcessDumpInformation(dump_files=[
            rdf_paths.PathSpec(
                path="C:\\Foo\\my_proc_123_f0_fa.tmp", pathtype=TMPFILE),
        ])
    ])
    collected_file = rdf_client_fs.StatEntry(
        pathspec=rdf_paths.PathSpec(
            path="/C:/Foo/my_proc_123_f0_fa.tmp",
            pathtype=TMPFILE,
            path_options=rdf_paths.PathSpec.Options.CASE_LITERAL))
    self._ProcessValuesWithPlugin([
        rdf_flows.GrrMessage(source=self.client_id, payload=reported_dump),
        rdf_flows.GrrMessage(source=self.client_id, payload=collected_file),
    ])

    # The output dump carries the collected (canonical) pathspec.
    self.plugin.OutputMemoryDump.assert_called_once_with(
        rdf_memory.YaraProcessDumpInformation(dump_files=[
            rdf_paths.PathSpec(
                path="/C:/Foo/my_proc_123_f0_fa.tmp",
                pathtype=TMPFILE,
                path_options=rdf_paths.PathSpec.Options.CASE_LITERAL),
        ]), self.client_id)
Example #10
0
    def DumpProcess(self, psutil_process, args):
        """Streams a process's memory regions into temporary dump files.

        Records one dump-file pathspec per memory region on
        response.dump_files; the region's start/end addresses are encoded in
        the filename and parsed back out on the server side (see TODO below).
        Dumping aborts with response.error set as soon as writing the next
        region would push self.bytes_written past args.size_limit.

        Args:
          psutil_process: psutil.Process whose memory is dumped.
          args: flow arguments providing chunk_size and size_limit.

        Returns:
          rdf_memory.YaraProcessDumpInformation with dump_files populated
          (possibly partially, if the byte limit was hit).
        """
        response = rdf_memory.YaraProcessDumpInformation()
        response.process = rdf_client.Process.FromPsutilProcess(psutil_process)

        process = client_utils.OpenProcessForMemoryAccess(
            pid=psutil_process.pid)

        # A falsy size_limit disables the byte limit entirely.
        bytes_limit = args.size_limit

        with process:
            streamer = streaming.Streamer(chunk_size=args.chunk_size)

            # cleanup=False: the dump files must remain on disk after this
            # call returns.
            with tempfiles.TemporaryDirectory(cleanup=False) as tmp_dir:
                for start, length in client_utils.MemoryRegions(process, args):

                    if bytes_limit and self.bytes_written + length > bytes_limit:
                        response.error = (
                            "Byte limit exceeded. Wrote %d bytes, "
                            "next block is %d bytes, limit is %d." %
                            (self.bytes_written, length, bytes_limit))
                        return response

                    end = start + length
                    # TODO: The filename is parsed on the server side to
                    # extract the memory address again. This should be changed by
                    # saving the `start` and `end` in YaraProcessDumpInformation.
                    filename = "%s_%d_%x_%x.tmp" % (
                        psutil_process.name(), psutil_process.pid, start, end)
                    filepath = os.path.join(tmp_dir.path, filename)

                    chunks = streamer.StreamMemory(process,
                                                   offset=start,
                                                   amount=length)
                    bytes_written = self._SaveMemDumpToFilePath(
                        filepath, chunks)

                    # Nothing was written for this region; don't record it.
                    if not bytes_written:
                        continue

                    self.bytes_written += bytes_written
                    response.dump_files.Append(
                        rdf_paths.PathSpec(
                            path=filepath,
                            pathtype=rdf_paths.PathSpec.PathType.TMPFILE))

        return response
Example #11
0
    def ProcessResponses(self, state, responses):
        """Processes Yara memory dumps and collected memory areas.

        Incoming data is grouped by the client that sent it; a process dump
        is emitted via OutputMemoryDump once its memory areas have been
        fully collected.

        Args:
          state: persistent plugin state
          responses: GrrMessages, containing YaraProcessDumpResponse,
            StatEntry, and other RDFValues
        """

        # This Plugin keeps local state, grouped by the client id that sent the
        # Yara dump:
        # - `dumps` is a list of all seen YaraProcessDumpInformation
        # - `paths` is a list of all collected memory areas
        # - `completed` is a list of all exported YaraProcessDumpInformation
        self._ReInitializeLocalState(state)

        for response in responses:
            client_id = response.source.Basename()
            client_state = self._GetOrInitClientState(client_id)

            # First, add the data that came in to the Plugin's state.
            # Payloads of other types are ignored.
            if isinstance(response.payload,
                          rdf_memory.YaraProcessDumpResponse):
                client_state.dumps.Extend(response.payload.dumped_processes)
            elif isinstance(response.payload, rdf_client_fs.StatEntry):
                client_state.paths.Append(response.payload.pathspec)

            # Second, iterate through the state and process all Yara dumps whose
            # memory areas have been fully collected.
            for process_dump in self._IterateReadyProcessDumpsOnce(
                    client_state):
                # TODO: Fix Windows PathSpec inconsistency.
                # Swap the client-reported dump_files for the pathspecs that
                # were actually collected before emitting the dump.
                paths = _NormalizePaths(client_state.paths,
                                        process_dump.dump_files)
                fixed_pd = rdf_memory.YaraProcessDumpInformation(process_dump)
                fixed_pd.dump_files = paths
                self.OutputMemoryDump(fixed_pd, client_id)