Example #1
    def _ScanProcess(self, psutil_process, args):
        if args.per_process_timeout:
            deadline = rdfvalue.RDFDatetime.Now() + args.per_process_timeout
        else:
            deadline = rdfvalue.RDFDatetime.Now() + rdfvalue.Duration("1w")

        rules = args.yara_signature.GetRules()

        process = client_utils.OpenProcessForMemoryAccess(
            pid=psutil_process.pid)
        with process:
            streamer = streaming.MemoryStreamer(process,
                                                chunk_size=args.chunk_size,
                                                overlap_size=args.overlap_size)
            matches = []

            try:
                for start, length in client_utils.MemoryRegions(process, args):
                    for m in self._ScanRegion(rules, streamer, start, length,
                                              deadline):
                        matches.append(m)
                        if (args.max_results_per_process > 0 and
                                len(matches) >= args.max_results_per_process):
                            return matches
            except yara.Error as e:
                # Yara internal error 30 is too many hits (obviously...). We
                # need to report this as a hit, not an error.
                if str(e) == "internal error: 30":
                    return matches
                raise

        return matches
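
The _ScanRegion helper is not shown on this page. A minimal sketch of what it could look like, assuming yara-python's rules.match(data=...) API and the chunk fields (data, offset, overlap) that the streamer tests below demonstrate; the real GRR implementation may differ:

    def _ScanRegion(self, rules, streamer, start, length, deadline):
        # Hypothetical sketch, not the actual GRR implementation.
        for chunk in streamer.Stream(start, length):
            if rdfvalue.RDFDatetime.Now() > deadline:
                # Per-process time budget exhausted; stop scanning.
                return
            for match in rules.match(data=chunk.data):
                # A real implementation would also rebase match offsets by
                # chunk.offset and deduplicate hits that fall inside the
                # overlap region.
                yield match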
Example #2
    def testOddChunkSize(self):
        data = "foofoobarfoofoo"
        p = MockProcess(data)

        s = streaming.MemoryStreamer(p, chunk_size=2, overlap_size=0)

        res = [len(chunk.data) for chunk in s.Stream(0, 100)]
        self.assertEqual(res, [2, 2, 2, 2, 2, 2, 2, 1])
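
The tests on this page construct a MockProcess helper that is not shown. A minimal stand-in that makes them self-contained, assuming the streamer reads memory through a ReadBytes(address, num_bytes)-style accessor (the exact interface expected by streaming.MemoryStreamer is an assumption here):

    class MockProcess(object):
        """Fake process exposing a fixed buffer to the streamer."""

        def __init__(self, data):
            self.data = data

        def ReadBytes(self, address, num_bytes):
            # Reads past the end are truncated, which is what lets the
            # tests ask for Stream(0, 100) on a 15 byte buffer.
            return self.data[address:address + num_bytes]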
Example #3
    def testChunking(self):
        data = "foofoobarfoofoo"
        p = MockProcess(data)

        s = streaming.MemoryStreamer(p, chunk_size=2, overlap_size=0)

        res = [chunk.data for chunk in s.Stream(0, 100)]
        self.assertEqual("".join(res), data)

        res = []
        res.extend(chunk.data for chunk in s.Stream(0, 6))
        res.extend(chunk.data for chunk in s.Stream(6, 100))
        self.assertEqual("".join(res), data)
Example #4
    def testOverlap(self):
        data = "foofoobarfoofoo"

        p = MockProcess(data)
        s = streaming.MemoryStreamer(p, chunk_size=5, overlap_size=2)
        res = [chunk.data for chunk in s.Stream(0, 100)]

        # The data is 15 bytes long: the first chunk holds 5 bytes and each
        # later one advances by chunk_size - overlap_size = 3 new bytes.
        # The last chunk is short (2 bytes of overlap + 1 byte of data).
        self.assertEqual(len(res), 5)
        self.assertEqual([len(c) for c in res], [5, 5, 5, 5, 3])
        for i in range(len(res) - 1):
            self.assertEqual(res[i][-2:], res[i + 1][:2])

        self.assertEqual([chunk.offset for chunk in s.Stream(0, 100)],
                         [0, 3, 6, 9, 12])
        self.assertEqual([chunk.overlap for chunk in s.Stream(0, 100)],
                         [0, 2, 2, 2, 2])
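
These numbers also show why the overlap exists: when scanning for a pattern (as the yara examples above do), a hit that straddles a chunk boundary is only visible if consecutive chunks share at least len(pattern) - 1 bytes. A small illustration, reusing the hypothetical MockProcess stub sketched above:

    data = "foofoobarfoofoo"
    p = MockProcess(data)

    # Without overlap, "oob" (offsets 4-6) straddles the boundary between
    # the chunks starting at offsets 0 and 5, so no single chunk sees it.
    s = streaming.MemoryStreamer(p, chunk_size=5, overlap_size=0)
    assert not any("oob" in c.data for c in s.Stream(0, 100))

    # With overlap_size=2 (= len("oob") - 1), the chunk at offset 3 covers
    # bytes 3-7 and contains the whole pattern.
    s = streaming.MemoryStreamer(p, chunk_size=5, overlap_size=2)
    hits = [c.offset + c.data.find("oob")
            for c in s.Stream(0, 100) if "oob" in c.data]
    assert hits == [4]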
Example #5
  def _ScanProcess(self, psutil_process, args):
    if args.per_process_timeout:
      deadline = rdfvalue.RDFDatetime.Now() + args.per_process_timeout
    else:
      deadline = rdfvalue.RDFDatetime.Now() + rdfvalue.Duration("1w")

    rules = args.yara_signature.GetRules()

    process = client_utils.OpenProcessForMemoryAccess(pid=psutil_process.pid)
    with process:
      streamer = streaming.MemoryStreamer(
          process, chunk_size=args.chunk_size, overlap_size=args.overlap_size)
      matches = []

      for start, length in client_utils.MemoryRegions(process, args):
        for m in self._ScanRegion(rules, streamer, start, length, deadline):
          matches.append(m)
    return matches
Example #6
    def DumpProcess(self, psutil_process, args):
        response = rdf_yara.YaraProcessDumpInformation()
        response.process = rdf_client.Process.FromPsutilProcess(psutil_process)

        process = client_utils.OpenProcessForMemoryAccess(
            pid=psutil_process.pid)

        bytes_limit = args.size_limit

        with process:
            streamer = streaming.MemoryStreamer(process,
                                                chunk_size=args.chunk_size)

            with tempfiles.TemporaryDirectory(cleanup=False) as tmp_dir:
                for start, length in client_utils.MemoryRegions(process, args):

                    if bytes_limit and self.bytes_written + length > bytes_limit:
                        response.error = (
                            "Byte limit exceeded. Wrote %d bytes, "
                            "next block is %d bytes, limit is %d." %
                            (self.bytes_written, length, bytes_limit))
                        return response

                    end = start + length
                    filename = "%s_%d_%x_%x.tmp" % (
                        psutil_process.name(), psutil_process.pid, start, end)
                    filepath = os.path.join(tmp_dir.path, filename)

                    bytes_written = self._SaveMemDumpToFilePath(
                        filepath, streamer, start, length)

                    if not bytes_written:
                        continue

                    self.bytes_written += bytes_written
                    response.dump_files.Append(
                        rdf_paths.PathSpec(
                            path=filepath,
                            pathtype=rdf_paths.PathSpec.PathType.TMPFILE))

        return response
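
The _SaveMemDumpToFilePath helper is not shown on this page. A plausible sketch, assuming it simply streams the region to disk and returns the number of bytes written, with zero signalling an unreadable region (the caller above skips those):

    def _SaveMemDumpToFilePath(self, filepath, streamer, start, length):
        # Hypothetical sketch, not the actual GRR implementation.
        bytes_written = 0
        with open(filepath, "wb") as fd:
            for chunk in streamer.Stream(start, length):
                if not chunk.data:
                    # Unreadable region: report zero so the caller skips it.
                    return 0
                fd.write(chunk.data)
                bytes_written += len(chunk.data)
        return bytes_written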