Example #1
    def _ScanProcess(self, psutil_process, args):
        if args.per_process_timeout:
            deadline = rdfvalue.RDFDatetime.Now() + args.per_process_timeout
        else:
            deadline = rdfvalue.RDFDatetime.Now() + rdfvalue.Duration("1w")

        rules = args.yara_signature.GetRules()

        process = client_utils.OpenProcessForMemoryAccess(
            pid=psutil_process.pid)
        with process:
            streamer = streaming.Streamer(chunk_size=args.chunk_size,
                                          overlap_size=args.overlap_size)
            matches = []

            try:
                for start, length in client_utils.MemoryRegions(process, args):
                    chunks = streamer.StreamMemory(process,
                                                   offset=start,
                                                   amount=length)
                    for m in self._ScanRegion(rules, chunks, deadline):
                        matches.append(m)
                        if (args.max_results_per_process > 0 and
                                len(matches) >= args.max_results_per_process):
                            return matches
            except yara.Error as e:
                # Yara internal error 30 is too many hits (obviously...). We
                # need to report this as a hit, not an error.
                if "internal error: 30" in str(e):
                    return matches
                raise

        return matches
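
The `_ScanRegion` helper used above is defined elsewhere in the class. A minimal sketch of what it might look like, assuming the yara-python API (`rules.match(data=..., timeout=...)`), chunk objects exposing a `data` attribute, and a `Duration` with a `seconds` property; it glosses over the offset bookkeeping and RDF conversion the real GRR implementation performs:

    def _ScanRegion(self, rules, chunks, deadline):
        # Hypothetical sketch; GRR's real _ScanRegion differs in detail.
        for chunk in chunks:
            if not chunk.data:
                break
            # Give yara at most the time remaining until the deadline.
            time_left = deadline - rdfvalue.RDFDatetime.Now()
            for match in rules.match(data=chunk.data,
                                     timeout=int(time_left.seconds)):
                yield match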
Example #2
    def DumpProcess(self, psutil_process, args):
        response = rdf_memory.YaraProcessDumpInformation()
        response.process = rdf_client.Process.FromPsutilProcess(psutil_process)
        streamer = streaming.Streamer(chunk_size=args.chunk_size)

        with client_utils.OpenProcessForMemoryAccess(
                psutil_process.pid) as process:
            regions = list(client_utils.MemoryRegions(process, args))

            if args.prioritize_offsets:
                regions = _PrioritizeRegions(regions, args.prioritize_offsets)

            if args.size_limit:
                total_regions = len(regions)
                regions = _ApplySizeLimit(regions, args.size_limit)
                if len(regions) < total_regions:
                    response.error = ("Byte limit exceeded. Writing {} of {} "
                                      "regions.").format(
                                          len(regions), total_regions)
            else:
                for region in regions:
                    region.dumped_size = region.size

            regions = sorted(regions, key=lambda r: r.start)

            with tempfiles.TemporaryDirectory(cleanup=False) as tmp_dir:
                for region in regions:
                    self.Progress()
                    pathspec = self._SaveRegionToDirectory(
                        psutil_process, process, region, tmp_dir, streamer)
                    if pathspec is not None:
                        region.file = pathspec
                        response.memory_regions.Append(region)

        return response
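
`_PrioritizeRegions` and `_ApplySizeLimit` are module-level helpers that are not shown. A rough sketch of `_ApplySizeLimit`, keeping regions in order until the limit is hit and setting `dumped_size` the same way the else-branch above does (the real helper may instead truncate the final region partially, which is what a separate `dumped_size` field would allow):

def _ApplySizeLimit(regions, size_limit):
    """Keeps leading regions whose cumulative size fits in size_limit."""
    result = []
    total = 0
    for region in regions:
        if total + region.size > size_limit:
            break
        region.dumped_size = region.size
        result.append(region)
        total += region.size
    return result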
Example #3
  def _GetMatches(self, psutil_process, scan_request):
    if scan_request.per_process_timeout:
      deadline = rdfvalue.RDFDatetime.Now() + scan_request.per_process_timeout
    else:
      deadline = rdfvalue.RDFDatetime.Now() + rdfvalue.Duration.From(
          1, rdfvalue.WEEKS)

    rules = scan_request.yara_signature.GetRules()

    process = client_utils.OpenProcessForMemoryAccess(pid=psutil_process.pid)
    with process:
      streamer = streaming.Streamer(
          chunk_size=scan_request.chunk_size,
          overlap_size=scan_request.overlap_size)
      matches = []

      try:
        for region in client_utils.MemoryRegions(process, scan_request):
          chunks = streamer.StreamMemory(
              process, offset=region.start, amount=region.size)
          for m in self._ScanRegion(rules, chunks, deadline):
            matches.append(m)
            if 0 < scan_request.max_results_per_process <= len(matches):
              return matches
      except yara.Error as e:
        # Yara internal error 30 is too many hits (obviously...). We
        # need to report this as a hit, not an error.
        if "internal error: 30" in str(e):
          return matches
        raise

    return matches
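
Note the API change from example 1: `client_utils.MemoryRegions` now yields region objects instead of `(start, length)` tuples. For illustration only, the fields used here can be pictured as a small dataclass (the real rdf_memory region type carries more, e.g. the `dumped_size` and `file` fields seen in the dump examples):

import dataclasses

@dataclasses.dataclass
class MemoryRegion:
    start: int  # virtual address where the region begins
    size: int   # length of the region in bytes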
Example #4
    def _GetMatches(self, psutil_process, scan_request):
        if scan_request.per_process_timeout:
            deadline = (rdfvalue.RDFDatetime.Now() +
                        scan_request.per_process_timeout)
        else:
            deadline = rdfvalue.RDFDatetime.Now() + rdfvalue.Duration.From(
                1, rdfvalue.WEEKS)

        process = client_utils.OpenProcessForMemoryAccess(
            pid=psutil_process.pid)
        with process:
            streamer = streaming.Streamer(
                chunk_size=scan_request.chunk_size,
                overlap_size=scan_request.overlap_size)
            matches = []

            try:
                for region in client_utils.MemoryRegions(
                        process, scan_request):
                    chunks = streamer.StreamRanges(offset=region.start,
                                                   amount=region.size)
                    for m in self._ScanRegion(process, chunks, deadline):
                        matches.append(m)
                        if (0 < scan_request.max_results_per_process
                                <= len(matches)):
                            return matches
            except TooManyMatchesError:
                # We need to report this as a hit, not an error.
                return matches

        return matches
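
Unlike example 3, this version no longer reads memory in the streamer: `StreamRanges` yields offset ranges only, and `_ScanRegion` now takes the process handle and performs the reads itself. A minimal sketch of such a range generator, with a hypothetical `Chunk` container and assuming `chunk_size > overlap_size`:

    def StreamRanges(self, offset, amount):
        """Yields overlapping chunk ranges covering [offset, offset + amount)."""
        end = offset + amount
        step = self.chunk_size - self.overlap_size
        position = offset
        while position < end:
            chunk_amount = min(self.chunk_size, end - position)
            yield Chunk(offset=position, amount=chunk_amount)
            if position + chunk_amount >= end:
                break
            position += step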
Example #5
    def _GetMatches(self, psutil_process, scan_request):
        if scan_request.per_process_timeout:
            deadline = (rdfvalue.RDFDatetime.Now() +
                        scan_request.per_process_timeout)
        else:
            deadline = rdfvalue.RDFDatetime.Now() + rdfvalue.Duration.From(
                1, rdfvalue.WEEKS)

        process = client_utils.OpenProcessForMemoryAccess(
            pid=psutil_process.pid)
        with process:
            matches = []

            try:
                for chunks in self._BatchIterateRegions(process, scan_request):
                    for m in self._ScanRegion(process, chunks, deadline):
                        matches.append(m)
                        if (0 < scan_request.max_results_per_process
                                <= len(matches)):
                            return matches
            except TooManyMatchesError:
                # We need to report this as a hit, not an error.
                return matches

        return matches
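
`_BatchIterateRegions` is not shown; the name suggests it groups the chunk ranges of many (often small) regions into batches, so a single `_ScanRegion` call can cover several regions at once. A speculative sketch along those lines; the flush threshold is an assumption, not GRR's actual policy:

    def _BatchIterateRegions(self, process, scan_request):
        streamer = streaming.Streamer(
            chunk_size=scan_request.chunk_size,
            overlap_size=scan_request.overlap_size)
        batch = []
        for region in client_utils.MemoryRegions(process, scan_request):
            for chunk in streamer.StreamRanges(offset=region.start,
                                               amount=region.size):
                batch.append(chunk)
                if len(batch) >= 50:  # assumed flush threshold
                    yield batch
                    batch = []
        if batch:
            yield batch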
Example #6
    def DumpProcess(self, psutil_process, args):
        response = rdf_memory.YaraProcessDumpInformation()
        response.process = rdf_client.Process.FromPsutilProcess(psutil_process)

        process = client_utils.OpenProcessForMemoryAccess(
            pid=psutil_process.pid)

        bytes_limit = args.size_limit

        with process:
            streamer = streaming.Streamer(chunk_size=args.chunk_size)

            with tempfiles.TemporaryDirectory(cleanup=False) as tmp_dir:
                for region in client_utils.MemoryRegions(process, args):

                    if bytes_limit and self.bytes_written + region.size > bytes_limit:
                        response.error = (
                            "Byte limit exceeded. Wrote %d bytes, "
                            "next block is %d bytes, limit is %d." %
                            (self.bytes_written, region.size, bytes_limit))
                        return response

                    end = region.start + region.size

                    # _ReplaceDumpPathspecsWithMultiGetFilePathspec in DumpProcessMemory
                    # flow asserts that MemoryRegions can be uniquely identified by their
                    # file's basename.
                    filename = "%s_%d_%x_%x.tmp" % (psutil_process.name(),
                                                    psutil_process.pid,
                                                    region.start, end)
                    filepath = os.path.join(tmp_dir.path, filename)

                    chunks = streamer.StreamMemory(process,
                                                   offset=region.start,
                                                   amount=region.size)
                    bytes_written = self._SaveMemDumpToFilePath(
                        filepath, chunks)

                    if not bytes_written:
                        continue

                    self.bytes_written += bytes_written

                    # TODO: Remove workaround after client_utils are fixed.
                    canonical_path = client_utils.LocalPathToCanonicalPath(
                        filepath)
                    if not canonical_path.startswith("/"):
                        canonical_path = "/" + canonical_path

                    region.file = rdf_paths.PathSpec(
                        path=canonical_path,
                        pathtype=rdf_paths.PathSpec.PathType.TMPFILE)

                    response.memory_regions.Append(region)

        return response
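
The `<name>_<pid>_<start-hex>_<end-hex>.tmp` filename format is what makes each region's basename unique, as the comment above requires. For illustration, a hypothetical parser that recovers the components; `rsplit` is used because process names may themselves contain underscores (GRR's actual server-side parsing may differ):

def _ParseDumpFilename(filename):
    stem, _ = os.path.splitext(filename)  # drop the ".tmp" extension
    name, pid, start_hex, end_hex = stem.rsplit("_", 3)
    return name, int(pid), int(start_hex, 16), int(end_hex, 16)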
Example #7
  def setUp(self):
    super().setUp()
    stack = contextlib.ExitStack()
    self.addCleanup(stack.close)

    self._process = stack.enter_context(
        client_utils.OpenProcessForMemoryAccess(os.getpid()))
    self._process.Open()

    self._process_file_descriptor = (
        communication.FileDescriptor.FromSerialized(
            self._process.serialized_file_descriptor, communication.Mode.READ))

    self._server = stack.enter_context(
        server.CreateMemoryServer([self._process_file_descriptor]))
    self._client = client.Client(self._server.Connect())
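
Registering `stack.close` with `addCleanup` before entering any contexts guarantees that the process handle and the memory server are torn down in reverse order after each test, even if `setUp` itself fails partway through.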
Example #8
    def DumpProcess(self, psutil_process, args):
        response = rdf_memory.YaraProcessDumpInformation()
        response.process = rdf_client.Process.FromPsutilProcess(psutil_process)

        process = client_utils.OpenProcessForMemoryAccess(
            pid=psutil_process.pid)

        bytes_limit = args.size_limit

        with process:
            streamer = streaming.Streamer(chunk_size=args.chunk_size)

            with tempfiles.TemporaryDirectory(cleanup=False) as tmp_dir:
                for start, length in client_utils.MemoryRegions(process, args):

                    if bytes_limit and self.bytes_written + length > bytes_limit:
                        response.error = (
                            "Byte limit exceeded. Wrote %d bytes, "
                            "next block is %d bytes, limit is %d." %
                            (self.bytes_written, length, bytes_limit))
                        return response

                    end = start + length
                    # TODO: The filename is parsed on the server side to
                    # extract the memory address again. This should be changed by
                    # saving the `start` and `end` in YaraProcessDumpInformation.
                    filename = "%s_%d_%x_%x.tmp" % (
                        psutil_process.name(), psutil_process.pid, start, end)
                    filepath = os.path.join(tmp_dir.path, filename)

                    chunks = streamer.StreamMemory(process,
                                                   offset=start,
                                                   amount=length)
                    bytes_written = self._SaveMemDumpToFilePath(
                        filepath, chunks)

                    if not bytes_written:
                        continue

                    self.bytes_written += bytes_written
                    response.dump_files.Append(
                        rdf_paths.PathSpec(
                            path=filepath,
                            pathtype=rdf_paths.PathSpec.PathType.TMPFILE))

        return response
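
Compared with examples 2 and 6, this older variant appends each dump as a flat pathspec in `dump_files` rather than attaching it to its memory region; the TODO above explains the cost: the server must re-parse the start and end addresses out of the filename.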
Example #9
  def Open(self) -> None:
    with contextlib.ExitStack() as stack:
      file_descriptors = []
      for psutil_process in self._psutil_processes:
        try:
          process = stack.enter_context(
              client_utils.OpenProcessForMemoryAccess(psutil_process.pid))
        except Exception as e:  # pylint: disable=broad-except
          # OpenProcessForMemoryAccess can raise any exception upon error.
          self._pid_to_exception[psutil_process.pid] = e
          continue
        self._pid_to_serializable_file_descriptor[
            psutil_process.pid] = process.serialized_file_descriptor
        file_descriptors.append(
            communication.FileDescriptor.FromSerialized(
                process.serialized_file_descriptor, communication.Mode.READ))
      self._server = memory_server.CreateMemoryServer(file_descriptors)
      self._server.Start()
      self._client = memory_client.Client(self._server.Connect())
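
`OpenProcessForMemoryAccess` can fail for many reasons (insufficient privileges, the process exiting mid-scan), so failures are recorded per PID in `_pid_to_exception` instead of aborting the whole batch; the remaining processes are still handed to the memory server.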
Example #10
  def DumpProcess(self, psutil_process, args):
    response = rdf_yara.YaraProcessDumpInformation()
    response.process = rdf_client.Process.FromPsutilProcess(psutil_process)

    process = client_utils.OpenProcessForMemoryAccess(pid=psutil_process.pid)

    bytes_limit = args.size_limit

    with process:
      streamer = streaming.MemoryStreamer(process, chunk_size=args.chunk_size)

      with tempfiles.TemporaryDirectory(cleanup=False) as tmp_dir:
        for start, length in client_utils.MemoryRegions(process, args):

          if bytes_limit and self.bytes_written + length > bytes_limit:
            response.error = ("Byte limit exceeded. Wrote %d bytes, "
                              "next block is %d bytes, limit is %d." %
                              (self.bytes_written, length, bytes_limit))
            return response

          end = start + length
          filename = "%s_%d_%x_%x.tmp" % (psutil_process.name(),
                                          psutil_process.pid, start, end)
          filepath = os.path.join(tmp_dir.path, filename)

          bytes_written = self._SaveMemDumpToFilePath(filepath, streamer, start,
                                                      length)

          if not bytes_written:
            continue

          self.bytes_written += bytes_written
          response.dump_files.Append(
              rdf_paths.PathSpec(
                  path=filepath, pathtype=rdf_paths.PathSpec.PathType.TMPFILE))

    return response
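
In this oldest variant `_SaveMemDumpToFilePath` receives the streamer plus an explicit start and length instead of pre-built chunks. A plausible sketch, assuming the old `MemoryStreamer` exposes a `Stream(offset, amount)` generator of chunks with a `data` attribute (hypothetical; the actual signature may differ):

  def _SaveMemDumpToFilePath(self, filepath, streamer, start, length):
    bytes_written = 0
    with open(filepath, "wb") as out:
      for chunk in streamer.Stream(start, length):
        if not chunk.data:
          break
        out.write(chunk.data)
        bytes_written += len(chunk.data)
    return bytes_written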