Example #1
    def DumpProcess(self, psutil_process, args):
        response = rdf_memory.YaraProcessDumpInformation()
        response.process = rdf_client.Process.FromPsutilProcess(psutil_process)
        streamer = streaming.Streamer(chunk_size=args.chunk_size)

        with client_utils.OpenProcessForMemoryAccess(
                psutil_process.pid) as process:
            regions = list(client_utils.MemoryRegions(process, args))

            if args.prioritize_offsets:
                regions = _PrioritizeRegions(regions, args.prioritize_offsets)

            if args.size_limit:
                total_regions = len(regions)
                regions = _ApplySizeLimit(regions, args.size_limit)
                if len(regions) < total_regions:
                    response.error = ("Byte limit exceeded. Writing {} of {} "
                                      "regions.").format(
                                          len(regions), total_regions)
            else:
                for region in regions:
                    region.dumped_size = region.size

            regions = sorted(regions, key=lambda r: r.start)

            with tempfiles.TemporaryDirectory(cleanup=False) as tmp_dir:
                for region in regions:
                    self.Progress()
                    pathspec = self._SaveRegionToDirectory(
                        psutil_process, process, region, tmp_dir, streamer)
                    if pathspec is not None:
                        region.file = pathspec
                        response.memory_regions.Append(region)

        return response
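
Neither _PrioritizeRegions nor _ApplySizeLimit is shown in this snippet. A minimal sketch of what _ApplySizeLimit could look like, assuming it keeps regions in order until the byte budget runs out and records each kept region's dumped_size (the real helper may also truncate the final region instead of dropping it):

def _ApplySizeLimit(regions, size_limit):
    """Sketch only: keep regions until size_limit bytes are accounted for."""
    kept = []
    bytes_left = size_limit
    for region in regions:
        if bytes_left <= 0:
            break
        # Record how much of this region will actually be dumped.
        region.dumped_size = min(region.size, bytes_left)
        bytes_left -= region.dumped_size
        kept.append(region)
    return kept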
Example #2
  def Run(self, args):
    """Use eficheck to extract the binary image of the flash.

    Args:
      args: EficheckConfig
    Returns:
      DumpEfiImageResponse

    This action executes eficheck twice:
      * First with --version to get the binary version.
      * Then with --save -b firmware.bin to save the image.
    """

    eficheck_version = self._GetVersion(args)
    if not eficheck_version:
      return False

    with tempfiles.TemporaryDirectory(cleanup=False) as tmp_dir:
      res = client_utils_common.Execute(
          args.cmd_path, ["--save", "-b", "firmware.bin"], cwd=tmp_dir.path)
      stdout, stderr, exit_status, time_used = res
      binary_response = rdf_client_action.ExecuteBinaryResponse(
          stdout=stdout,
          stderr=stderr,
          exit_status=exit_status,
          time_used=time_used)
      response = rdf_apple_firmware.DumpEfiImageResponse(
          eficheck_version=eficheck_version, response=binary_response)
      if exit_status:
        tmp_dir.cleanup = True
      else:
        response.path = rdf_paths.PathSpec(
            path=os.path.join(tmp_dir.path, "firmware.bin"),
            pathtype=rdf_paths.PathSpec.PathType.TMPFILE)
      self.SendReply(response)
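
self._GetVersion(args) is referenced above but not shown. A hedged sketch, assuming it simply runs the binary with --version through the same client_utils_common.Execute wrapper and returns its stdout; the real method may also validate the output or send a reply on failure:

  def _GetVersion(self, args):
    # Sketch only: probe the eficheck binary and return its version output,
    # or None if the command failed.
    stdout, stderr, exit_status, time_used = client_utils_common.Execute(
        args.cmd_path, ["--version"])
    del stderr, time_used  # Unused in this sketch.
    if exit_status:
      return None
    return stdout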
Example #3
    def DumpProcess(self, psutil_process, args):
        response = rdf_memory.YaraProcessDumpInformation()
        response.process = rdf_client.Process.FromPsutilProcess(psutil_process)

        process = client_utils.OpenProcessForMemoryAccess(
            pid=psutil_process.pid)

        bytes_limit = args.size_limit

        with process:
            streamer = streaming.Streamer(chunk_size=args.chunk_size)

            with tempfiles.TemporaryDirectory(cleanup=False) as tmp_dir:
                for region in client_utils.MemoryRegions(process, args):

                    if bytes_limit and self.bytes_written + region.size > bytes_limit:
                        response.error = (
                            "Byte limit exceeded. Wrote %d bytes, "
                            "next block is %d bytes, limit is %d." %
                            (self.bytes_written, region.size, bytes_limit))
                        return response

                    end = region.start + region.size

                    # _ReplaceDumpPathspecsWithMultiGetFilePathspec in DumpProcessMemory
                    # flow asserts that MemoryRegions can be uniquely identified by their
                    # file's basename.
                    filename = "%s_%d_%x_%x.tmp" % (psutil_process.name(),
                                                    psutil_process.pid,
                                                    region.start, end)
                    filepath = os.path.join(tmp_dir.path, filename)

                    chunks = streamer.StreamMemory(process,
                                                   offset=region.start,
                                                   amount=region.size)
                    bytes_written = self._SaveMemDumpToFilePath(
                        filepath, chunks)

                    if not bytes_written:
                        continue

                    self.bytes_written += bytes_written

                    # TODO: Remove workaround after client_utils are fixed.
                    canonical_path = client_utils.LocalPathToCanonicalPath(
                        filepath)
                    if not canonical_path.startswith("/"):
                        canonical_path = "/" + canonical_path

                    region.file = rdf_paths.PathSpec(
                        path=canonical_path,
                        pathtype=rdf_paths.PathSpec.PathType.TMPFILE)

                    response.memory_regions.Append(region)

        return response
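
self._SaveMemDumpToFilePath is not part of the snippet. A minimal sketch under two assumptions taken from how its result is used above: each streamed chunk exposes a data attribute, and a region that cannot be read yields no bytes:

    def _SaveMemDumpToFilePath(self, filepath, chunks):
        # Sketch only: append every chunk to the dump file and report how many
        # bytes ended up on disk; 0 signals that the region was skipped.
        bytes_written = 0
        with open(filepath, "wb") as fd:
            for chunk in chunks:
                if not chunk.data:
                    return 0
                fd.write(chunk.data)
                bytes_written += len(chunk.data)
        return bytes_written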
Example #4
    def DumpProcess(self, psutil_process, args):
        response = rdf_memory.YaraProcessDumpInformation()
        response.process = rdf_client.Process.FromPsutilProcess(psutil_process)

        process = client_utils.OpenProcessForMemoryAccess(
            pid=psutil_process.pid)

        bytes_limit = args.size_limit

        with process:
            streamer = streaming.Streamer(chunk_size=args.chunk_size)

            with tempfiles.TemporaryDirectory(cleanup=False) as tmp_dir:
                for start, length in client_utils.MemoryRegions(process, args):

                    if bytes_limit and self.bytes_written + length > bytes_limit:
                        response.error = (
                            "Byte limit exceeded. Wrote %d bytes, "
                            "next block is %d bytes, limit is %d." %
                            (self.bytes_written, length, bytes_limit))
                        return response

                    end = start + length
                    # TODO: The filename is parsed on the server side to
                    # extract the memory address again. This should be changed by
                    # saving the `start` and `end` in YaraProcessDumpInformation.
                    filename = "%s_%d_%x_%x.tmp" % (
                        psutil_process.name(), psutil_process.pid, start, end)
                    filepath = os.path.join(tmp_dir.path, filename)

                    chunks = streamer.StreamMemory(process,
                                                   offset=start,
                                                   amount=length)
                    bytes_written = self._SaveMemDumpToFilePath(
                        filepath, chunks)

                    if not bytes_written:
                        continue

                    self.bytes_written += bytes_written
                    response.dump_files.Append(
                        rdf_paths.PathSpec(
                            path=filepath,
                            pathtype=rdf_paths.PathSpec.PathType.TMPFILE))

        return response
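
The TODO above notes that the server parses the dump filename to recover the memory addresses. An illustrative, hypothetical parser for the <name>_<pid>_<start>_<end>.tmp format produced here (the helper name _ParseDumpFilename is invented for this example; os is assumed to be imported as in the snippet):

def _ParseDumpFilename(filename):
    # Illustrative only: recover the process name, pid and hex-encoded address
    # range from a dump filename such as the ones written above.
    base, _ = os.path.splitext(os.path.basename(filename))
    name, pid, start, end = base.rsplit("_", 3)
    return name, int(pid), int(start, 16), int(end, 16)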
Example #5
  def DumpProcess(self, psutil_process, args):
    response = rdf_yara.YaraProcessDumpInformation()
    response.process = rdf_client.Process.FromPsutilProcess(psutil_process)

    process = client_utils.OpenProcessForMemoryAccess(pid=psutil_process.pid)

    bytes_limit = args.size_limit

    with process:
      streamer = streaming.MemoryStreamer(process, chunk_size=args.chunk_size)

      with tempfiles.TemporaryDirectory(cleanup=False) as tmp_dir:
        for start, length in client_utils.MemoryRegions(process, args):

          if bytes_limit and self.bytes_written + length > bytes_limit:
            response.error = ("Byte limit exceeded. Wrote %d bytes, "
                              "next block is %d bytes, limit is %d." %
                              (self.bytes_written, length, bytes_limit))
            return response

          end = start + length
          filename = "%s_%d_%x_%x.tmp" % (psutil_process.name(),
                                          psutil_process.pid, start, end)
          filepath = os.path.join(tmp_dir.path, filename)

          bytes_written = self._SaveMemDumpToFilePath(filepath, streamer, start,
                                                      length)

          if not bytes_written:
            continue

          self.bytes_written += bytes_written
          response.dump_files.Append(
              rdf_paths.PathSpec(
                  path=filepath, pathtype=rdf_paths.PathSpec.PathType.TMPFILE))

    return response
Example #6
    def Run(self, args):
        """Use eficheck to extract hash files in plaintext.

        Args:
          args: EficheckConfig
        Returns:
          CollectEfiHashesResponse

        This action executes eficheck multiple times:
          * First with --version to get the binary version.
          * Then with the --generate-hashes option. This will create one or more
            .ealf files. Each file contains a binary representation of the hashes
            extracted from a part of the flash image (e.g., EFI, SEC).
          * For each file generated, we use the --show-hashes option to get a
            plaintext representation of the hashes. This raw output is sent to the
            server, which will perform further parsing.
        """

        eficheck_version = self._GetVersion(args)
        if not eficheck_version:
            return False

        with tempfiles.TemporaryDirectory() as tmp_dir:
            res = client_utils_common.Execute(args.cmd_path,
                                              ["--generate-hashes"],
                                              cwd=tmp_dir.path)
            stdout, stderr, exit_status, time_used = res
            # If something went wrong, forward the output directly.
            if exit_status:
                binary_response = rdf_client_action.ExecuteBinaryResponse(
                    stdout=stdout,
                    stderr=stderr,
                    exit_status=exit_status,
                    time_used=time_used)
                self.SendReply(
                    rdf_apple_firmware.CollectEfiHashesResponse(
                        response=binary_response))
                return
            # Otherwise, convert all the files generated and forward the output.

            for filename in glob.glob(os.path.join(tmp_dir.path, "*.ealf")):
                cmd_args = ["--show-hashes", "-h", filename]
                # Get the boot rom version from the filename.
                basename = os.path.basename(filename)
                if not self._FILENAME_RE.match(basename):
                    continue
                boot_rom_version, _ = os.path.splitext(basename)
                stdout, stderr, exit_status, time_used = client_utils_common.Execute(
                    args.cmd_path, cmd_args, bypass_allowlist=True)

                binary_response = rdf_client_action.ExecuteBinaryResponse(
                    stdout=stdout,
                    stderr=stderr,
                    exit_status=exit_status,
                    time_used=time_used)
                self.SendReply(
                    rdf_apple_firmware.CollectEfiHashesResponse(
                        eficheck_version=eficheck_version,
                        boot_rom_version=boot_rom_version,
                        response=binary_response))

                tempfiles.DeleteGRRTempFile(filename)
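
self._FILENAME_RE is referenced above but not defined in the snippet. A conservative sketch of such a filter, assuming eficheck names each hash file after the boot ROM version with an .ealf extension; the exact pattern used by the real class may differ:

import re

# Sketch only: in the action class this would be the _FILENAME_RE class
# attribute. It accepts dotted, word-like basenames (boot ROM version strings)
# so that os.path.splitext() above yields the version.
_FILENAME_RE = re.compile(r"^[\w.]+\.ealf$")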