def _ScanProcess(self, psutil_process, args):
  """Scans the memory of a single process for YARA rule matches.

  Args:
    psutil_process: A psutil.Process identifying the process to scan.
    args: A scan request carrying the YARA signature, chunking settings,
      an optional per-process timeout and a max-results limit.

  Returns:
    A list of YARA match objects collected from the process memory. The
    list may be truncated when max_results_per_process or libyara's
    internal hit limit is reached.
  """
  if args.per_process_timeout:
    deadline = rdfvalue.RDFDatetime.Now() + args.per_process_timeout
  else:
    # No explicit timeout: one week is effectively unlimited for a scan.
    deadline = rdfvalue.RDFDatetime.Now() + rdfvalue.Duration("1w")

  rules = args.yara_signature.GetRules()

  process = client_utils.OpenProcessForMemoryAccess(pid=psutil_process.pid)
  with process:
    streamer = streaming.Streamer(
        chunk_size=args.chunk_size, overlap_size=args.overlap_size)
    matches = []

    try:
      for start, length in client_utils.MemoryRegions(process, args):
        chunks = streamer.StreamMemory(process, offset=start, amount=length)
        for m in self._ScanRegion(rules, chunks, deadline):
          matches.append(m)
          if (args.max_results_per_process > 0 and
              len(matches) >= args.max_results_per_process):
            return matches
    except yara.Error as e:
      # Yara internal error 30 is too many hits (obviously...). We
      # need to report this as a hit, not an error.
      # Fix: Exception.message does not exist on Python 3 (it would raise
      # AttributeError here); match the error text via str(e) instead,
      # consistent with the newer _GetMatches implementation.
      if "internal error: 30" in str(e):
        return matches
      raise

  return matches
def DumpProcess(self, psutil_process, args):
  """Dumps the readable memory regions of a process to temporary files.

  Regions may be reordered by priority and trimmed to a byte budget
  before being written out, sorted by start address.
  """
  response = rdf_memory.YaraProcessDumpInformation()
  response.process = rdf_client.Process.FromPsutilProcess(psutil_process)
  streamer = streaming.Streamer(chunk_size=args.chunk_size)

  with client_utils.OpenProcessForMemoryAccess(psutil_process.pid) as process:
    dump_regions = list(client_utils.MemoryRegions(process, args))

    if args.prioritize_offsets:
      dump_regions = _PrioritizeRegions(dump_regions, args.prioritize_offsets)

    if not args.size_limit:
      # No byte budget: every region gets dumped in full.
      for r in dump_regions:
        r.dumped_size = r.size
    else:
      region_count = len(dump_regions)
      dump_regions = _ApplySizeLimit(dump_regions, args.size_limit)
      if len(dump_regions) < region_count:
        response.error = ("Byte limit exceeded. Writing {} of {} "
                          "regions.").format(len(dump_regions), region_count)

    dump_regions.sort(key=lambda r: r.start)

    with tempfiles.TemporaryDirectory(cleanup=False) as tmp_dir:
      for r in dump_regions:
        self.Progress()
        pathspec = self._SaveRegionToDirectory(psutil_process, process, r,
                                               tmp_dir, streamer)
        if pathspec is not None:
          r.file = pathspec
          response.memory_regions.Append(r)

  return response
def _GetMatches(self, psutil_process, scan_request):
  """Collects YARA matches from a single process's memory regions.

  Stops early once max_results_per_process matches have been gathered,
  and treats TooManyMatchesError as a successful (truncated) result.
  """
  start_time = rdfvalue.RDFDatetime.Now()
  if scan_request.per_process_timeout:
    deadline = start_time + scan_request.per_process_timeout
  else:
    # One week is effectively "no timeout" for a single process.
    deadline = start_time + rdfvalue.Duration.From(1, rdfvalue.WEEKS)

  proc_handle = client_utils.OpenProcessForMemoryAccess(pid=psutil_process.pid)
  with proc_handle:
    streamer = streaming.Streamer(
        chunk_size=scan_request.chunk_size,
        overlap_size=scan_request.overlap_size)

    hits = []
    limit = scan_request.max_results_per_process
    try:
      for region in client_utils.MemoryRegions(proc_handle, scan_request):
        ranges = streamer.StreamRanges(offset=region.start,
                                       amount=region.size)
        for match in self._ScanRegion(proc_handle, ranges, deadline):
          hits.append(match)
          if 0 < limit <= len(hits):
            return hits
    except TooManyMatchesError:
      # We need to report this as a hit, not an error.
      return hits

  return hits
def _GetMatches(self, psutil_process, scan_request):
  """Runs the compiled YARA rules over a process and gathers matches."""
  if scan_request.per_process_timeout:
    timeout = scan_request.per_process_timeout
  else:
    # A week is effectively "forever" for a single process scan.
    timeout = rdfvalue.Duration.From(1, rdfvalue.WEEKS)
  deadline = rdfvalue.RDFDatetime.Now() + timeout

  compiled_rules = scan_request.yara_signature.GetRules()

  handle = client_utils.OpenProcessForMemoryAccess(pid=psutil_process.pid)
  with handle:
    streamer = streaming.Streamer(
        chunk_size=scan_request.chunk_size,
        overlap_size=scan_request.overlap_size)

    found = []
    try:
      for region in client_utils.MemoryRegions(handle, scan_request):
        chunks = streamer.StreamMemory(
            handle, offset=region.start, amount=region.size)
        for match in self._ScanRegion(compiled_rules, chunks, deadline):
          found.append(match)
          if 0 < scan_request.max_results_per_process <= len(found):
            return found
    except yara.Error as e:
      # Yara internal error 30 means "too many hits"; report it as a
      # successful (truncated) result, not as an error.
      if "internal error: 30" in str(e):
        return found
      raise

  return found
def DumpProcess(self, psutil_process, args):
  """Dumps all readable memory regions of a process to temporary files.

  Args:
    psutil_process: A psutil.Process identifying the process to dump.
    args: Dump request carrying chunk_size and an optional size_limit
      (total byte budget across regions).

  Returns:
    A YaraProcessDumpInformation with one memory_regions entry per region
    that produced data; response.error is set if the byte budget stops
    the dump early.
  """
  response = rdf_memory.YaraProcessDumpInformation()
  response.process = rdf_client.Process.FromPsutilProcess(psutil_process)
  process = client_utils.OpenProcessForMemoryAccess(pid=psutil_process.pid)
  bytes_limit = args.size_limit
  with process:
    streamer = streaming.Streamer(chunk_size=args.chunk_size)
    # cleanup=False: the dump files must outlive this action so they can
    # be collected afterwards.
    with tempfiles.TemporaryDirectory(cleanup=False) as tmp_dir:
      for region in client_utils.MemoryRegions(process, args):
        # Stop before exceeding the byte budget and report how far we got.
        if bytes_limit and self.bytes_written + region.size > bytes_limit:
          response.error = ("Byte limit exceeded. Wrote %d bytes, "
                            "next block is %d bytes, limit is %d." %
                            (self.bytes_written, region.size, bytes_limit))
          return response
        end = region.start + region.size
        # _ReplaceDumpPathspecsWithMultiGetFilePathspec in DumpProcessMemory
        # flow asserts that MemoryRegions can be uniquely identified by their
        # file's basename.
        filename = "%s_%d_%x_%x.tmp" % (psutil_process.name(),
                                        psutil_process.pid, region.start, end)
        filepath = os.path.join(tmp_dir.path, filename)
        chunks = streamer.StreamMemory(
            process, offset=region.start, amount=region.size)
        bytes_written = self._SaveMemDumpToFilePath(filepath, chunks)
        # A region that produced no bytes (e.g. unreadable) is skipped.
        if not bytes_written:
          continue
        self.bytes_written += bytes_written
        # TODO: Remove workaround after client_utils are fixed.
        canonical_path = client_utils.LocalPathToCanonicalPath(filepath)
        if not canonical_path.startswith("/"):
          canonical_path = "/" + canonical_path
        region.file = rdf_paths.PathSpec(
            path=canonical_path,
            pathtype=rdf_paths.PathSpec.PathType.TMPFILE)
        response.memory_regions.Append(region)
  return response
def DumpProcess(self, psutil_process, args):
  """Writes every readable memory region of a process to a temp file."""
  response = rdf_memory.YaraProcessDumpInformation()
  response.process = rdf_client.Process.FromPsutilProcess(psutil_process)
  handle = client_utils.OpenProcessForMemoryAccess(pid=psutil_process.pid)
  limit = args.size_limit
  with handle:
    streamer = streaming.Streamer(chunk_size=args.chunk_size)
    # cleanup=False so the dump files survive this action.
    with tempfiles.TemporaryDirectory(cleanup=False) as tmp_dir:
      for offset, size in client_utils.MemoryRegions(handle, args):
        # Enforce the overall byte budget before writing the next region.
        if limit and self.bytes_written + size > limit:
          response.error = ("Byte limit exceeded. Wrote %d bytes, "
                            "next block is %d bytes, limit is %d." %
                            (self.bytes_written, size, limit))
          return response

        # TODO: The filename is parsed on the server side to
        # extract the memory address again. This should be changed by
        # saving the `start` and `end` in YaraProcessDumpInformation.
        filename = "%s_%d_%x_%x.tmp" % (psutil_process.name(),
                                        psutil_process.pid, offset,
                                        offset + size)
        filepath = os.path.join(tmp_dir.path, filename)

        chunks = streamer.StreamMemory(handle, offset=offset, amount=size)
        written = self._SaveMemDumpToFilePath(filepath, chunks)
        if written:
          self.bytes_written += written
          response.dump_files.Append(
              rdf_paths.PathSpec(
                  path=filepath,
                  pathtype=rdf_paths.PathSpec.PathType.TMPFILE))
  return response
def _BatchIterateRegions(
    self, process, scan_request: rdf_memory.YaraProcessScanRequest
) -> Iterator[List[streaming.Chunk]]:
  """Yields the process's memory chunks grouped into bounded batches.

  A batch is flushed as soon as it reaches MAX_BATCH_SIZE_CHUNKS chunks
  or its accumulated bytes reach scan_request.chunk_size; any final
  partial batch is yielded at the end.
  """
  streamer = streaming.Streamer(
      chunk_size=scan_request.chunk_size,
      overlap_size=scan_request.overlap_size)

  pending = []
  pending_bytes = 0

  for region in client_utils.MemoryRegions(process, scan_request):
    for chunk in streamer.StreamRanges(offset=region.start,
                                       amount=region.size):
      pending.append(chunk)
      pending_bytes += chunk.amount
      if (len(pending) >= self.MAX_BATCH_SIZE_CHUNKS or
          pending_bytes >= scan_request.chunk_size):
        yield pending
        pending = []
        pending_bytes = 0

  if pending:
    yield pending
def DumpProcess(self, psutil_process, args):
  """Dumps a process's readable memory regions into temporary files.

  Args:
    psutil_process: A psutil.Process identifying the process to dump.
    args: Dump request with chunk_size and an optional size_limit (total
      byte budget across all regions).

  Returns:
    A YaraProcessDumpInformation listing the written dump files;
    response.error is set if the byte budget stops the dump early.
  """
  response = rdf_yara.YaraProcessDumpInformation()
  response.process = rdf_client.Process.FromPsutilProcess(psutil_process)
  process = client_utils.OpenProcessForMemoryAccess(pid=psutil_process.pid)
  bytes_limit = args.size_limit
  with process:
    streamer = streaming.MemoryStreamer(process, chunk_size=args.chunk_size)
    # cleanup=False: dump files must survive this action so they can be
    # collected afterwards.
    with tempfiles.TemporaryDirectory(cleanup=False) as tmp_dir:
      for start, length in client_utils.MemoryRegions(process, args):
        # Enforce the overall byte budget before writing the next region.
        if bytes_limit and self.bytes_written + length > bytes_limit:
          response.error = ("Byte limit exceeded. Wrote %d bytes, "
                            "next block is %d bytes, limit is %d." %
                            (self.bytes_written, length, bytes_limit))
          return response
        end = start + length
        # Filename encodes process name, pid and the region's address range
        # (hex), which downstream consumers rely on to identify the region.
        filename = "%s_%d_%x_%x.tmp" % (psutil_process.name(),
                                        psutil_process.pid, start, end)
        filepath = os.path.join(tmp_dir.path, filename)
        bytes_written = self._SaveMemDumpToFilePath(filepath, streamer, start,
                                                    length)
        # Regions that yield no data are skipped silently.
        if not bytes_written:
          continue
        self.bytes_written += bytes_written
        response.dump_files.Append(
            rdf_paths.PathSpec(
                path=filepath,
                pathtype=rdf_paths.PathSpec.PathType.TMPFILE))
  return response