def Start(self):
  self.SendReply(rdfvalue.MemoryInformation(
      device=rdfvalue.PathSpec(
          path=os.path.join(config_lib.CONFIG["Test.data_dir"], "auth.log"),
          pathtype=rdfvalue.PathSpec.PathType.OS),
      runs=[rdfvalue.BufferReference(length=638976, offset=5),
            rdfvalue.BufferReference(length=145184, offset=643074)]))

def testNetgroupBufferParser(self):
  """Ensure we can extract users from a netgroup file."""
  parser = linux_file_parser.NetgroupBufferParser()
  buf1 = rdfvalue.BufferReference(data="group1 (-,user1,) (-,user2,) "
                                  "(-,user3,)\n")
  buf2 = rdfvalue.BufferReference(data="super_group3 (-,user5,) (-,user6,)"
                                  " group1 group2\n")
  ff_result = rdfvalue.FileFinderResult(matches=[buf1, buf2])
  config_lib.CONFIG.Set("Artifacts.netgroup_user_blacklist",
                        ["user2", "user3"])
  out = list(parser.Parse(ff_result, None))
  self.assertItemsEqual([x.username for x in out],
                        [u"user1", u"user5", u"user6"])

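# For reference: each netgroup(5) line is "groupname member ...", where a
# member is either a (host,user,domain) triple or the name of another
# netgroup, as in "super_group3 ... group1 group2" above. The blacklist set
# in the test is why user2 and user3 are absent from the expected usernames.
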
def ReadBuffer(self, args):
  # Test stub: ignore the request and return the canned `mbr` bytes defined
  # in the enclosing test.
  _ = args
  return [rdfvalue.BufferReference(data=mbr, offset=0, length=len(mbr))]

def testPasswdBufferParser(self):
  """Ensure we can extract users from a passwd file."""
  parser = linux_file_parser.PasswdBufferParser()
  buf1 = rdfvalue.BufferReference(data="user1:x:1000:1000:User1"
                                  " Name,,,:/home/user1:/bin/bash\n")
  buf2 = rdfvalue.BufferReference(data="user2:x:1000:1000:User2"
                                  " Name,,,:/home/user2:/bin/bash\n")
  ff_result = rdfvalue.FileFinderResult(matches=[buf1, buf2])
  out = list(parser.Parse(ff_result, None))
  self.assertEqual(len(out), 2)
  self.assertTrue(isinstance(out[0], rdfvalue.KnowledgeBaseUser))
  self.assertTrue(isinstance(out[1], rdfvalue.KnowledgeBaseUser))
  self.assertEqual(out[0].username, "user1")
  self.assertEqual(out[0].full_name, "User1 Name,,,")

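# For reference: a passwd(5) line has seven colon-separated fields,
#   username:password:UID:GID:GECOS:home:shell
# and the GECOS field ("User1 Name,,,") is what surfaces as full_name above.
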
def TransferBuffer(self, responses):
  # Did it work?
  if not responses.success:
    raise IOError("Error running TransferBuffer: %s" % responses.status)

  response = responses.First()

  # Write the data we got from the client to the file.
  sparse_image = self.state.fd
  chunk_number = response.offset / sparse_image.chunksize
  sparse_image.AddBlob(blob_hash=response.data,
                       length=response.length,
                       chunk_number=chunk_number)
  sparse_image.Flush()

  length_to_read = min(self.state.chunksize,
                       self.state.bytes_left_to_read)

  if length_to_read:
    request = rdfvalue.BufferReference(pathspec=self.state.pathspec,
                                       length=length_to_read,
                                       offset=self.state.current_offset)
    # TODO(user): Again, this is going to be too slow, since we're
    # waiting for a client response every time we request a buffer. We need
    # to queue up multiple reads.
    self.CallClient("TransferBuffer", request,
                    next_state="TransferBuffer")

    # Move our offset along the file by how much we read.
    self.state.current_offset += length_to_read
    # Remember how much more we need to read.
    self.state.bytes_left_to_read = max(
        0, self.state.bytes_left_to_read - length_to_read)

def setUp(self):
  super(TestNetworkByteLimits, self).setUp()
  pathspec = rdfvalue.PathSpec(path="/nothing",
                               pathtype=rdfvalue.PathSpec.PathType.OS)
  self.buffer_ref = rdfvalue.BufferReference(pathspec=pathspec, length=5000)
  self.data = "X" * 500
  self.old_read = standard.vfs.ReadVFS
  standard.vfs.ReadVFS = lambda x, y, z: self.data
  self.transfer_buf = test_lib.ActionMock("TransferBuffer")

def Start(self): """Schedules the ReadBuffer client action.""" pathspec = rdfvalue.PathSpec( path="\\\\.\\PhysicalDrive0\\", pathtype=rdfvalue.PathSpec.PathType.OS, path_options=rdfvalue.PathSpec.Options.CASE_LITERAL) request = rdfvalue.BufferReference(pathspec=pathspec, offset=0, length=self.args.length) self.CallClient("ReadBuffer", request, next_state="StoreMBR")
def testReadBuffer(self):
  """Test reading a buffer."""
  path = os.path.join(self.base_path, "morenumbers.txt")
  p = rdfvalue.PathSpec(path=path, pathtype=rdfvalue.PathSpec.PathType.OS)
  result = self.RunAction(
      "ReadBuffer",
      rdfvalue.BufferReference(pathspec=p, offset=100, length=10))[0]

  self.assertEqual(result.offset, 100)
  self.assertEqual(result.length, 10)
  self.assertEqual(result.data, "7\n38\n39\n40")

def FetchWindow(self, number_of_chunks_to_readahead):
  """Read ahead a number of buffers to fill the window."""
  for _ in range(number_of_chunks_to_readahead):
    # Do not read past the end of file.
    if self.state.current_chunk_number > self.state.max_chunk_number:
      return

    request = rdfvalue.BufferReference(
        pathspec=self.args.pathspec,
        offset=self.state.current_chunk_number * self.CHUNK_SIZE,
        length=self.CHUNK_SIZE)
    self.CallClient("TransferBuffer", request, next_state="ReadBuffer")
    self.state.current_chunk_number += 1

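# A minimal usage sketch, not part of this section: the surrounding flow
# presumably primes the window once in Start and tops it up as responses
# arrive, keeping WINDOW_SIZE TransferBuffer requests in flight. WINDOW_SIZE
# and the body of ReadBuffer below are assumptions.
def Start(self):
  # Fill the initial read-ahead window.
  self.FetchWindow(self.WINDOW_SIZE)

def ReadBuffer(self, responses):
  if responses.success:
    # ... write the returned chunk to storage (omitted) ...
    # Replace the completed request so the window stays full.
    self.FetchWindow(1)
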
def testBufferReferenceToExportedMatchConverter(self):
  buffer_reference = rdfvalue.BufferReference(
      offset=42, length=43, data="somedata",
      pathspec=rdfvalue.PathSpec(path="/some/path",
                                 pathtype=rdfvalue.PathSpec.PathType.OS))
  metadata = rdfvalue.ExportedMetadata(client_urn="C.0000000000000001")

  converter = export.BufferReferenceToExportedMatchConverter()
  results = list(converter.Convert(metadata, buffer_reference,
                                   token=self.token))

  self.assertEqual(len(results), 1)
  self.assertEqual(results[0].offset, 42)
  self.assertEqual(results[0].length, 43)
  self.assertEqual(results[0].data, "somedata")
  self.assertEqual(
      results[0].urn,
      rdfvalue.RDFURN("aff4:/C.0000000000000001/fs/os/some/path"))

def Start(self):
  urn = self.state.args.file_urn

  fd = aff4.FACTORY.Open(urn, token=self.token,
                         aff4_type="AFF4SparseImage", mode="rw")
  self.state.Register("fd", fd)

  pathspec = fd.Get(fd.Schema.PATHSPEC)

  # Use the object's chunk size, in case it's different to the class-wide
  # chunk size.
  chunksize = fd.chunksize
  self.state.Register("pathspec", pathspec)
  self.state.Register("chunksize", chunksize)

  # Make sure we always read a whole number of chunks.
  new_length, new_offset = self.AlignToChunks(self.state.args.length,
                                              self.state.args.offset,
                                              chunksize)

  # Remember where we're up to in reading the file, and how much we have
  # left to read.
  self.state.Register("bytes_left_to_read", new_length)
  self.state.Register("current_offset", new_offset)

  # Always read one chunk at a time.
  request = rdfvalue.BufferReference(pathspec=self.state.pathspec,
                                     length=self.state.chunksize,
                                     offset=self.state.current_offset)
  # Remember where we're up to, and that we're about to read one chunk.
  self.state.bytes_left_to_read -= chunksize
  self.state.current_offset += chunksize

  self.CallClient("TransferBuffer", request, next_state="TransferBuffer")

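# AlignToChunks is called above but not shown in this section. A minimal
# sketch, assuming it rounds the offset down and the length up so the flow
# only ever requests whole chunks:
@staticmethod
def AlignToChunks(length, offset, chunksize):
  """Rounds (length, offset) outwards to whole-chunk boundaries."""
  # Round the offset down to the start of its chunk.
  new_offset = (offset // chunksize) * chunksize
  # Grow the length by the bytes gained from rounding down, then round it
  # up to a whole number of chunks.
  new_length = length + (offset - new_offset)
  remainder = new_length % chunksize
  if remainder:
    new_length += chunksize - remainder
  return new_length, new_offset
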
def Grep(self, responses):
  if responses.success:
    # Grep not specified - just list all hits.
    if not self.args.grep:
      msgs = [rdfvalue.BufferReference(pathspec=r.pathspec)
              for r in responses]
      self.CallStateInline(messages=msgs, next_state="WriteHits")
    else:
      # Grep specification given, ask the client to grep the files.
      for response in responses:
        # Only fetch regular files here.
        if not stat.S_ISDIR(response.st_mode):
          # Cast the BareGrepSpec to a GrepSpec type.
          request = rdfvalue.GrepSpec(target=response.pathspec,
                                      **self.args.grep.AsDict())
          self.CallClient("Grep", request=request, next_state="WriteHits",
                          request_data=dict(pathspec=response.pathspec))

def ReadBuffer(self, args):
  # Mock client action: echo the request back, filled with 0x01 bytes.
  response = rdfvalue.BufferReference(args)
  response.data = "\x01" * response.length
  return [response]

def testFileFinderResultExportConverter(self):
  pathspec = rdfvalue.PathSpec(path="/some/path",
                               pathtype=rdfvalue.PathSpec.PathType.OS)

  match1 = rdfvalue.BufferReference(
      offset=42, length=43, data="somedata1", pathspec=pathspec)
  match2 = rdfvalue.BufferReference(
      offset=44, length=45, data="somedata2", pathspec=pathspec)
  stat_entry = rdfvalue.StatEntry(
      aff4path=rdfvalue.RDFURN("aff4:/C.00000000000001/fs/os/some/path"),
      pathspec=pathspec,
      st_mode=33184,
      st_ino=1063090,
      st_atime=1336469177,
      st_mtime=1336129892,
      st_ctime=1336129892)

  file_finder_result = rdfvalue.FileFinderResult(stat_entry=stat_entry,
                                                 matches=[match1, match2])
  metadata = rdfvalue.ExportedMetadata(client_urn="C.0000000000000001")

  converter = export.FileFinderResultConverter()
  results = list(converter.Convert(metadata, file_finder_result,
                                   token=self.token))

  # We expect one ExportedFile instance in the results.
  exported_files = [result for result in results
                    if isinstance(result, rdfvalue.ExportedFile)]
  self.assertEqual(len(exported_files), 1)

  self.assertEqual(exported_files[0].basename, "path")
  self.assertEqual(
      exported_files[0].urn,
      rdfvalue.RDFURN("aff4:/C.00000000000001/fs/os/some/path"))
  self.assertEqual(exported_files[0].st_mode, 33184)
  self.assertEqual(exported_files[0].st_ino, 1063090)
  self.assertEqual(exported_files[0].st_atime, 1336469177)
  self.assertEqual(exported_files[0].st_mtime, 1336129892)
  self.assertEqual(exported_files[0].st_ctime, 1336129892)

  self.assertFalse(exported_files[0].HasField("content"))
  self.assertFalse(exported_files[0].HasField("content_sha256"))
  self.assertFalse(exported_files[0].HasField("hash_md5"))
  self.assertFalse(exported_files[0].HasField("hash_sha1"))
  self.assertFalse(exported_files[0].HasField("hash_sha256"))

  # We expect two ExportedMatch instances in the results.
  exported_matches = [result for result in results
                      if isinstance(result, rdfvalue.ExportedMatch)]
  exported_matches = sorted(exported_matches, key=lambda x: x.offset)
  self.assertEqual(len(exported_matches), 2)

  self.assertEqual(exported_matches[0].offset, 42)
  self.assertEqual(exported_matches[0].length, 43)
  self.assertEqual(exported_matches[0].data, "somedata1")
  self.assertEqual(
      exported_matches[0].urn,
      rdfvalue.RDFURN("aff4:/C.0000000000000001/fs/os/some/path"))

  self.assertEqual(exported_matches[1].offset, 44)
  self.assertEqual(exported_matches[1].length, 45)
  self.assertEqual(exported_matches[1].data, "somedata2")
  self.assertEqual(
      exported_matches[1].urn,
      rdfvalue.RDFURN("aff4:/C.0000000000000001/fs/os/some/path"))

def GetBufferForChunk(self, chunk):
  chunk_offset = chunk * self.state.chunksize
  request = rdfvalue.BufferReference(pathspec=self.state.pathspec,
                                     length=self.state.chunksize,
                                     offset=chunk_offset)
  return request

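# A minimal usage sketch, assumed rather than taken from this section:
# turning a list of chunk indexes into TransferBuffer requests.
def FetchChunks(self, chunk_numbers):
  """Requests each listed chunk from the client."""
  for chunk in chunk_numbers:
    request = self.GetBufferForChunk(chunk)
    self.CallClient("TransferBuffer", request, next_state="TransferBuffer")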