def Run(self, unused_args):
  for f in ["/var/log/CDIS.custom", "/var", "/private"]:
    try:
      stat = os.stat(f)
      self.SendReply(rdf_protodict.DataBlob(integer=int(stat.st_ctime)))
      return
    except OSError:
      pass

  self.SendReply(rdf_protodict.DataBlob(integer=0))
def testRdfFormatterHandlesKeyValuePair(self):
  """rdfvalue.KeyValue items need special handling to expand k and v."""
  key = rdf_protodict.DataBlob().SetValue("skynet")
  value = rdf_protodict.DataBlob().SetValue([1997])
  rdf = rdf_protodict.KeyValue(k=key, v=value)
  template = "{k}: {v}"
  hinter = hints.Hinter(template=template)
  expected = "skynet: 1997"
  result = hinter.Render(rdf)
  self.assertEqual(expected, result)
def Run(self, unused_arg): """This kills us with no cleanups.""" logging.debug("Disabling service") win32serviceutil.ChangeServiceConfig( None, config.CONFIG["Nanny.service_name"], startType=win32service.SERVICE_DISABLED) svc_config = QueryService(config.CONFIG["Nanny.service_name"]) if svc_config[1] == win32service.SERVICE_DISABLED: logging.info("Disabled service successfully") self.SendReply(rdf_protodict.DataBlob(string="Service disabled.")) else: self.SendReply( rdf_protodict.DataBlob(string="Service failed to disable."))
def testNannyMessage(self):
  nanny_message = "Oh no!"
  self.email_message = {}

  def SendEmail(address, sender, title, message, **_):
    self.email_message.update(
        dict(address=address, sender=sender, title=title, message=message))

  with utils.Stubber(email_alerts.EMAIL_ALERTER, "SendEmail", SendEmail):
    msg = rdf_flows.GrrMessage(
        session_id=rdfvalue.SessionID(flow_name="NannyMessage"),
        payload=rdf_protodict.DataBlob(string=nanny_message),
        source=self.client_id,
        auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED)

    # This is normally done by the FrontEnd when a CLIENT_KILLED message is
    # received.
    flow.Events.PublishEvent("NannyMessage", msg, token=self.token)

    # Now emulate a worker to process the event.
    worker = test_lib.MockWorker(token=self.token)
    while worker.Next():
      pass
    worker.pool.Join()

    # We expect the email to be sent.
    self.assertEqual(
        self.email_message.get("address"),
        config_lib.CONFIG["Monitoring.alert_email"])
    self.assertTrue(str(self.client_id) in self.email_message["title"])

    # Make sure the message is included in the email message.
    self.assertTrue(nanny_message in self.email_message["message"])

    # Make sure crashes RDFValueCollections are created and written
    # into proper locations. First check the per-client crashes collection.
    client_crashes = list(
        aff4.FACTORY.Open(
            self.client_id.Add("crashes"),
            aff4_type=collects.PackedVersionedCollection,
            token=self.token))
    self.assertEqual(len(client_crashes), 1)
    crash = client_crashes[0]
    self.assertEqual(crash.client_id, self.client_id)
    self.assertEqual(crash.client_info.client_name, "GRR Monitor")
    self.assertEqual(
        crash.crash_type,
        "aff4:/flows/" + queues.FLOWS.Basename() + ":NannyMessage")
    self.assertEqual(crash.crash_message, nanny_message)

    # Check global crash collection. Check that crash written there is
    # equal to per-client crash.
    global_crashes = list(
        aff4.FACTORY.Open(
            aff4.ROOT_URN.Add("crashes"),
            aff4_type=collects.PackedVersionedCollection,
            token=self.token))
    self.assertEqual(len(global_crashes), 1)
    self.assertEqual(global_crashes[0], crash)
def Start(self):
  for i in range(10):
    self.CallClient(
        client_test_lib.Test,
        rdf_protodict.DataBlob(string="test%s" % i),
        data=str(i),
        next_state="Incoming")
def Run(self, args):
  """Reads a buffer on the client and sends it to the server."""
  # Make sure we limit the size of our output.
  if args.length > constants.CLIENT_MAX_BUFFER_SIZE:
    raise RuntimeError("Can not read buffers this large.")

  data = vfs.ReadVFS(
      args.pathspec, args.offset, args.length,
      progress_callback=self.Progress)
  result = rdf_protodict.DataBlob(
      data=zlib.compress(data),
      compression=rdf_protodict.DataBlob.CompressionType.ZCOMPRESSION)

  digest = hashlib.sha256(data).digest()

  # Ensure that the buffer is counted against this response. Check network
  # send limit.
  self.ChargeBytesToSession(len(data))

  # Now return the data to the server into the special TransferStore well
  # known flow.
  self.grr_worker.SendReply(
      result, session_id=rdfvalue.SessionID(flow_name="TransferStore"))

  # Now report the hash of this blob to our flow as well as the offset and
  # length.
  self.SendReply(
      rdf_client.BufferReference(
          offset=args.offset, length=len(data), data=digest))
def _MakeRegStat(self, path, value, registry_type):
  options = rdf_paths.PathSpec.Options.CASE_LITERAL
  pathspec = rdf_paths.PathSpec(
      path=path,
      path_options=options,
      pathtype=rdf_paths.PathSpec.PathType.REGISTRY)

  if registry_type == rdf_client.StatEntry.RegistryType.REG_MULTI_SZ:
    reg_data = rdf_protodict.DataBlob(
        list=rdf_protodict.BlobArray(
            content=rdf_protodict.DataBlob(string=value)))
  else:
    reg_data = rdf_protodict.DataBlob().SetValue(value)

  return rdf_client.StatEntry(
      pathspec=pathspec,
      registry_data=reg_data,
      registry_type=registry_type)
def ProcessMessage(self, message):
  """Write the blob into the AFF4 blob storage area."""
  # Check that the message is authenticated.
  if (message.auth_state !=
      rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED):
    logging.error("TransferStore request from %s is not authenticated.",
                  message.source)
    return

  read_buffer = rdf_protodict.DataBlob(message.payload)

  # Only store non empty buffers.
  if read_buffer.data:
    data = read_buffer.data
    if (read_buffer.compression ==
        rdf_protodict.DataBlob.CompressionType.ZCOMPRESSION):
      cdata = data
      data = zlib.decompress(cdata)
    elif (read_buffer.compression ==
          rdf_protodict.DataBlob.CompressionType.UNCOMPRESSED):
      cdata = zlib.compress(data)
    else:
      raise RuntimeError("Unsupported compression")

    # The hash is done on the uncompressed data.
    digest = hashlib.sha256(data).digest()
    urn = rdfvalue.RDFURN("aff4:/blobs").Add(digest.encode("hex"))

    fd = aff4.FACTORY.Create(urn, "AFF4MemoryStream", mode="w",
                             token=self.token)
    fd.OverwriteAndClose(cdata, len(data), sync=True)

    logging.debug("Got blob %s (length %s)", digest.encode("hex"),
                  len(cdata))
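# Illustrative, standalone sketch (not part of GRR): the two snippets above
# share a convention worth making explicit -- the client ships *compressed*
# bytes, but blobs are indexed by the SHA-256 digest of the *uncompressed*
# data. With only the Python stdlib, the round trip looks like this:
import hashlib
import zlib


def pack_blob(data):
  """Returns (digest, compressed_data) as the client side would send them."""
  return hashlib.sha256(data).digest(), zlib.compress(data)


def unpack_blob(digest, cdata):
  """Decompresses a blob and verifies it, as the server side does."""
  data = zlib.decompress(cdata)
  if hashlib.sha256(data).digest() != digest:
    raise ValueError("Blob digest mismatch")
  return data


assert unpack_blob(*pack_blob(b"example payload")) == b"example payload"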
def Run(self, unused_arg): """This kills us with no cleanups.""" logging.debug("Disabling service") msg = "Service disabled." if hasattr(sys, "frozen"): grr_binary = os.path.abspath(sys.executable) elif __file__: grr_binary = os.path.abspath(__file__) try: os.remove(grr_binary) except OSError: msg = "Could not remove binary." try: os.remove(config.CONFIG["Client.plist_path"]) except OSError: if "Could not" in msg: msg += " Could not remove plist file." else: msg = "Could not remove plist file." # Get the directory we are running in from pyinstaller. This is either the # GRR directory which we should delete (onedir mode) or a generated temp # directory which we can delete without problems in onefile mode. directory = getattr(sys, "_MEIPASS", None) if directory: shutil.rmtree(directory, ignore_errors=True) self.SendReply(rdf_protodict.DataBlob(string=msg))
def SendResponse(self, session_id, data, client_id=None, well_known=False):
  if not isinstance(data, rdfvalue.RDFValue):
    data = rdf_protodict.DataBlob(string=data)
  if well_known:
    request_id, response_id = 0, 12345
  else:
    request_id, response_id = 1, 1
  with queue_manager.QueueManager(token=self.token) as flow_manager:
    flow_manager.QueueResponse(
        session_id,
        rdf_flows.GrrMessage(
            source=client_id,
            session_id=session_id,
            payload=data,
            request_id=request_id,
            response_id=response_id))
    if not well_known:
      # For normal flows we have to send a status as well.
      flow_manager.QueueResponse(
          session_id,
          rdf_flows.GrrMessage(
              source=client_id,
              session_id=session_id,
              payload=rdf_flows.GrrStatus(
                  status=rdf_flows.GrrStatus.ReturnedStatus.OK),
              request_id=request_id,
              response_id=response_id + 1,
              type=rdf_flows.GrrMessage.Type.STATUS))

    flow_manager.QueueNotification(session_id=session_id)
    timestamp = flow_manager.frozen_timestamp

  return timestamp
def _ForemanOp(self):
  """Sends Foreman checks periodically."""
  period = config.CONFIG["Client.foreman_check_frequency"]
  self._threads["Worker"].SendReply(
      rdf_protodict.DataBlob(),
      session_id=rdfvalue.FlowSessionID(flow_name="Foreman"),
      require_fastpoll=False)
  time.sleep(period)
def SendNannyMessage(self):
  msg = self.nanny_controller.GetNannyMessage()
  if msg:
    self.SendReply(
        rdf_protodict.DataBlob(string=msg),
        session_id=rdfvalue.FlowSessionID(flow_name="NannyMessage"),
        priority=rdf_flows.GrrMessage.Priority.LOW_PRIORITY,
        require_fastpoll=False)
    self.nanny_controller.ClearNannyMessage()
def _ForemanCheckerThread(self):
  """Sends Foreman checks periodically."""
  period = config.CONFIG["Client.foreman_check_frequency"]
  while True:
    self._client_worker.SendReply(
        rdf_protodict.DataBlob(),
        session_id=rdfvalue.FlowSessionID(flow_name="Foreman"),
        priority=rdf_flows.GrrMessage.Priority.LOW_PRIORITY)
    time.sleep(period)
def Run(self, unused_args): """Estimate the install date of this system.""" # Don't use _winreg.KEY_WOW64_64KEY since it breaks on Windows 2000 subkey = _winreg.OpenKey( _winreg.HKEY_LOCAL_MACHINE, "Software\\Microsoft\\Windows NT\\CurrentVersion", 0, _winreg.KEY_READ) install_date = _winreg.QueryValueEx(subkey, "InstallDate") self.SendReply(rdf_protodict.DataBlob(integer=install_date[0]))
def MakeRegistryStatEntry(self, path, value):
  options = rdf_paths.PathSpec.Options.CASE_LITERAL
  pathspec = rdf_paths.PathSpec(
      path=path,
      path_options=options,
      pathtype=rdf_paths.PathSpec.PathType.REGISTRY)

  return rdf_client.StatEntry(
      pathspec=pathspec,
      registry_data=rdf_protodict.DataBlob().SetValue(value),
      registry_type=rdf_client.StatEntry.RegistryType.REG_SZ)
def UnlockSubject(self, subject, transid):
  """Unlocks subject using lock id."""
  request = rdf_data_store.DataStoreRequest(subject=[subject])

  blob = rdf_protodict.DataBlob(string=transid)
  value = rdf_data_store.DataStoreValue(value=blob)
  request.values.Append(value)

  # We do not care about the server response.
  typ = rdf_data_server.DataStoreCommand.Command.UNLOCK_SUBJECT
  self._MakeSyncRequest(request, typ)

  return transid
def testEqualTimestampNotifications(self):
  frontend_server = front_end.FrontEndServer(
      certificate=config.CONFIG["Frontend.certificate"],
      private_key=config.CONFIG["PrivateKeys.server_key"],
      message_expiry_time=100,
      threadpool_prefix="notification-test")

  # This schedules 10 requests.
  session_id = flow.GRRFlow.StartFlow(
      client_id=self.client_id,
      flow_name="WorkerSendingTestFlow",
      token=self.token)

  # We pretend that the client processed all the 10 requests at once and
  # sends the replies in a single http poll.
  messages = [
      rdf_flows.GrrMessage(
          request_id=i,
          response_id=1,
          session_id=session_id,
          payload=rdf_protodict.DataBlob(string="test%s" % i),
          auth_state="AUTHENTICATED",
          generate_task_id=True) for i in range(1, 11)
  ]
  status = rdf_flows.GrrStatus(status=rdf_flows.GrrStatus.ReturnedStatus.OK)
  statuses = [
      rdf_flows.GrrMessage(
          request_id=i,
          response_id=2,
          session_id=session_id,
          payload=status,
          type=rdf_flows.GrrMessage.Type.STATUS,
          auth_state="AUTHENTICATED",
          generate_task_id=True) for i in range(1, 11)
  ]

  frontend_server.ReceiveMessages(self.client_id, messages + statuses)

  with queue_manager.QueueManager(token=self.token) as q:
    all_notifications = q.GetNotificationsByPriorityForAllShards(
        rdfvalue.RDFURN("aff4:/F"))
    medium_priority = rdf_flows.GrrNotification.Priority.MEDIUM_PRIORITY
    medium_notifications = all_notifications[medium_priority]
    my_notifications = [
        n for n in medium_notifications if n.session_id == session_id
    ]
    # There must not be more than one notification.
    self.assertEqual(len(my_notifications), 1)
    notification = my_notifications[0]
    self.assertEqual(notification.first_queued, notification.timestamp)
    self.assertEqual(notification.last_status, 10)
def UnlockSubject(self, subject, transid, token):
  """Unlocks subject using transaction id."""
  self.security_manager.CheckDataStoreAccess(token, [subject], "w")
  request = rdf_data_store.DataStoreRequest(subject=[subject])
  if token:
    request.token = token

  blob = rdf_protodict.DataBlob(string=transid)
  value = rdf_data_store.DataStoreValue(value=blob)
  request.values.Append(value)

  # We do not care about the server response.
  typ = rdf_data_server.DataStoreCommand.Command.UNLOCK_SUBJECT
  self._MakeSyncRequest(request, typ)

  return transid
def ProcessFileStats(self, responses):
  """Extract DataBlob from Stat response."""
  if not responses.success:
    return

  system_root_paths = ["Windows", "WinNT", "WINNT35", "WTSRV", "WINDOWS"]
  for response in responses:
    # Paths come back as e.g. "/C:/Windows": [1:3] is the drive letter plus
    # colon, [4:] the directory name under the drive.
    if response.pathspec.path[4:] in system_root_paths:
      systemdrive = response.pathspec.path[1:3]
      systemroot = "%s\\%s" % (systemdrive, response.pathspec.path[4:])

      # Put the data back into the original format expected for the artifact.
      data = rdf_protodict.DataBlob().SetValue(systemroot)
      self.SendReply(rdf_client.StatEntry(registry_data=data))
      self.state.success = True
      break
def testParse(self):
  parser = windows_persistence.WindowsPersistenceMechanismsParser()
  path = (r"HKEY_LOCAL_MACHINE\Software\Microsoft\Windows\CurrentVersion"
          r"\Run\test")
  pathspec = rdf_paths.PathSpec(
      path=path, pathtype=rdf_paths.PathSpec.PathType.REGISTRY)
  reg_data = "C:\\blah\\some.exe /v"
  reg_type = rdf_client.StatEntry.RegistryType.REG_SZ
  stat = rdf_client.StatEntry(
      aff4path="aff4:/asdfasdf/",
      pathspec=pathspec,
      registry_type=reg_type,
      registry_data=rdf_protodict.DataBlob(string=reg_data))

  persistence = [stat]
  image_paths = [
      "system32\\drivers\\ACPI.sys",
      "%systemroot%\\system32\\svchost.exe -k netsvcs",
      "\\SystemRoot\\system32\\drivers\\acpipmi.sys"
  ]
  reg_key = rdfvalue.RDFURN("aff4:/C.1000000000000000/registry"
                            "/HKEY_LOCAL_MACHINE/SYSTEM/ControlSet001"
                            "/services/AcpiPmi")
  for path in image_paths:
    serv_info = rdf_client.WindowsServiceInformation(
        name="blah",
        display_name="GRRservice",
        image_path=path,
        registry_key=reg_key)
    persistence.append(serv_info)

  knowledge_base = rdf_client.KnowledgeBase()
  knowledge_base.environ_systemroot = "C:\\Windows"

  expected = [
      "C:\\blah\\some.exe",
      "C:\\Windows\\system32\\drivers\\ACPI.sys",
      "C:\\Windows\\system32\\svchost.exe",
      "C:\\Windows\\system32\\drivers\\acpipmi.sys"
  ]

  for index, item in enumerate(persistence):
    results = list(
        parser.Parse(item, knowledge_base, rdf_paths.PathSpec.PathType.OS))
    self.assertEqual(results[0].pathspec.path, expected[index])
    self.assertEqual(len(results), 1)
def ExtendSubjectLock(self, subject, transid, lease_time):
  """Extends lock of subject."""
  request = rdf_data_store.DataStoreRequest(subject=[subject])
  specific = rdf_data_store.TimestampSpec.Type.SPECIFIC_TIME
  request.timestamp = rdf_data_store.TimestampSpec(
      start=lease_time, type=specific)

  blob = rdf_protodict.DataBlob(string=transid)
  value = rdf_data_store.DataStoreValue(value=blob)
  request.values.Append(value)

  typ = rdf_data_server.DataStoreCommand.Command.EXTEND_SUBJECT
  response = self._MakeSyncRequest(request, typ)

  if not response.results:
    return None
  result = response.results[0]
  if not result.values:
    return None
  value = result.values[0].value.string
  return transid if transid == value else None
def _ScheduleResponseAndStatus(self, client_id, flow_id):
  with queue_manager.QueueManager(token=self.token) as flow_manager:
    # Schedule a response.
    flow_manager.QueueResponse(
        rdf_flows.GrrMessage(
            source=client_id,
            session_id=flow_id,
            payload=rdf_protodict.DataBlob(string="Hello"),
            request_id=1,
            response_id=1))
    # And a STATUS message.
    flow_manager.QueueResponse(
        rdf_flows.GrrMessage(
            source=client_id,
            session_id=flow_id,
            payload=rdf_flows.GrrStatus(
                status=rdf_flows.GrrStatus.ReturnedStatus.OK),
            request_id=1,
            response_id=2,
            type=rdf_flows.GrrMessage.Type.STATUS))
def _Stat(self, name, value, value_type, mtime=None):
  response = rdf_client.StatEntry()
  response_pathspec = self.pathspec.Copy()

  # No matter how we got here, there is no need to do case folding from now
  # on since this is the exact filename casing.
  response_pathspec.path_options = rdf_paths.PathSpec.Options.CASE_LITERAL

  response_pathspec.last.path = utils.JoinPath(
      response_pathspec.last.path, name)
  response.pathspec = response_pathspec

  if self.IsDirectory():
    response.st_mode = stat.S_IFDIR
  else:
    response.st_mode = stat.S_IFREG
  if mtime:
    response.st_mtime = mtime

  response.st_size = len(utils.SmartStr(value))
  if value_type is not None:
    response.registry_type = self.registry_map.get(value_type, 0)
    response.registry_data = rdf_protodict.DataBlob().SetValue(value)
  return response
def testNoValidStatusRaceIsResolved(self):
  # This tests for the regression of a long standing race condition we saw
  # where notifications would trigger the reading of another request that
  # arrives later but wasn't completely written to the database yet.
  # Timestamp based notification handling should eliminate this bug.

  # We need a random flow object for this test.
  session_id = flow.GRRFlow.StartFlow(
      client_id=self.client_id,
      flow_name="WorkerSendingTestFlow",
      token=self.token)
  worker_obj = worker.GRRWorker(token=self.token)
  manager = queue_manager.QueueManager(token=self.token)

  manager.DeleteNotification(session_id)
  manager.Flush()

  # We have a first request that is complete (request_id 1, response_id 1).
  self.SendResponse(session_id, "Response 1")

  # However, we also have request #2 already coming in. The race is that
  # the queue manager might write the status notification to
  # session_id/state as "status:00000002" but not the status response
  # itself yet under session_id/state/request:00000002
  request_id = 2
  response_id = 1
  flow_manager = queue_manager.QueueManager(token=self.token)
  flow_manager.FreezeTimestamp()

  flow_manager.QueueResponse(
      rdf_flows.GrrMessage(
          source=self.client_id,
          session_id=session_id,
          payload=rdf_protodict.DataBlob(string="Response 2"),
          request_id=request_id,
          response_id=response_id))

  status = rdf_flows.GrrMessage(
      source=self.client_id,
      session_id=session_id,
      payload=rdf_flows.GrrStatus(
          status=rdf_flows.GrrStatus.ReturnedStatus.OK),
      request_id=request_id,
      response_id=response_id + 1,
      type=rdf_flows.GrrMessage.Type.STATUS)

  # Now we write half the status information.
  data_store.DB.StoreRequestsAndResponses(new_responses=[(status, None)])

  # We make the race even a bit harder by saying the new notification gets
  # written right before the old one gets deleted. If we are not careful
  # here, we delete the new notification as well and the flow becomes stuck.
  def WriteNotification(self, arg_session_id, start=None, end=None):
    if arg_session_id == session_id:
      flow_manager.QueueNotification(session_id=arg_session_id)
      flow_manager.Flush()

    self.DeleteNotification.old_target(
        self, arg_session_id, start=start, end=end)

  with utils.Stubber(queue_manager.QueueManager, "DeleteNotification",
                     WriteNotification):
    # This should process request 1 but not touch request 2.
    worker_obj.RunOnce()
    worker_obj.thread_pool.Join()

  flow_obj = aff4.FACTORY.Open(session_id, token=self.token)
  self.assertFalse(flow_obj.context.backtrace)
  self.assertNotEqual(flow_obj.context.state,
                      rdf_flows.FlowContext.State.ERROR)

  request_data = data_store.DB.ReadResponsesForRequestId(session_id, 2)
  request_data.sort(key=lambda msg: msg.response_id)
  self.assertEqual(len(request_data), 2)

  # Make sure the status and the original request are still there.
  self.assertEqual(request_data[0].args_rdf_name, "DataBlob")
  self.assertEqual(request_data[1].args_rdf_name, "GrrStatus")

  # But there is nothing for request 1.
  request_data = data_store.DB.ReadResponsesForRequestId(session_id, 1)
  self.assertEqual(request_data, [])

  # The notification for request 2 should have survived.
  with queue_manager.QueueManager(token=self.token) as manager:
    notifications = manager.GetNotifications(queues.FLOWS)
    self.assertEqual(len(notifications), 1)
    notification = notifications[0]
    self.assertEqual(notification.session_id, session_id)
    self.assertEqual(notification.timestamp, flow_manager.frozen_timestamp)

  self.assertEqual(RESULTS, ["Response 1"])

  # The last missing piece of request 2 is the actual status message.
  flow_manager.QueueResponse(status)
  flow_manager.Flush()

  # Now make sure request 2 runs as expected.
  worker_obj.RunOnce()
  worker_obj.thread_pool.Join()

  self.assertEqual(RESULTS, ["Response 1", "Response 2"])
def ReturnBlob(self, unused_args):
  return [rdf_protodict.DataBlob(integer=100)]
def Store(self, data):
  self.storage.append(self.in_rdfvalue(data).string)
  return [rdf_protodict.DataBlob(string="Hello World")]
def Run(self, unused_args):
  self.SendReply(rdf_protodict.DataBlob(string=socket.gethostname()))
def _CompressedDataBlob(chunk):
  return rdf_protodict.DataBlob(
      data=zlib.compress(chunk.data),
      compression=rdf_protodict.DataBlob.CompressionType.ZCOMPRESSION)
def Start(self):
  data = rdf_protodict.DataBlob().SetValue("All Users")
  self.SendReply(rdf_client.StatEntry(registry_data=data))
  self.state.success = True
def GetInstallDate(self, _):
  self.response_count += 1
  return [rdf_protodict.DataBlob(integer=100)]