def Run(self):
  """Regression test for GetFlow, including the pending-termination state."""
  # Fix the time to avoid regressions.
  with test_lib.FakeTime(42):
    client_id = self.SetupClient(0)
    flow_id = flow_test_lib.StartFlow(
        discovery.Interrogate, client_id=client_id, creator=self.token.username)

    # Replace the unpredictable flow id with a fixed value in golden output.
    replace = api_regression_test_lib.GetFlowTestReplaceDict(
        client_id, flow_id, "F:ABCDEF12")

    self.Check(
        "GetFlow",
        args=flow_plugin.ApiGetFlowArgs(client_id=client_id, flow_id=flow_id),
        replace=replace)

    flow_base.TerminateFlow(client_id, flow_id,
                            "Pending termination: Some reason")

    replace = api_regression_test_lib.GetFlowTestReplaceDict(
        client_id, flow_id, "F:ABCDEF13")

    # Fetch the same flow, which should now be marked as pending termination.
    self.Check(
        "GetFlow",
        args=flow_plugin.ApiGetFlowArgs(client_id=client_id, flow_id=flow_id),
        replace=replace)
def Run(self):
  """Regression test for GetFlow (AFF4/relational dual-path variant)."""
  # Fix the time to avoid regressions.
  with test_lib.FakeTime(42):
    client_id = self.SetupClient(0).Basename()

    if data_store.AFF4Enabled():
      # Delete the certificate, as it is regenerated every time the
      # client is created (and would break the golden output otherwise).
      with aff4.FACTORY.Open(
          client_id, mode="rw", token=self.token) as client_obj:
        client_obj.DeleteAttribute(client_obj.Schema.CERT)

    flow_id = api_regression_test_lib.StartFlow(
        client_id, discovery.Interrogate, token=self.token)

    # Replace the unpredictable flow id with a fixed value in golden output.
    replace = api_regression_test_lib.GetFlowTestReplaceDict(
        client_id, flow_id, "F:ABCDEF12")

    self.Check(
        "GetFlow",
        args=flow_plugin.ApiGetFlowArgs(client_id=client_id, flow_id=flow_id),
        replace=replace)

    flow_base.TerminateFlow(client_id, flow_id,
                            "Pending termination: Some reason")

    replace = api_regression_test_lib.GetFlowTestReplaceDict(
        client_id, flow_id, "F:ABCDEF13")

    # Fetch the same flow, which should now be marked as pending termination.
    self.Check(
        "GetFlow",
        args=flow_plugin.ApiGetFlowArgs(client_id=client_id, flow_id=flow_id),
        replace=replace)
def Handle(self, args, token=None):
  """Terminates the flow named in args and returns its updated API object."""
  client_id = str(args.client_id)
  flow_id = str(args.flow_id)
  flow_base.TerminateFlow(client_id, flow_id, reason="Cancelled in GUI")
  # Re-read the flow so the returned object reflects the terminated state.
  updated_flow = data_store.REL_DB.ReadFlowObject(client_id, flow_id)
  return ApiFlow().InitFromFlowObject(updated_flow)
def Run(self):
  """Regression test for GetVfsFileContentUpdateState (dual-path variant)."""
  client_urn = self.SetupClient(0)
  client_id = client_urn.Basename()
  acl_test_lib.CreateUser(self.token.username)

  # Create a running mock refresh operation.
  running_flow_id = self.CreateMultiGetFileFlow(
      client_urn, file_path="fs/os/c/bin/bash", token=self.token)

  # Create a mock refresh operation and complete it.
  finished_flow_id = self.CreateMultiGetFileFlow(
      client_urn, file_path="fs/os/c/bin/bash", token=self.token)

  if data_store.RelationalDBFlowsEnabled():
    flow_base.TerminateFlow(client_id, finished_flow_id, reason="Fake Error")

    # Create an arbitrary flow to check on 404s.
    non_update_flow_id = flow.StartFlow(
        client_id=client_id, flow_cls=discovery.Interrogate)
  else:
    finished_flow_urn = client_urn.Add("flows").Add(finished_flow_id)
    with aff4.FACTORY.Open(
        finished_flow_urn, aff4_type=flow.GRRFlow, mode="rw",
        token=self.token) as flow_obj:
      flow_obj.GetRunner().Error("Fake error")

    # Create an arbitrary flow to check on 404s.
    non_update_flow_id = flow.StartAFF4Flow(
        client_id=client_urn,
        flow_name=discovery.Interrogate.__name__,
        token=self.token).Basename()

  # Unknown flow ids should also cause 404s.
  unknown_flow_id = "F:12345678"

  # Check both operations.
  self.Check(
      "GetVfsFileContentUpdateState",
      args=vfs_plugin.ApiGetVfsFileContentUpdateStateArgs(
          client_id=client_id, operation_id=running_flow_id),
      replace={running_flow_id: "W:ABCDEF"})
  self.Check(
      "GetVfsFileContentUpdateState",
      args=vfs_plugin.ApiGetVfsFileContentUpdateStateArgs(
          client_id=client_id, operation_id=finished_flow_id),
      replace={finished_flow_id: "W:ABCDEF"})
  self.Check(
      "GetVfsFileContentUpdateState",
      args=vfs_plugin.ApiGetVfsFileContentUpdateStateArgs(
          client_id=client_id, operation_id=non_update_flow_id),
      replace={non_update_flow_id: "W:ABCDEF"})
  self.Check(
      "GetVfsFileContentUpdateState",
      args=vfs_plugin.ApiGetVfsFileContentUpdateStateArgs(
          client_id=client_id, operation_id=unknown_flow_id),
      replace={unknown_flow_id: "W:ABCDEF"})
def _TerminateFlow(self, flow_id):
  """Terminates the flow via whichever flow datastore is currently active."""
  reason = "Because I said so"
  if not data_store.RelationalDBFlowsEnabled():
    # Legacy AFF4 path: flows are addressed by URN under the client.
    flow_urn = rdfvalue.RDFURN(self.client_id).Add("flows").Add(flow_id)
    flow.GRRFlow.TerminateAFF4Flow(flow_urn, reason, token=self.token)
  else:
    flow_base.TerminateFlow(self.client_id, flow_id, reason)
def testHandlerReturnsCorrectStateForFlow(self):
  """The handler reports RUNNING, then FINISHED after the flow terminates."""
  # Create a mock refresh operation.
  flow_id = self.CreateMultiGetFileFlow(
      self.client_id, file_path="fs/os/c/bin/bash", token=self.token)

  args = vfs_plugin.ApiGetVfsFileContentUpdateStateArgs(
      client_id=self.client_id, operation_id=flow_id)

  # Flow was started and should be running.
  result = self.handler.Handle(args, token=self.token)
  self.assertEqual(result.state, "RUNNING")

  # Terminate flow.
  if data_store.RelationalDBFlowsEnabled():
    flow_base.TerminateFlow(self.client_id.Basename(), flow_id, "Fake error")
  else:
    # Legacy AFF4 path: error out the flow through its runner.
    flow_urn = self.client_id.Add("flows").Add(flow_id)
    with aff4.FACTORY.Open(
        flow_urn, aff4_type=flow.GRRFlow, mode="rw",
        token=self.token) as flow_obj:
      flow_obj.GetRunner().Error("Fake error")

  # Recheck status and see if it changed.
  result = self.handler.Handle(args, token=self.token)
  self.assertEqual(result.state, "FINISHED")
def ProcessFlow(
    self, flow_processing_request: rdf_flows.FlowProcessingRequest) -> None:
  """The callback for the flow processing queue.

  Acks the processing request, leases the flow, processes all of its ready
  requests, and keeps processing until the flow can be released cleanly.

  Args:
    flow_processing_request: The queued request naming the flow to process.

  Raises:
    FlowHasNothingToProcessError: If no request could be processed even
      though the flow was scheduled for processing.
  """
  client_id = flow_processing_request.client_id
  flow_id = flow_processing_request.flow_id

  data_store.REL_DB.AckFlowProcessingRequests([flow_processing_request])

  try:
    # Lease the flow so no other worker processes it concurrently.
    rdf_flow = data_store.REL_DB.LeaseFlowForProcessing(
        client_id,
        flow_id,
        processing_time=rdfvalue.Duration.From(6, rdfvalue.HOURS))
  except db.ParentHuntIsNotRunningError:
    flow_base.TerminateFlow(client_id, flow_id, "Parent hunt stopped.")
    return

  first_request_to_process = rdf_flow.next_request_to_process
  logging.info("Processing Flow %s/%s/%d (%s).", client_id, flow_id,
               first_request_to_process, rdf_flow.flow_class_name)

  flow_cls = registry.FlowRegistry.FlowClassByName(rdf_flow.flow_class_name)
  flow_obj = flow_cls(rdf_flow)

  if not flow_obj.IsRunning():
    logging.info(
        "Received a request to process flow %s on client %s that is not "
        "running.", flow_id, client_id)
    return

  processed, incrementally_processed = flow_obj.ProcessAllReadyRequests()
  if processed == 0 and incrementally_processed == 0:
    raise FlowHasNothingToProcessError(
        "Unable to process any requests for flow %s on client %s." %
        (flow_id, client_id))

  # New requests may have arrived while we were processing; keep draining
  # them until the flow can be released without pending work.
  while not self._ReleaseProcessedFlow(flow_obj):
    processed, incrementally_processed = flow_obj.ProcessAllReadyRequests()
    if processed == 0 and incrementally_processed == 0:
      raise FlowHasNothingToProcessError(
          "%s/%s: ReleaseProcessedFlow returned false but no "
          "request could be processed (next req: %d)." %
          (client_id, flow_id, flow_obj.rdf_flow.next_request_to_process))

  if flow_obj.IsRunning():
    logging.info(
        "Processing Flow %s/%s/%d (%s) done, next request to process: %d.",
        client_id, flow_id, first_request_to_process,
        rdf_flow.flow_class_name, rdf_flow.next_request_to_process)
  else:
    logging.info("Processing Flow %s/%s/%d (%s) done, flow is done.",
                 client_id, flow_id, first_request_to_process,
                 rdf_flow.flow_class_name)
def ProcessFlow():
  # Give the worker a moment to pick the flow up before terminating it.
  # NOTE(review): client_urn, flow_id and self are captured from the
  # enclosing scope — confirm they are bound before this callback runs.
  time.sleep(1)
  if data_store.RelationalDBFlowsEnabled():
    flow_base.TerminateFlow(client_urn.Basename(), flow_id, "")
  else:
    # Legacy AFF4 path: error out the flow through its runner.
    with aff4.FACTORY.Open(
        client_urn.Add("flows").Add(flow_id), mode="rw",
        token=self.token) as fd:
      fd.GetRunner().Error("")
def Handle(self, args, token=None):
  """Cancels the flow identified by args in whichever datastore is active."""
  reason = "Cancelled in GUI"
  if not data_store.RelationalDBFlowsEnabled():
    # Legacy AFF4 path: resolve the flow's URN relative to the client first.
    flow_urn = args.flow_id.ResolveClientFlowURN(args.client_id, token=token)
    flow.GRRFlow.TerminateAFF4Flow(flow_urn, reason=reason, token=token)
  else:
    flow_base.TerminateFlow(
        str(args.client_id), str(args.flow_id), reason=reason)
def _TerminateFlow(self, client_id, flow_id):
  """Terminates (or marks for termination) the flow in the active datastore."""
  if data_store.RelationalDBFlowsEnabled():
    flow_base.TerminateFlow(client_id, flow_id,
                            "Pending termination: Some reason")
    return
  # Legacy AFF4 path: the flow is only *marked* for termination here; the
  # actual termination happens later when the flow is processed.
  flow_urn = rdfvalue.RDFURN(client_id).Add("flows").Add(flow_id)
  with data_store.DB.GetMutationPool() as pool:
    flow.GRRFlow.MarkForTermination(
        flow_urn, reason="Some reason", mutation_pool=pool)
def GenerateNotifications(cls, client_id, token):
  """Generates fake notifications of different notification types."""
  session_id = flow_test_lib.StartFlow(
      discovery.Interrogate, client_id=client_id, creator=token.username)

  # ViewObject: Client
  notification.Notify(
      token.username,
      rdf_objects.UserNotification.Type.TYPE_CLIENT_INTERROGATED,
      "Fake discovery message",
      rdf_objects.ObjectReference(
          reference_type=rdf_objects.ObjectReference.Type.CLIENT,
          client=rdf_objects.ClientReference(
              client_id=client_id.Basename())))

  # ViewObject: VirtualFileSystem
  notification.Notify(
      token.username,
      rdf_objects.UserNotification.Type.TYPE_VFS_FILE_COLLECTED,
      "File fetch completed",
      rdf_objects.ObjectReference(
          reference_type=rdf_objects.ObjectReference.Type.VFS_FILE,
          vfs_file=rdf_objects.VfsFileReference(
              client_id=client_id.Basename(),
              path_type=rdf_objects.PathInfo.PathType.OS,
              path_components=["proc", "10", "exe"])))

  # Create the file the VFS notification above points at.
  gui_test_lib.CreateFileVersion(
      client_id, "fs/os/proc/10/exe", b"",
      timestamp=gui_test_lib.TIME_0, token=token)

  # ViewObject: Flow
  notification.Notify(
      token.username,
      rdf_objects.UserNotification.Type.TYPE_FLOW_RUN_COMPLETED,
      "Fake view flow message",
      rdf_objects.ObjectReference(
          reference_type=rdf_objects.ObjectReference.Type.FLOW,
          flow=rdf_objects.FlowReference(
              client_id=client_id.Basename(), flow_id=session_id)))

  # FlowError
  if data_store.RelationalDBEnabled():
    flow_base.TerminateFlow(client_id.Basename(), session_id,
                            "Fake flow error")
  else:
    # Legacy AFF4 path: error out the flow through its runner.
    with aff4.FACTORY.Open(
        client_id.Add("flows").Add(session_id), mode="rw",
        token=token) as flow_obj:
      flow_obj.GetRunner().Error("Fake flow error")

  return session_id
def testHandlerReturnsCorrectStateForFlow(self):
  """Checks the RUNNING -> FINISHED transition reported by the handler."""
  operation_id = self.CreateRecursiveListFlow(self.client_id)
  request = vfs_plugin.ApiGetVfsRefreshOperationStateArgs(
      client_id=self.client_id, operation_id=operation_id)

  # The freshly started flow must be reported as running.
  self.assertEqual(
      self.handler.Handle(request, context=self.context).state, "RUNNING")

  # Once the flow is terminated, the reported state must flip to finished.
  flow_base.TerminateFlow(self.client_id, operation_id, "Fake error")
  self.assertEqual(
      self.handler.Handle(request, context=self.context).state, "FINISHED")
def GenerateNotifications(cls, client_id, username):
  """Generates fake notifications of different notification types."""
  session_id = flow_test_lib.StartFlow(
      discovery.Interrogate, client_id=client_id, creator=username)

  # ViewObject: Client
  notification.Notify(
      username,
      rdf_objects.UserNotification.Type.TYPE_CLIENT_INTERROGATED,
      "Fake discovery message",
      rdf_objects.ObjectReference(
          reference_type=rdf_objects.ObjectReference.Type.CLIENT,
          client=rdf_objects.ClientReference(client_id=client_id)))

  # ViewObject: VirtualFileSystem
  notification.Notify(
      username,
      rdf_objects.UserNotification.Type.TYPE_VFS_FILE_COLLECTED,
      "File fetch completed",
      rdf_objects.ObjectReference(
          reference_type=rdf_objects.ObjectReference.Type.VFS_FILE,
          vfs_file=rdf_objects.VfsFileReference(
              client_id=client_id,
              path_type=rdf_objects.PathInfo.PathType.OS,
              path_components=["proc", "10", "exe"])))

  # Create the file the VFS notification above points at.
  gui_test_lib.CreateFileVersion(
      client_id, "fs/os/proc/10/exe", b"", timestamp=gui_test_lib.TIME_0)

  # ViewObject: Flow
  notification.Notify(
      username,
      rdf_objects.UserNotification.Type.TYPE_FLOW_RUN_COMPLETED,
      "Fake view flow message",
      rdf_objects.ObjectReference(
          reference_type=rdf_objects.ObjectReference.Type.FLOW,
          flow=rdf_objects.FlowReference(
              client_id=client_id, flow_id=session_id)))

  # FlowError
  flow_base.TerminateFlow(client_id, session_id, "Fake flow error")

  return session_id
def testHandlerReturnsCorrectStateForFlow(self):
  """Checks the RUNNING -> FINISHED transition reported by the handler."""
  operation_id = self.CreateMultiGetFileFlow(
      self.client_id, file_path="fs/os/c/bin/bash")
  request = vfs_plugin.ApiGetVfsFileContentUpdateStateArgs(
      client_id=self.client_id, operation_id=operation_id)

  # The freshly started flow must be reported as running.
  self.assertEqual(
      self.handler.Handle(request, context=self.context).state, "RUNNING")

  # Once the flow is terminated, the reported state must flip to finished.
  flow_base.TerminateFlow(self.client_id, operation_id, "Fake error")
  self.assertEqual(
      self.handler.Handle(request, context=self.context).state, "FINISHED")
def testChildTermination(self):
  """Terminating a parent flow must put its child flow into ERROR as well."""
  parent_id = flow.StartFlow(
      flow_cls=CallClientParentFlow, client_id=self.client_id)

  def ReadStates():
    # Re-reads parent and (first) child flow states from the datastore.
    parent = data_store.REL_DB.ReadFlowObject(self.client_id, parent_id)
    child = data_store.REL_DB.ReadChildFlowObjects(
        self.client_id, parent_id)[0]
    return parent.flow_state, child.flow_state

  self.assertEqual(ReadStates(), ("RUNNING", "RUNNING"))

  # Terminate the parent flow; the child should be taken down with it.
  flow_base.TerminateFlow(self.client_id, parent_id, reason="Testing")

  self.assertEqual(ReadStates(), ("ERROR", "ERROR"))
def Run(self):
  """Regression test for GetVfsFileContentUpdateState."""
  client_id = self.SetupClient(0)
  acl_test_lib.CreateUser(self.test_username)

  # Create a running mock refresh operation.
  running_flow_id = self.CreateMultiGetFileFlow(
      client_id, file_path="fs/os/c/bin/bash")

  # Create a mock refresh operation and complete it.
  finished_flow_id = self.CreateMultiGetFileFlow(
      client_id, file_path="fs/os/c/bin/bash")
  flow_base.TerminateFlow(client_id, finished_flow_id, reason="Fake Error")

  # Create an arbitrary flow to check on 404s.
  non_update_flow_id = flow.StartFlow(
      client_id=client_id, flow_cls=discovery.Interrogate)

  # Unknown flow ids should also cause 404s.
  unknown_flow_id = "F:12345678"

  # Check both operations.
  self.Check(
      "GetVfsFileContentUpdateState",
      args=vfs_plugin.ApiGetVfsFileContentUpdateStateArgs(
          client_id=client_id, operation_id=running_flow_id),
      replace={running_flow_id: "ABCDEF"})
  self.Check(
      "GetVfsFileContentUpdateState",
      args=vfs_plugin.ApiGetVfsFileContentUpdateStateArgs(
          client_id=client_id, operation_id=finished_flow_id),
      replace={finished_flow_id: "ABCDEF"})
  self.Check(
      "GetVfsFileContentUpdateState",
      args=vfs_plugin.ApiGetVfsFileContentUpdateStateArgs(
          client_id=client_id, operation_id=non_update_flow_id),
      replace={non_update_flow_id: "ABCDEF"})
  self.Check(
      "GetVfsFileContentUpdateState",
      args=vfs_plugin.ApiGetVfsFileContentUpdateStateArgs(
          client_id=client_id, operation_id=unknown_flow_id),
      replace={unknown_flow_id: "ABCDEF"})
def ProcessFlow(self, flow_processing_request):
  """The callback for the flow processing queue.

  Acks the processing request, reads the flow for processing, processes all
  ready requests, and keeps processing until the flow can be returned.

  Args:
    flow_processing_request: The queued request naming the flow to process.

  Raises:
    ValueError: If the flow is not running or no request could be processed.
  """
  client_id = flow_processing_request.client_id
  flow_id = flow_processing_request.flow_id

  logging.info("Processing flow %s/%s.", client_id, flow_id)
  data_store.REL_DB.AckFlowProcessingRequests([flow_processing_request])

  try:
    # Read-and-lock the flow so no other worker processes it concurrently.
    rdf_flow = data_store.REL_DB.ReadFlowForProcessing(
        client_id, flow_id, processing_time=rdfvalue.Duration("6h"))
  except db.ParentHuntIsNotRunningError:
    flow_base.TerminateFlow(client_id, flow_id, "Parent hunt stopped.")
    return

  flow_cls = registry.FlowRegistry.FlowClassByName(rdf_flow.flow_class_name)
  flow_obj = flow_cls(rdf_flow)

  if not flow_obj.IsRunning():
    raise ValueError(
        "Received a request to process flow %s on client %s that is not "
        "running." % (flow_id, client_id))

  processed = flow_obj.ProcessAllReadyRequests()
  if processed == 0:
    raise ValueError(
        "Unable to process any requests for flow %s on client %s." %
        (flow_id, client_id))

  # New requests may have arrived while we were processing; keep draining
  # them until the flow can be returned without pending work.
  while not self._ReturnProcessedFlow(flow_obj):
    processed = flow_obj.ProcessAllReadyRequests()
    if processed == 0:
      raise ValueError(
          "%s/%s: ReturnProcessedFlow returned false but no "
          "request could be processed (next req: %d)." %
          (client_id, flow_id, flow_obj.rdf_flow.next_request_to_process))
def ProcessFlow():
  # Give the worker a moment to pick the flow up before terminating it.
  # NOTE(review): client_id and flow_id are captured from the enclosing
  # scope — confirm they are bound before this callback runs.
  time.sleep(1)
  flow_base.TerminateFlow(client_id, flow_id, "")
def _TerminateFlow(self, flow_id):
  """Terminates the given flow on this test's client with a fixed reason."""
  flow_base.TerminateFlow(self.client_id, flow_id, "Because I said so")
def ProcessEvents(self, msgs=None, publisher_username=None):
  """Processes client crash events.

  For each reported crash: records the crash details, terminates the crashed
  flow with a descriptive reason, and (if configured) emails an alert.

  Args:
    msgs: An iterable of client crash details to process.
    publisher_username: Unused; part of the event-handler interface.
  """
  for crash_details in msgs:
    client_urn = crash_details.client_id
    client_id = client_urn.Basename()

    # The session id of the flow that crashed.
    session_id = crash_details.session_id

    # Log.
    logging.info("Client crash reported, client %s.", client_urn)

    # Export.
    GRR_CLIENT_CRASHES.Increment()

    # Write crash data.
    client = data_store.REL_DB.ReadClientSnapshot(client_id)
    if client:
      crash_details.client_info = client.startup_info.client_info
      hostname = client.knowledge_base.fqdn
    else:
      hostname = ""

    crash_details.crash_type = "Client Crash"

    # BUGFIX: derive the nanny message from *this* crash before building the
    # termination reason. Previously it was computed after TerminateFlow (and
    # only when an alert email was configured), so the termination reason
    # never reflected the current crash's nanny status and stale values
    # leaked between loop iterations.
    nanny_msg = ""
    if crash_details.nanny_status:
      nanny_msg = "Nanny status: %s" % crash_details.nanny_status

    if nanny_msg:
      termination_msg = "Client crashed, " + nanny_msg
    else:
      termination_msg = "Client crashed."

    # Terminate the flow.
    flow_id = session_id.Basename()
    flow_base.TerminateFlow(
        client_id,
        flow_id,
        reason=termination_msg,
        flow_state=rdf_flow_objects.Flow.FlowState.CRASHED)

    WriteAllCrashDetails(client_id, crash_details, flow_session_id=session_id)

    # Also send email.
    email_address = config.CONFIG["Monitoring.alert_email"]
    if not email_address:
      # BUGFIX: was `return`, which silently skipped crash handling (flow
      # termination, crash writes) for all remaining messages whenever no
      # alert email is configured.
      continue

    body = self.__class__.mail_template.render(
        client_id=client_id,
        admin_ui=config.CONFIG["AdminUI.url"],
        hostname=utils.SmartUnicode(hostname),
        url="/clients/%s" % client_id,
        nanny_msg=utils.SmartUnicode(nanny_msg),
        signature=config.CONFIG["Email.signature"])
    try:
      email_alerts.EMAIL_ALERTER.SendEmail(
          email_address,
          "GRR server",
          "Client %s reported a crash." % client_id,
          body,
          is_html=True)
    except email_alerts.EmailNotSentError as e:
      # We have already written the crash details to the DB, so failing
      # to send an email isn't super-critical.
      logging.warning(e)
def Handle(self, args, token=None):
  """Terminates the flow named in args with a GUI-cancellation reason."""
  client_id = str(args.client_id)
  flow_id = str(args.flow_id)
  flow_base.TerminateFlow(client_id, flow_id, reason="Cancelled in GUI")
def ProcessMessages(self, msgs=None, token=None):
  """Processes client crash events (AFF4/relational dual-path version).

  For each reported crash: records the crash details, emails configured
  alert addresses, and terminates the crashed flow with a descriptive
  reason.

  Args:
    msgs: An iterable of client crash details to process.
    token: The access token used for AFF4 datastore operations.
  """
  for crash_details in msgs:
    client_urn = crash_details.client_id
    client_id = client_urn.Basename()

    # The session id of the flow that crashed.
    session_id = crash_details.session_id

    # Log.
    logging.info("Client crash reported, client %s.", client_urn)

    # Export.
    stats_collector_instance.Get().IncrementCounter("grr_client_crashes")

    # Write crash data.
    if data_store.RelationalDBWriteEnabled():
      client = data_store.REL_DB.ReadClientSnapshot(client_id)
      if client:
        crash_details.client_info = client.startup_info.client_info
        hostname = client.knowledge_base.fqdn
      else:
        hostname = ""

    if data_store.AFF4Enabled():
      client = aff4.FACTORY.Open(client_urn, token=token)
      client_info = client.Get(client.Schema.CLIENT_INFO)
      hostname = client.Get(client.Schema.FQDN)
      if client_info:
        crash_details.client_info = client_info

    crash_details.crash_type = "Client Crash"

    WriteAllCrashDetails(
        client_id, crash_details, flow_session_id=session_id, token=token)

    # Also send email.
    to_send = []
    try:
      hunt_session_id = ExtractHuntId(session_id)
      if hunt_session_id and hunt_session_id != session_id:
        # TODO(amoser): Enable this for the relational db once we have hunt
        # metadata.
        if data_store.AFF4Enabled():
          hunt_obj = aff4.FACTORY.Open(
              hunt_session_id, aff4_type=implementation.GRRHunt, token=token)
          email = hunt_obj.runner_args.crash_alert_email
          if email:
            to_send.append(email)
    except aff4.InstantiationError:
      logging.error("Failed to open hunt %s.", hunt_session_id)

    email = config.CONFIG["Monitoring.alert_email"]
    if email:
      to_send.append(email)

    # BUGFIX: derive the nanny message from *this* crash before building the
    # termination reason. Previously it was only set inside the per-address
    # email loop below, after the termination reason was already built, so
    # the reason never included the current crash's nanny status and stale
    # values leaked between iterations.
    nanny_msg = ""
    if crash_details.nanny_status:
      nanny_msg = "Nanny status: %s" % crash_details.nanny_status

    if nanny_msg:
      termination_msg = "Client crashed, " + nanny_msg
    else:
      termination_msg = "Client crashed."

    for email_address in to_send:
      body = self.__class__.mail_template.render(
          client_id=client_id,
          admin_ui=config.CONFIG["AdminUI.url"],
          hostname=utils.SmartUnicode(hostname),
          url="/clients/%s" % client_id,
          nanny_msg=utils.SmartUnicode(nanny_msg),
          signature=config.CONFIG["Email.signature"])

      email_alerts.EMAIL_ALERTER.SendEmail(
          email_address,
          "GRR server",
          "Client %s reported a crash." % client_id,
          utils.SmartStr(body),
          is_html=True)

    # Now terminate the flow.
    if data_store.RelationalDBFlowsEnabled():
      flow_id = session_id.Basename()
      flow_base.TerminateFlow(client_id, flow_id, reason=termination_msg)
    else:
      flow.GRRFlow.TerminateAFF4Flow(
          session_id, reason=termination_msg, token=token)