Example #1
    def RunOnce(self):
        if config.CONFIG["Rekall.enabled"]:
            b = flow.FlowBehaviour("BASIC")
        else:
            b = flow.FlowBehaviour("DEBUG")

        # Mark Rekall-based flows as BASIC when Rekall is enabled so they show
        # up in the UI; otherwise demote them to DEBUG to hide them.
        for cls in [MemoryCollector, AnalyzeClientMemory, ListVADBinaries]:
            cls.behaviours = b
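
This RunOnce hook runs at registry-initialization time and reassigns a single shared behaviour object to every Rekall-based flow class. As a hedged illustration (not part of the example above), FlowBehaviour can be thought of as a small set of UI-visibility tags, along these lines:

# Hypothetical sketch of the FlowBehaviour container assumed above; the real
# class lives in grr_response_server/flow.py.
class FlowBehaviour(object):
    def __init__(self, *args):
        self.set = set(args)

    def __contains__(self, behaviour):
        return behaviour in self.set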
Example #2
class MemoryCollector(flow.GRRFlow):
    """Flow for scanning and imaging memory.

  MemoryCollector applies "action" (e.g. Download) to memory if memory contents
  match all given "conditions". Matches are then written to the results
  collection. If there are no "conditions", "action" is applied immediately.
  """
    friendly_name = "Memory Collector"
    category = "/Memory/"
    behaviours = flow.FlowBehaviour("DEBUG")
    args_type = MemoryCollectorArgs

    def Start(self):
        if not config.CONFIG["Rekall.enabled"]:
            raise RuntimeError(
                "Rekall flows are disabled. "
                "Add 'Rekall.enabled: True' to the config to enable "
                "them.")

        self.state.output_urn = None

        # Use Rekall to grab memory. We no longer manually check for kcore's
        # existence since Rekall does it for us and runs additional checks
        # (like the actual usability of kcore).
        client = aff4.FACTORY.Open(self.client_id, token=self.token)
        memory_size = client.Get(client.Schema.MEMORY_SIZE)
        self.state.memory_size = memory_size

        # Optionally verify there is enough free disk space before imaging.
        if self.args.check_disk_free_space:
            self.CallClient(server_stubs.CheckFreeGRRTempSpace,
                            next_state="CheckFreeSpace")
        else:
            self.RunRekallPlugin()

    def CheckFreeSpace(self, responses):
        if responses.success and responses.First():
            disk_usage = responses.First()
            if disk_usage.free < self.state.memory_size:
                raise flow.FlowError(
                    "Free space may be too low for local copy. Free "
                    "space for path %s is %s bytes. Mem size is: %s "
                    "bytes. Override with check_disk_free_space=False." %
                    (disk_usage.path, disk_usage.free, self.state.memory_size))
        else:
            logging.error(
                "Couldn't determine free disk space for temporary files.")

        self.RunRekallPlugin()

    def RunRekallPlugin(self):
        plugin = rdf_rekall_types.PluginRequest(plugin="aff4acquire")
        plugin.args["destination"] = "GRR"
        request = rdf_rekall_types.RekallRequest(plugins=[plugin])

        # Note that this will actually also retrieve the memory image.
        self.CallFlow(AnalyzeClientMemory.__name__,
                      request=request,
                      max_file_size_download=self.args.max_file_size,
                      next_state="CheckAnalyzeClientMemory")

    def CheckAnalyzeClientMemory(self, responses):
        if not responses.success:
            raise flow.FlowError("Unable to image memory: %s." %
                                 responses.status)

        for response in responses:
            for download in response.downloaded_files:
                self.state.output_urn = download
                self.SendReply(download)
                self.Log("Memory imaged successfully")
                return

        raise flow.FlowError("Rekall flow did not return any files.")
Example #3
class AnalyzeClientMemory(flow.GRRFlow):
    """Runs client side analysis using Rekall.

  This flow takes a list of Rekall plugins to run. It then sends the list of
  Rekall commands to the client. The client will run those plugins using the
  client's copy of Rekall.
  """

    category = "/Memory/"
    behaviours = flow.FlowBehaviour("DEBUG")
    args_type = AnalyzeClientMemoryArgs

    def Start(self):
        if not config.CONFIG["Rekall.enabled"]:
            raise RuntimeError(
                "Rekall flows are disabled. "
                "Add 'Rekall.enabled: True' to the config to enable "
                "them.")

        self.CallStateInline(next_state="StartAnalysis")

    def StartAnalysis(self, responses):
        self.state.rekall_context_messages = {}
        self.state.output_files = []
        self.state.plugin_errors = []

        request = self.args.request.Copy()

        # We always push the inventory to the request. This saves a round trip
        # because the client always needs it (so it can figure out if its cache is
        # still valid).
        request.profiles.append(
            self.GetProfileByName(
                "inventory", server_stubs.REKALL_PROFILE_REPOSITORY_VERSION))

        if self.args.debug_logging:
            request.session[u"logging_level"] = u"DEBUG"

        # We want to disable local profile building on the client machines.
        request.session[u"autodetect_build_local"] = u"none"

        # The client will use rekall in live mode.
        if "live" not in request.session:
            request.session["live"] = "Memory"

        self.state.rekall_request = request

        self.CallClient(server_stubs.RekallAction,
                        self.state.rekall_request,
                        next_state="StoreResults")

    def UpdateProfile(self, responses):
        """The target of the WriteRekallProfile client action."""
        if not responses.success:
            self.Log(responses.status)

    def StoreResults(self, responses):
        """Stores the results."""
        if not responses.success:
            self.state.plugin_errors.append(
                unicode(responses.status.error_message))
            # Keep processing to read out the debug messages from the json.

        self.Log("Rekall returned %s responses." % len(responses))
        for response in responses:
            if response.missing_profile:
                profile = self.GetProfileByName(response.missing_profile,
                                                response.repository_version)
                if profile:
                    self.CallClient(server_stubs.WriteRekallProfile,
                                    profile,
                                    next_state="UpdateProfile")
                else:
                    self.Log(
                        "Needed profile %s not found! See "
                        "https://github.com/google/grr-doc/blob/master/"
                        "troubleshooting.adoc#missing-rekall-profiles",
                        response.missing_profile)

            if response.json_messages:
                response.client_urn = self.client_id
                if self.state.rekall_context_messages:
                    response.json_context_messages = json.dumps(
                        list(iteritems(self.state.rekall_context_messages)),
                        separators=(",", ":"))

                json_data = json.loads(response.json_messages)
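                # Each message is a list whose first element is a type code:
                # "t"/"s" carry context that is prepended to later batches,
                # "file" announces a file written by the plugin, and "L" is
                # a log record (all handled below).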
                for message in json_data:
                    if len(message) >= 1:
                        if message[0] in ["t", "s"]:
                            self.state.rekall_context_messages[
                                message[0]] = message[1]

                        if message[0] == "file":
                            pathspec = rdf_paths.PathSpec(**message[1])
                            self.state.output_files.append(pathspec)

                        if message[0] == "L":
                            if len(message) > 1:
                                log_record = message[1]
                                self.Log("%s:%s:%s", log_record["level"],
                                         log_record["name"], log_record["msg"])

                self.SendReply(response)

        if (responses.iterator and  # This will be None if an error occurred.
                responses.iterator.state !=
                rdf_client.Iterator.State.FINISHED):
            self.state.rekall_request.iterator = responses.iterator
            self.CallClient(server_stubs.RekallAction,
                            self.state.rekall_request,
                            next_state="StoreResults")
        else:
            if self.state.output_files:
                self.Log("Getting %i files.", len(self.state.output_files))
                self.CallFlow(transfer.MultiGetFile.__name__,
                              pathspecs=self.state.output_files,
                              file_size=self.args.max_file_size_download,
                              next_state="DeleteFiles")

    def DeleteFiles(self, responses):
        # Check that the MultiGetFile flow worked.
        if not responses.success:
            raise flow.FlowError("Could not get files: %s" % responses.status)

        for output_file in self.state.output_files:
            self.CallClient(server_stubs.DeleteGRRTempFiles,
                            output_file,
                            next_state="LogDeleteFiles")

        # Let calling flows know where files ended up in AFF4 space.
        self.SendReply(
            rdf_rekall_types.RekallResponse(downloaded_files=[
                x.AFF4Path(self.client_urn) for x in responses
            ]))

    def LogDeleteFiles(self, responses):
        # Check that the DeleteFiles flow worked.
        if not responses.success:
            raise flow.FlowError("Could not delete file: %s" %
                                 responses.status)

    def End(self, responses):
        del responses
        if self.state.plugin_errors:
            all_errors = u"\n".join(
                [unicode(e) for e in self.state.plugin_errors])
            raise flow.FlowError("Error running plugins: %s" % all_errors)

    def GetProfileByName(self, name, version):
        """Load the requested profile from the repository."""
        server_type = config.CONFIG["Rekall.profile_server"]
        logging.info("Getting missing Rekall profile '%s' from %s", name,
                     server_type)

        profile_server = rekall_profile_server.ProfileServer.classes[
            server_type]()

        return profile_server.GetProfileByName(name, version=version)
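
The request this flow consumes mirrors what MemoryCollector builds in RunRekallPlugin() above. A minimal sketch, with an illustrative plugin name:

# Minimal sketch of an AnalyzeClientMemory request; "pslist" is illustrative.
plugin = rdf_rekall_types.PluginRequest(plugin="pslist")
request = rdf_rekall_types.RekallRequest(plugins=[plugin])
# AnalyzeClientMemoryArgs would carry this request plus the optional
# max_file_size_download cap applied when fetching files the plugins emit.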
Example #4
class ListVADBinaries(flow.GRRFlow):
    r"""Get list of all running binaries from Rekall, (optionally) fetch them.

    This flow executes the "vad" Rekall plugin to get the list of all
    currently running binaries (including dynamic libraries). Then if
    fetch_binaries option is set to True, it fetches all the binaries it has
    found.

    There is a caveat regarding using the "vad" plugin to detect currently
    running executable binaries. The "Filename" member of the _FILE_OBJECT
    struct is not reliable:

      * Usually it does not include volume information, e.g.
        \Windows\some\path. Therefore it's impossible to detect the actual
        volume where the executable is located.

      * If the binary is executed from a shared network volume, the Filename
        attribute is not descriptive enough to easily fetch the file.

      * If the binary is executed directly from a network location (without
        mounting the volume), the Filename attribute will contain yet another
        form of path.

      * The Filename attribute is not actually used by the system (it's
        probably there for debugging purposes). It can easily be overwritten
        by a rootkit without any noticeable consequences for the running
        system, breaking our functionality as a result.

    Therefore this plugin's functionality is somewhat limited. Basically, it
    won't fetch binaries that are located on non-default volumes.

    Possible workarounds (future work):
    * Find a way to map a given address space to the filename on the
      filesystem.
    * Fetch binaries directly from memory by forcing page-ins first (via
      some debug userland-process-dump API?) and then reading the memory.
    """
    category = "/Memory/"
    behaviours = flow.FlowBehaviour("DEBUG")
    args_type = ListVADBinariesArgs

    def Start(self):
        """Request VAD data."""
        if not config.CONFIG["Rekall.enabled"]:
            raise RuntimeError(
                "Rekall flows are disabled. "
                "Add 'Rekall.enabled: True' to the config to enable "
                "them.")

        self.CallFlow(
            # TODO(user): dependency loop between collectors.py and memory.py.
            # collectors.ArtifactCollectorFlow.__name__,
            "ArtifactCollectorFlow",
            artifact_list=["FullVADBinaryList"],
            next_state="FetchBinaries")

    def FetchBinaries(self, responses):
        """Parses the Rekall response and initiates FileFinder flows."""
        if not responses.success:
            self.Log("Error fetching VAD data: %s", responses.status)
            return

        self.Log("Found %d binaries", len(responses))

        if self.args.filename_regex:
            binaries = []
            for response in responses:
                if self.args.filename_regex.Match(response.CollapsePath()):
                    binaries.append(response)

            self.Log("Applied filename regex. Have %d files after filtering.",
                     len(binaries))
        else:
            binaries = responses

        if self.args.fetch_binaries:
            self.CallFlow(file_finder.FileFinder.__name__,
                          next_state="HandleDownloadedFiles",
                          paths=[
                              rdf_paths.GlobExpression(b.CollapsePath())
                              for b in binaries
                          ],
                          pathtype=rdf_paths.PathSpec.PathType.OS,
                          action=rdf_file_finder.FileFinderAction.Download())
        else:
            for b in binaries:
                self.SendReply(b)

    def HandleDownloadedFiles(self, responses):
        """Handle success/failure of the FileFinder flow."""
        if responses.success:
            for file_finder_result in responses:
                self.SendReply(file_finder_result.stat_entry)
                self.Log("Downloaded %s",
                         file_finder_result.stat_entry.pathspec.CollapsePath())
        else:
            self.Log("Binaries download failed: %s", responses.status)
Example #5
class FlowBase(with_metaclass(registry.FlowRegistry, object)):
  """The base class for new style flow objects."""

  # Alternatively we can specify a single semantic protobuf that will be used to
  # provide the args.
  args_type = flow.EmptyFlowArgs

  # This is used to arrange flows into a tree view
  category = ""
  friendly_name = None

  # Behaviours set attributes of this flow. See FlowBehaviour() in
  # grr_response_server/flow.py.
  behaviours = flow.FlowBehaviour("ADVANCED")

  def __init__(self, rdf_flow):
    self.rdf_flow = rdf_flow
    self.flow_requests = []
    self.flow_responses = []
    self.client_messages = []
    self.completed_requests = []
    self.replies_to_process = []
    self.replies_to_write = []

    # TODO(amoser): Remove when AFF4 is gone.
    self.token = access_control.ACLToken(username=self.creator)

    self._state = None

  def Start(self):
    """The first state of the flow."""

  def End(self, responses):
    """Final state.

    This method is called prior to destruction of the flow.

    Args:
      responses: The responses for this state.
    """

  def HeartBeat(self):
    """New-style flows don't need heart-beat, keeping for compatibility."""
    pass

  def CallState(self, next_state="", start_time=None):
    """This method is used to schedule a new state on a different worker.

    This is basically the same as CallFlow() except we are calling
    ourselves. The state will be invoked at a later time.

    Args:
       next_state: The state in this flow to be invoked.
       start_time: Start the flow at this time. This delays notification for
         flow processing into the future. Note that the flow may still be
         processed earlier if there are client responses waiting.

    Raises:
      ValueError: The next state specified does not exist.
    """
    if not getattr(self, next_state, None):
      raise ValueError("Next state %s is invalid." % next_state)

    flow_request = rdf_flow_objects.FlowRequest(
        client_id=self.rdf_flow.client_id,
        flow_id=self.rdf_flow.flow_id,
        request_id=self.GetNextOutboundId(),
        next_state=next_state,
        start_time=start_time,
        needs_processing=True)

    self.flow_requests.append(flow_request)

  def CallStateInline(self,
                      messages=None,
                      next_state="",
                      request_data=None,
                      responses=None):
    if responses is None:
      responses = flow_responses.FakeResponses(messages, request_data)

    getattr(self, next_state)(responses)

  def CallClient(self,
                 action_cls,
                 request=None,
                 next_state=None,
                 request_data=None,
                 **kwargs):
    """Calls the client asynchronously.

    This sends a message to the client to invoke an Action. The client action
    may send back many responses that will be queued by the framework until a
    status message is sent by the client. The status message will cause the
    entire transaction to be committed to the specified state.

    Args:
       action_cls: The function to call on the client.
       request: The request to send to the client. If not specified, we create a
         new RDFValue using the kwargs.
       next_state: The state in this flow that responses to this message should
         go to.
       request_data: A dict which will be available in the RequestState
         protobuf. The Responses object maintains a reference to this protobuf
         for use in the execution of the state method (so you can access this
         data via responses.request).
       **kwargs: These args will be used to construct the client action argument
         rdfvalue.

    Raises:
       ValueError: The request passed to the client does not have the correct
                   type.
    """
    if action_cls.in_rdfvalue is None:
      if request:
        raise ValueError(
            "Client action %s does not expect args." % action_cls.__name__)
    else:
      if request is None:
        # Create a new rdf request.
        request = action_cls.in_rdfvalue(**kwargs)
      else:
        # Verify that the request type matches the client action requirements.
        if not isinstance(request, action_cls.in_rdfvalue):
          raise ValueError("Client action expected %s but got %s" %
                           (action_cls.in_rdfvalue, type(request)))

    outbound_id = self.GetNextOutboundId()

    # Create a flow request.
    flow_request = rdf_flow_objects.FlowRequest(
        client_id=self.rdf_flow.client_id,
        flow_id=self.rdf_flow.flow_id,
        request_id=outbound_id,
        next_state=next_state)

    if request_data is not None:
      flow_request.request_data = rdf_protodict.Dict().FromDict(request_data)

    msg = rdf_flows.GrrMessage(
        session_id=self.rdf_flow.long_flow_id,
        name=action_cls.__name__,
        request_id=outbound_id,
        queue=self.rdf_flow.client_id,
        payload=request,
        generate_task_id=True)

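    # Pass the remaining CPU and network budget down to the client so it can
    # enforce the limits locally; an exhausted budget aborts the flow here.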
    if self.rdf_flow.cpu_limit:
      cpu_usage = self.rdf_flow.cpu_time_used
      msg.cpu_limit = max(
          self.rdf_flow.cpu_limit - cpu_usage.user_cpu_time -
          cpu_usage.system_cpu_time, 0)

      if msg.cpu_limit == 0:
        raise flow.FlowError("CPU limit exceeded.")

    if self.rdf_flow.network_bytes_limit:
      msg.network_bytes_limit = max(
          self.rdf_flow.network_bytes_limit - self.rdf_flow.network_bytes_sent,
          0)
      if msg.network_bytes_limit == 0:
        raise flow.FlowError("Network limit exceeded.")

    self.flow_requests.append(flow_request)
    self.client_messages.append(msg)

  def CallFlow(self,
               flow_name=None,
               next_state=None,
               request_data=None,
               client_id=None,
               base_session_id=None,
               **kwargs):
    """Creates a new flow and send its responses to a state.

    This creates a new flow. The flow may send back many responses which will be
    queued by the framework until the flow terminates. The final status message
    will cause the entire transaction to be committed to the specified state.

    Args:
       flow_name: The name of the flow to invoke.
       next_state: The state in this flow that responses to this message should
         go to.
       request_data: Any dict provided here will be available in the
         RequestState protobuf. The Responses object maintains a reference to
         this protobuf for use in the execution of the state method (so you can
         access this data via responses.request). There is no format mandated
         for this data but it may be a serialized protobuf.
       client_id: If given, the flow is started for this client.
       base_session_id: A URN which will be used to build the child flow's
         session URN.
       **kwargs: Arguments for the child flow.

    Returns:
       The flow_id of the child flow which was created.

    Raises:
      ValueError: The requested next state does not exist.
    """
    if not getattr(self, next_state, None):
      raise ValueError("Next state %s is invalid." % next_state)

    flow_request = rdf_flow_objects.FlowRequest(
        client_id=self.rdf_flow.client_id,
        flow_id=self.rdf_flow.flow_id,
        request_id=self.GetNextOutboundId(),
        next_state=next_state)

    if request_data is not None:
      flow_request.request_data = rdf_protodict.Dict().FromDict(request_data)

    self.flow_requests.append(flow_request)

    flow_cls = registry.FlowRegistry.FlowClassByName(flow_name)

    return flow.StartFlow(
        client_id=self.rdf_flow.client_id,
        flow_cls=flow_cls,
        parent_flow_obj=self,
        **kwargs)

  def SendReply(self, response, tag=None):
    """Allows this flow to send a message to its parent flow.

    If this flow does not have a parent, the reply is written to the flow's
    result collection instead.

    Args:
      response: An RDFValue() instance to be sent to the parent.
      tag: If specified, tag the result with this tag.

    Raises:
      ValueError: If response is not of the correct type.
    """
    if not isinstance(response, rdfvalue.RDFValue):
      raise ValueError("SendReply can only send RDFValues")

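    # With a parent flow, the reply travels up as a FlowResponse; a top-level
    # flow writes a FlowResult and queues it for output-plugin processing.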
    if self.rdf_flow.parent_flow_id:
      response = rdf_flow_objects.FlowResponse(
          client_id=self.rdf_flow.client_id,
          request_id=self.rdf_flow.parent_request_id,
          response_id=self.GetNextResponseId(),
          payload=response,
          flow_id=self.rdf_flow.parent_flow_id,
          tag=tag)

      self.flow_responses.append(response)
    else:
      reply = rdf_flow_objects.FlowResult(payload=response, tag=tag)
      self.replies_to_write.append(reply)
      self.replies_to_process.append(reply)

    self.rdf_flow.num_replies_sent += 1

  def SaveResourceUsage(self, status):
    """Method to tally resources."""
    user_cpu = status.cpu_time_used.user_cpu_time
    system_cpu = status.cpu_time_used.system_cpu_time
    self.rdf_flow.cpu_time_used.user_cpu_time += user_cpu
    self.rdf_flow.cpu_time_used.system_cpu_time += system_cpu

    self.rdf_flow.network_bytes_sent += status.network_bytes_sent

    if self.rdf_flow.cpu_limit:
      user_cpu_total = self.rdf_flow.cpu_time_used.user_cpu_time
      system_cpu_total = self.rdf_flow.cpu_time_used.system_cpu_time
      if self.rdf_flow.cpu_limit < (user_cpu_total + system_cpu_total):
        # We have exceeded our limit, stop this flow.
        raise flow.FlowError("CPU limit exceeded.")

    if (self.rdf_flow.network_bytes_limit and
        self.rdf_flow.network_bytes_limit < self.rdf_flow.network_bytes_sent):
      # We have exceeded our byte limit, stop this flow.
      raise flow.FlowError("Network bytes limit exceeded.")

  def Error(self, error_message=None, backtrace=None, status=None):
    """Terminates this flow with an error."""
    client_id = self.rdf_flow.client_id
    flow_id = self.rdf_flow.flow_id

    logging.error("Error in flow %s on %s: %s, %s", flow_id, client_id,
                  error_message, backtrace)

    if self.rdf_flow.parent_flow_id or self.rdf_flow.parent_hunt_id:
      status_msg = rdf_flow_objects.FlowStatus(
          client_id=client_id,
          request_id=self.rdf_flow.parent_request_id,
          response_id=self.GetNextResponseId(),
          cpu_time_used=self.rdf_flow.cpu_time_used,
          network_bytes_sent=self.rdf_flow.network_bytes_sent,
          error_message=error_message,
          flow_id=self.rdf_flow.parent_flow_id,
          backtrace=backtrace)

      if status is not None:
        status_msg.status = status
      else:
        status_msg.status = rdf_flow_objects.FlowStatus.Status.ERROR

      if self.rdf_flow.parent_flow_id:
        self.flow_responses.append(status_msg)
      elif self.rdf_flow.parent_hunt_id:
        db_compat.ProcessHuntFlowError(
            self.rdf_flow,
            error_message=error_message,
            backtrace=backtrace,
            status_msg=status_msg)

    self.rdf_flow.flow_state = self.rdf_flow.FlowState.ERROR
    if backtrace is not None:
      self.rdf_flow.backtrace = backtrace
    if error_message is not None:
      self.rdf_flow.error_message = error_message

    self.NotifyCreatorOfError()

  def NotifyCreatorOfError(self):
    if self.ShouldSendNotifications():
      client_id = self.rdf_flow.client_id
      flow_id = self.rdf_flow.flow_id

      flow_ref = rdf_objects.FlowReference(client_id=client_id, flow_id=flow_id)
      notification_lib.Notify(
          self.creator, rdf_objects.UserNotification.Type.TYPE_FLOW_RUN_FAILED,
          "Flow %s on %s terminated due to error" % (flow_id, client_id),
          rdf_objects.ObjectReference(
              reference_type=rdf_objects.ObjectReference.Type.FLOW,
              flow=flow_ref))

  def _ClearAllRequestsAndResponses(self):
    client_id = self.rdf_flow.client_id
    flow_id = self.rdf_flow.flow_id

    # Drop this flow's requests from the deletion queue; the bulk call below
    # removes them all anyway.
    self.completed_requests = [
        r for r in self.completed_requests
        if r.client_id != client_id or r.flow_id != flow_id
    ]

    data_store.REL_DB.DeleteAllFlowRequestsAndResponses(client_id, flow_id)

  def NotifyAboutEnd(self):
    # Sum up number of replies to write with the number of already
    # written results.
    num_results = (
        len(self.replies_to_write) + data_store.REL_DB.CountFlowResults(
            self.rdf_flow.client_id, self.rdf_flow.flow_id))
    flow_ref = rdf_objects.FlowReference(
        client_id=self.rdf_flow.client_id, flow_id=self.rdf_flow.flow_id)
    notification_lib.Notify(
        self.creator, rdf_objects.UserNotification.Type.TYPE_FLOW_RUN_COMPLETED,
        "Flow %s completed with %d %s" % (self.__class__.__name__, num_results,
                                          num_results == 1 and "result" or
                                          "results"),
        rdf_objects.ObjectReference(
            reference_type=rdf_objects.ObjectReference.Type.FLOW,
            flow=flow_ref))

  def MarkDone(self, status=None):
    """Marks this flow as done."""
    # Notify our parent flow or hunt that we are done (if there's a parent flow
    # or hunt).
    if self.rdf_flow.parent_flow_id or self.rdf_flow.parent_hunt_id:
      status = rdf_flow_objects.FlowStatus(
          client_id=self.rdf_flow.client_id,
          request_id=self.rdf_flow.parent_request_id,
          response_id=self.GetNextResponseId(),
          status=rdf_flow_objects.FlowStatus.Status.OK,
          cpu_time_used=self.rdf_flow.cpu_time_used,
          network_bytes_sent=self.rdf_flow.network_bytes_sent,
          flow_id=self.rdf_flow.parent_flow_id)
      if self.rdf_flow.parent_flow_id:
        self.flow_responses.append(status)
      elif self.rdf_flow.parent_hunt_id:
        db_compat.ProcessHuntFlowDone(self.rdf_flow, status_msg=status)

    self.rdf_flow.flow_state = self.rdf_flow.FlowState.FINISHED

    if self.ShouldSendNotifications():
      self.NotifyAboutEnd()

  def Log(self, format_str, *args):
    """Logs the message using the flow's standard logging.

    Args:
      format_str: Format string
      *args: arguments to the format string
    """
    if self.rdf_flow.parent_hunt_id:
      db_compat.ProcessHuntFlowLog(self.rdf_flow, format_str % args)
    else:
      log_entry = rdf_flow_objects.FlowLogEntry(message=format_str % args)
      data_store.REL_DB.WriteFlowLogEntries(self.rdf_flow.client_id,
                                            self.rdf_flow.flow_id, [log_entry])

  def RunStateMethod(self, method_name, request=None, responses=None):
    """Completes the request by calling the state method.

    Args:
      method_name: The name of the state method to call.
      request: A RequestState protobuf.
      responses: A list of GrrMessages responding to the request.
    """
    if self.rdf_flow.pending_termination:
      self.Error(error_message=self.rdf_flow.pending_termination.reason)
      return

    client_id = self.rdf_flow.client_id

    deadline = self.rdf_flow.processing_deadline
    if deadline and rdfvalue.RDFDatetime.Now() > deadline:
      raise flow.FlowError("Processing time for flow %s on %s expired." %
                           (self.rdf_flow.flow_id, self.rdf_flow.client_id))

    self.rdf_flow.current_state = method_name
    if request and responses:
      logging.debug("Running %s for flow %s on %s, %d responses.", method_name,
                    self.rdf_flow.flow_id, client_id, len(responses))
    else:
      logging.debug("Running %s for flow %s on %s", method_name,
                    self.rdf_flow.flow_id, client_id)

    try:
      try:
        method = getattr(self, method_name)
      except AttributeError:
        raise ValueError("Flow %s has no state method %s" %
                         (self.__class__.__name__, method_name))

      # Prepare a responses object for the state method to use:
      responses = flow_responses.Responses.FromResponses(
          request=request, responses=responses)

      if responses.status is not None:
        self.SaveResourceUsage(responses.status)

      stats_collector_instance.Get().IncrementCounter("grr_worker_states_run")

      if method_name == "Start":
        stats_collector_instance.Get().IncrementCounter(
            "flow_starts", fields=[self.rdf_flow.flow_class_name])
        method()
      else:
        method(responses)

      if self.replies_to_process:
        self._ProcessRepliesWithOutputPlugins(self.replies_to_process)
        self.replies_to_process = []

    # We don't know here what exceptions can be thrown in the flow but we have
    # to continue. Thus, we catch everything.
    except Exception as e:  # pylint: disable=broad-except
      # This flow will terminate now
      stats_collector_instance.Get().IncrementCounter(
          "flow_errors", fields=[self.rdf_flow.flow_class_name])
      logging.exception("Flow %s on %s raised %s.", self.rdf_flow.flow_id,
                        client_id, utils.SmartUnicode(e))

      self.Error(
          error_message=utils.SmartUnicode(e), backtrace=traceback.format_exc())

  def ProcessAllReadyRequests(self):
    """Processes all requests that are due to run.

    Returns:
      The number of processed requests.
    """
    request_dict = data_store.REL_DB.ReadFlowRequestsReadyForProcessing(
        self.rdf_flow.client_id,
        self.rdf_flow.flow_id,
        next_needed_request=self.rdf_flow.next_request_to_process)
    if not request_dict:
      return 0

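    # Requests are processed strictly in id order; a gap in the sequence
    # stops the loop until the missing request's responses arrive.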
    processed = 0
    while self.rdf_flow.next_request_to_process in request_dict:
      request, responses = request_dict[self.rdf_flow.next_request_to_process]
      self.RunStateMethod(request.next_state, request, responses)
      self.rdf_flow.next_request_to_process += 1
      processed += 1
      self.completed_requests.append(request)

    if processed and self.IsRunning() and not self.outstanding_requests:
      self.RunStateMethod("End")
      if (self.rdf_flow.flow_state == self.rdf_flow.FlowState.RUNNING and
          not self.outstanding_requests):
        self.MarkDone()

    self.PersistState()

    if not self.IsRunning():
      # All requests and responses can now be deleted.
      self._ClearAllRequestsAndResponses()

    return processed

  @property
  def outstanding_requests(self):
    """Returns the number of all outstanding requests.

    This is used to determine whether the flow can be destroyed yet.

    Returns:
       the number of all outstanding requests.
    """
    return (
        self.rdf_flow.next_outbound_id - self.rdf_flow.next_request_to_process)

  def GetNextOutboundId(self):
    my_id = self.rdf_flow.next_outbound_id
    self.rdf_flow.next_outbound_id += 1
    return my_id

  def GetCurrentOutboundId(self):
    return self.rdf_flow.next_outbound_id - 1

  def GetNextResponseId(self):
    self.rdf_flow.response_count += 1
    return self.rdf_flow.response_count

  def FlushQueuedMessages(self):
    # TODO(amoser): This could be done in a single db call; might be worth
    # optimizing.

    if self.flow_requests:
      data_store.REL_DB.WriteFlowRequests(self.flow_requests)
      self.flow_requests = []

    if self.flow_responses:
      data_store.REL_DB.WriteFlowResponses(self.flow_responses)
      self.flow_responses = []

    if self.client_messages:
      client_id = self.rdf_flow.client_id
      if fleetspeak_utils.IsFleetspeakEnabledClient(client_id):
        for task in self.client_messages:
          fleetspeak_utils.SendGrrMessageThroughFleetspeak(client_id, task)
      else:
        data_store.REL_DB.WriteClientMessages(self.client_messages)

      self.client_messages = []

    if self.completed_requests:
      data_store.REL_DB.DeleteFlowRequests(self.completed_requests)
      self.completed_requests = []

    if self.replies_to_write:
      # For top-level hunt-induced flows, write results to the hunt collection.
      if self.rdf_flow.parent_hunt_id and not self.rdf_flow.parent_flow_id:
        db_compat.WriteHuntResults(self.rdf_flow.client_id,
                                   self.rdf_flow.parent_hunt_id,
                                   self.replies_to_write)
      else:
        data_store.REL_DB.WriteFlowResults(self.rdf_flow.client_id,
                                           self.rdf_flow.flow_id,
                                           self.replies_to_write)
      self.replies_to_write = []

  def _ProcessRepliesWithOutputPlugins(self, replies):
    """Processes replies with output plugins."""
    for output_plugin_state in self.rdf_flow.output_plugins_states:
      plugin_descriptor = output_plugin_state.plugin_descriptor
      output_plugin_cls = plugin_descriptor.GetPluginClass()
      output_plugin = output_plugin_cls(
          source_urn=self.rdf_flow.long_flow_id,
          args=plugin_descriptor.plugin_args,
          token=access_control.ACLToken(username=self.rdf_flow.creator))

      try:
        output_plugin.ProcessResponses(output_plugin_state.plugin_state,
                                       [r.payload for r in replies])
        output_plugin.Flush(output_plugin_state.plugin_state)
        output_plugin.UpdateState(output_plugin_state.plugin_state)

        log_item = output_plugin_lib.OutputPluginBatchProcessingStatus(
            plugin_descriptor=plugin_descriptor,
            status="SUCCESS",
            batch_size=len(replies))
        output_plugin_state.Log(log_item)

        self.Log("Plugin %s successfully processed %d flow replies.",
                 plugin_descriptor, len(replies))
      except Exception as e:  # pylint: disable=broad-except
        error = output_plugin_lib.OutputPluginBatchProcessingStatus(
            plugin_descriptor=plugin_descriptor,
            status="ERROR",
            summary=utils.SmartUnicode(e),
            batch_size=len(replies))
        output_plugin_state.Error(error)

        self.Log("Plugin %s failed to process %d replies due to: %s",
                 plugin_descriptor, len(replies), e)

  def MergeQueuedMessages(self, flow_obj):
    self.flow_requests.extend(flow_obj.flow_requests)
    flow_obj.flow_requests = []
    self.flow_responses.extend(flow_obj.flow_responses)
    flow_obj.flow_responses = []
    self.client_messages.extend(flow_obj.client_messages)
    flow_obj.client_messages = []
    self.completed_requests.extend(flow_obj.completed_requests)
    flow_obj.completed_requests = []
    self.replies_to_write.extend(flow_obj.replies_to_write)
    flow_obj.replies_to_write = []

  def ShouldSendNotifications(self):
    return bool(not self.rdf_flow.parent_flow_id and
                not self.rdf_flow.parent_hunt_id and self.creator and
                self.creator not in aff4_users.GRRUser.SYSTEM_USERS)

  def IsRunning(self):
    return self.rdf_flow.flow_state == self.rdf_flow.FlowState.RUNNING

  @property
  def client_id(self):
    return self.rdf_flow.client_id

  @property
  def client_urn(self):
    return rdfvalue.RDFURN(self.client_id)

  @property
  def state(self):
    if self._state is None:
      self._state = flow.AttributedDict(self.rdf_flow.persistent_data.ToDict())
    return self._state

  def PersistState(self):
    if self._state is not None:
      self.rdf_flow.persistent_data = self._state

  @property
  def args(self):
    return self.rdf_flow.args

  @property
  def client_version(self):
    try:
      return self._client_version
    except AttributeError:
      self._client_version = data_store_utils.GetClientVersion(self.client_id)

    return self._client_version

  @property
  def client_os(self):
    try:
      return self._client_os
    except AttributeError:
      self._client_os = data_store_utils.GetClientOs(self.client_id)

    return self._client_os

  @property
  def creator(self):
    return self.rdf_flow.creator

  @classmethod
  def GetDefaultArgs(cls, username=None):
    return cls.args_type()
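
To tie the base class together, here is a minimal, hedged sketch of a subclass that uses only the methods shown above. ListProcesses is assumed to be an existing client action stub; the state name is arbitrary.

class ListProcessesExample(FlowBase):
  """Asks the client for its process list and forwards each entry."""

  def Start(self):
    # Responses from the client action are delivered to ProcessListing.
    self.CallClient(server_stubs.ListProcesses, next_state="ProcessListing")

  def ProcessListing(self, responses):
    if not responses.success:
      raise flow.FlowError("Client action failed: %s" % responses.status)

    for response in responses:
      # Without a parent flow, each reply is written as a FlowResult.
      self.SendReply(response)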