Example #1
    def MarkDone(self, responses):
        """Mark a client as done."""
        client_id = responses.request.client_id

        # Account for the child flow's reported resource usage.
        flow_path = responses.status.child_session_id
        status = responses.status

        resources = rdfvalue.ClientResources()
        resources.client_id = client_id
        resources.session_id = flow_path
        resources.cpu_usage.user_cpu_time = status.cpu_time_used.user_cpu_time
        resources.cpu_usage.system_cpu_time = status.cpu_time_used.system_cpu_time
        resources.network_bytes_sent = status.network_bytes_sent
        self.state.context.usage_stats.RegisterResources(resources)

        if responses.success:
            msg = "Flow %s completed." % self.state.context.description,
            self.LogResult(client_id, msg)

            with self.lock:
                self.processed_responses = True
                msgs = [
                    rdfvalue.GrrMessage(payload=response, source=client_id)
                    for response in responses
                ]
                self.state.context.results_collection.AddAll(msgs)

        else:
            self.LogClientError(client_id,
                                log_message=utils.SmartStr(responses.status))

        self.MarkClientDone(client_id)
Example #2
    def InitializeContext(self, args):
        """Initializes the context of this flow."""
        if args is None:
            args = rdfvalue.FlowRunnerArgs()

        context = flows.DataObject(
            args=args,
            backtrace=None,
            client_resources=rdfvalue.ClientResources(),
            create_time=rdfvalue.RDFDatetime().Now(),
            creator=self.token.username,
            current_state="Start",
            network_bytes_sent=0,
            next_outbound_id=1,
            next_processed_request=1,
            next_states=set(),
            output=self._CreateOutputCollection(args),
            outstanding_requests=0,
            remaining_cpu_quota=args.cpu_limit,
            state=rdfvalue.Flow.State.RUNNING,
            user=self.token.username,

            # Whether we have sent a notification to the user.
            user_notified=False,
        )

        # Store the context in the flow_obj for next time.
        self.flow_obj.state.Register("context", context)

        return context
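The context built above is stored with flow_obj.state.Register("context", context) and later read back via attribute access (for example self.state.context.usage_stats in Example #1). The stand-in below is only a rough, self-contained sketch of that access pattern; the real flows.DataObject / utils.DataObject classes live inside GRR and are not reproduced here.

    class DataObjectSketch(dict):
        """Illustrative dict with attribute access and a Register() helper."""

        def Register(self, name, value=None):
            # Mirrors the explicit flow_obj.state.Register("context", context)
            # calls in the examples.
            self[name] = value

        def __getattr__(self, name):
            try:
                return self[name]
            except KeyError:
                raise AttributeError(name)

        def __setattr__(self, name, value):
            self[name] = value

    # Hypothetical usage, mirroring how the runner reads the stored context:
    state = DataObjectSketch()
    state.Register("context", DataObjectSketch(current_state="Start"))
    assert state.context.current_state == "Start"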
Example #3
    def StoreResourceUsage(self, responses, client_id):
        """Open child flow and account its' reported resource usage."""
        flow_path = responses.status.child_session_id
        status = responses.status

        resources = rdfvalue.ClientResources()
        resources.client_id = client_id
        resources.session_id = flow_path
        resources.cpu_usage.user_cpu_time = status.cpu_time_used.user_cpu_time
        resources.cpu_usage.system_cpu_time = status.cpu_time_used.system_cpu_time
        resources.network_bytes_sent = status.network_bytes_sent
        self.state.context.usage_stats.RegisterResources(resources)
Example #4
    def InitializeContext(self, args):
        """Initializes the context of this flow."""
        if args is None:
            args = rdfvalue.FlowRunnerArgs()

        output_collection = self._CreateOutputCollection(args)
        # Output collection is nullified when flow is terminated, so we're
        # keeping the urn separately for further reference.
        output_urn = (output_collection is not None) and output_collection.urn

        output_plugins_states = []
        for plugin_descriptor in args.output_plugins:
            plugin_class = plugin_descriptor.GetPluginClass()
            plugin = plugin_class(output_urn,
                                  args=plugin_descriptor.plugin_args,
                                  token=self.token)
            try:
                plugin.Initialize()
                output_plugins_states.append((plugin_descriptor, plugin.state))
            except Exception as e:  # pylint: disable=broad-except
                self.Log("Plugin %s failed to initialize (%s), ignoring it." %
                         (plugin, e))

        context = utils.DataObject(
            args=args,
            backtrace=None,
            client_resources=rdfvalue.ClientResources(),
            create_time=rdfvalue.RDFDatetime().Now(),
            creator=args.creator or self.token.username,
            current_state="Start",
            # If not None, kill-stuck-flow notification is scheduled at the given
            # time.
            kill_timestamp=None,
            network_bytes_sent=0,
            next_outbound_id=1,
            next_processed_request=1,
            next_states=set(),
            output=output_collection,
            output_plugins_states=output_plugins_states,
            output_urn=output_urn,
            outstanding_requests=0,
            remaining_cpu_quota=args.cpu_limit,
            state=rdfvalue.Flow.State.RUNNING,

            # Whether we have sent a notification to the user.
            user_notified=False,
        )

        # Store the context in the flow_obj for next time.
        self.flow_obj.state.Register("context", context)

        return context
Example #5
    def InitializeContext(self, args):
        """Initializes the context of this hunt."""
        if args is None:
            args = HuntRunnerArgs()

        # For large hunts, checking client limits creates a high load on the
        # foreman since it needs to read the hunt object's client list. We
        # therefore cap client_limit at 1000. Note that a client_limit of 0
        # means unlimited, which is allowed (the foreman then does not need to
        # check the client list).
        if args.client_limit > 1000:
            raise RuntimeError("Please specify client_limit <= 1000.")

        context = utils.DataObject(
            args=args,
            backtrace=None,
            client_resources=rdfvalue.ClientResources(),
            create_time=rdfvalue.RDFDatetime().Now(),
            creator=self.token.username,
            expires=rdfvalue.RDFDatetime().Now(),
            # If not None, kill-stuck-flow notification is scheduled at the given
            # time.
            kill_timestamp=None,
            network_bytes_sent=0,
            next_client_due=0,
            next_outbound_id=1,
            next_processed_request=1,
            next_states=set(),
            outstanding_requests=0,
            current_state=None,
            start_time=rdfvalue.RDFDatetime().Now(),

            # Hunts are always in the running state.
            state=rdfvalue.Flow.State.RUNNING,
            usage_stats=rdfvalue.ClientResourcesStats(),
            remaining_cpu_quota=args.cpu_limit,
        )

        # Store the context in the flow_obj for next time.
        self.flow_obj.state.Register("context", context)

        return context
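The usage_stats=rdfvalue.ClientResourcesStats() registered in this hunt context is the object that Examples #1 and #3 feed through self.state.context.usage_stats.RegisterResources(...). Boiled down to a free-standing helper (the grr.lib import path and the standalone function are assumptions; in GRR this logic runs as a method on the hunt runner), that accounting step looks roughly like this:

    from grr.lib import rdfvalue

    def AccountChildFlowResources(usage_stats, status, client_id):
        """Registers a child flow's reported resource usage on usage_stats."""
        resources = rdfvalue.ClientResources()
        resources.client_id = client_id
        resources.session_id = status.child_session_id
        resources.cpu_usage.user_cpu_time = status.cpu_time_used.user_cpu_time
        resources.cpu_usage.system_cpu_time = status.cpu_time_used.system_cpu_time
        resources.network_bytes_sent = status.network_bytes_sent
        # usage_stats is the rdfvalue.ClientResourcesStats() held in the hunt
        # context above; status is the responses.status of a finished child flow.
        usage_stats.RegisterResources(resources)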
Example #6
    def InitializeContext(self, args):
        """Initializes the context of this flow."""
        if args is None:
            args = rdfvalue.FlowRunnerArgs()

        output_collection = self._CreateOutputCollection(args)
        context = utils.DataObject(
            args=args,
            backtrace=None,
            client_resources=rdfvalue.ClientResources(),
            create_time=rdfvalue.RDFDatetime().Now(),
            creator=args.creator or self.token.username,
            current_state="Start",
            # If not None, kill-stuck-flow notification is scheduled at the given
            # time.
            kill_timestamp=None,
            network_bytes_sent=0,
            next_outbound_id=1,
            next_processed_request=1,
            next_states=set(),
            output=output_collection,
            # Output collection is nullified when flow is terminated, so we're
            # keeping the urn separately for further reference.
            output_urn=(output_collection is not None) and output_collection.urn,
            outstanding_requests=0,
            remaining_cpu_quota=args.cpu_limit,
            state=rdfvalue.Flow.State.RUNNING,

            # Whether we have sent a notification to the user.
            user_notified=False,
        )

        # Store the context in the flow_obj for next time.
        self.flow_obj.state.Register("context", context)

        return context