def StoreResourceUsage(self, responses, client_id):
  """Record the resource usage reported by a finished child flow.

  Reads the status of the completed child flow from responses and
  registers the CPU time and network bytes it consumed against this
  flow's usage statistics.

  Args:
    responses: Responses object whose status carries the child flow's
      session id and resource accounting.
    client_id: Id of the client the child flow ran on.
  """
  status = responses.status

  usage = rdf_client.ClientResources()
  usage.client_id = client_id
  usage.session_id = status.child_session_id
  usage.cpu_usage.user_cpu_time = status.cpu_time_used.user_cpu_time
  usage.cpu_usage.system_cpu_time = status.cpu_time_used.system_cpu_time
  usage.network_bytes_sent = status.network_bytes_sent

  self.state.context.usage_stats.RegisterResources(usage)
def InitializeContext(self, args):
  """Initializes the context of this flow.

  Builds the runner context DataObject: creates the output collection,
  instantiates and initializes each configured output plugin (plugins
  that fail to initialize are logged and skipped), and registers the
  context on the flow object for subsequent runs.

  Args:
    args: FlowRunnerArgs to initialize from, or None for defaults.

  Returns:
    The freshly created context DataObject.
  """
  if args is None:
    args = FlowRunnerArgs()

  output_collection = self._CreateOutputCollection(args)
  # Output collection is nullified when flow is terminated, so we're
  # keeping the urn separately for further reference.
  output_urn = (output_collection is not None) and output_collection.urn

  plugins_states = []
  for descriptor in args.output_plugins:
    plugin = descriptor.GetPluginClass()(
        output_urn, args=descriptor.plugin_args, token=self.token)
    try:
      plugin.Initialize()
    except Exception as e:  # pylint: disable=broad-except
      # A misbehaving plugin must not prevent the flow from starting.
      self.Log("Plugin %s failed to initialize (%s), ignoring it." %
               (plugin, e))
    else:
      plugins_states.append((descriptor, plugin.state))

  context = utils.DataObject(
      args=args,
      backtrace=None,
      client_resources=rdf_client.ClientResources(),
      create_time=rdfvalue.RDFDatetime().Now(),
      creator=args.creator or self.token.username,
      current_state="Start",
      # If not None, kill-stuck-flow notification is scheduled at the given
      # time.
      kill_timestamp=None,
      network_bytes_sent=0,
      next_outbound_id=1,
      next_processed_request=1,
      next_states=set(),
      output=output_collection,
      output_plugins_states=plugins_states,
      output_urn=output_urn,
      outstanding_requests=0,
      remaining_cpu_quota=args.cpu_limit,
      state=rdf_flows.Flow.State.RUNNING,
      # Have we sent a notification to the user.
      user_notified=False,
  )

  # Store the context in the flow_obj for next time.
  self.flow_obj.state.Register("context", context)

  return context
def InitializeContext(self, args):
  """Initializes the context of this hunt.

  Builds the runner context DataObject for a hunt and registers it on
  the flow object for subsequent runs.

  Args:
    args: HuntRunnerArgs to initialize from, or None for defaults.

  Returns:
    The freshly created context DataObject.

  Raises:
    RuntimeError: If args.client_limit exceeds 1000.
  """
  if args is None:
    args = HuntRunnerArgs()

  # For large hunts, checking client limits creates a high load on the
  # foreman since it needs to read the hunt object's client list. We
  # therefore don't allow setting it for large hunts. Note that a
  # client_limit of 0 means unlimited, which is allowed (the foreman then
  # does not need to check the client list).
  if args.client_limit > 1000:
    raise RuntimeError("Please specify client_limit <= 1000.")

  context = utils.DataObject(
      args=args,
      backtrace=None,
      client_resources=rdf_client.ClientResources(),
      create_time=rdfvalue.RDFDatetime().Now(),
      creator=self.token.username,
      current_state=None,
      expires=args.expiry_time.Expiry(),
      # If not None, kill-stuck-flow notification is scheduled at the given
      # time.
      kill_timestamp=None,
      network_bytes_sent=0,
      next_client_due=0,
      next_outbound_id=1,
      next_processed_request=1,
      next_states=set(),
      outstanding_requests=0,
      remaining_cpu_quota=args.cpu_limit,
      start_time=rdfvalue.RDFDatetime().Now(),
      # Hunts are always in the running state.
      state=rdf_flows.Flow.State.RUNNING,
      usage_stats=rdf_stats.ClientResourcesStats(),
  )

  # Store the context in the flow_obj for next time.
  self.flow_obj.state.Register("context", context)

  return context