Example #1
 def GenerateToken(self, username, reason):
   return access_control.ACLToken(username=username, reason=reason)
Example #2
 def CheckCronJobAccess(self, username, cron_job_id):
     token = access_control.ACLToken(username=username)
     self.legacy_manager.CheckCronJobAccess(token, cron_job_id.ToURN())
Example #3
def TestHuntHelperWithMultipleMocks(client_mocks,
                                    token=None,
                                    iteration_limit=None,
                                    worker=None):
    """Runs a hunt with a given set of clients mocks.

  Args:
    client_mocks: Dictionary of (client_id->client_mock) pairs. Client mock
      objects are used to handle client actions. Methods names of a client mock
      object correspond to client actions names. For an example of a client mock
      object, see SampleHuntMock.
    token: An instance of access_control.ACLToken security token.
    iteration_limit: If None, hunt will run until it's finished. Otherwise,
      worker_mock.Next() will be called iteration_limit number of times. Every
      iteration processes worker's message queue. If new messages are sent to
      the queue during the iteration processing, they will be processed on next
      iteration.
    worker: flow_test_lib.TestWorker object to use.

  Returns:
    A number of iterations complete.
  """

    if token is None:
        token = access_control.ACLToken(username="******")

    # The worker always runs with absolute privileges, so make the token
    # SetUID().
    token = token.SetUID()

    client_mocks = [
        flow_test_lib.MockClient(client_id, client_mock, token=token)
        for client_id, client_mock in client_mocks.items()
    ]

    if worker is None:
        rel_db_worker = flow_test_lib.TestWorker()
        data_store.REL_DB.RegisterFlowProcessingHandler(
            rel_db_worker.ProcessFlow)
    else:
        rel_db_worker = worker

    num_iterations = 0

    try:
        # Run the clients and worker until nothing changes any more.
        while iteration_limit is None or num_iterations < iteration_limit:
            data_store.REL_DB.delegate.WaitUntilNoFlowsToProcess(timeout=10)
            worker_processed = rel_db_worker.ResetProcessedFlows()

            client_processed = 0

            for client_mock in client_mocks:
                client_processed += int(client_mock.Next())

            num_iterations += 1

            if client_processed == 0 and not worker_processed:
                break

    finally:
        if worker is None:
            data_store.REL_DB.UnregisterFlowProcessingHandler(timeout=60)
            rel_db_worker.Shutdown()

    return num_iterations
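A minimal usage sketch for the helper above (hedged: the client ids are made up, and SampleHuntMock stands in for whatever client mock class the test suite provides, as referenced in the docstring):

# Hypothetical usage: run a hunt against two mocked clients.
client_mocks = {
    "C.0000000000000001": SampleHuntMock(),  # assumed mock class
    "C.0000000000000002": SampleHuntMock(),
}
num_iterations = TestHuntHelperWithMultipleMocks(
    client_mocks,
    token=access_control.ACLToken(username="test"),
    iteration_limit=100)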
Example #4
 def testTokenWithUsernameAndReasonIsValid(self):
   token = access_control.ACLToken(username="******", reason="For testing")
   user_managers.ValidateToken(token, "aff4:/C.0000000000000001")
Example #5
 def CheckClientAccess(self, username, client_id):
     token = access_control.ACLToken(username=username)
     self.legacy_manager.CheckClientAccess(token, client_id.ToClientURN())
Example #6
 def __init__(self, run_state, job):
   self.run_state = run_state
   self.job = job
   self.token = access_control.ACLToken(username="******")
Example #7
 def testRaisesIfTraitsSetInRequest(self):
     user = user_plugin.ApiGrrUser(
         interface_traits=user_plugin.ApiGrrUserInterfaceTraits())
     with self.assertRaises(ValueError):
         self.handler.Handle(user,
                             token=access_control.ACLToken(username=u"foo"))
Example #8
 def __init__(self, run_state, job):
     self.run_state = run_state
     self.job = job
     self.token = access_control.ACLToken(username=CRON_JOB_USERNAME)
Example #9
def main(argv):
  del argv  # Unused.

  if flags.FLAGS.version:
    print("GRR FUSE {}".format(config_server.VERSION["packageversion"]))
    return

  config.CONFIG.AddContext(contexts.COMMAND_LINE_CONTEXT,
                           "Context applied for all command line tools")
  server_startup.Init()

  if fuse is None:
    logging.fatal("""Could not start!
fusepy must be installed to run fuse_mount.py!
Try to run:
  pip install fusepy

inside your virtualenv.
""")
    sys.exit(1)

  if not flags.FLAGS.mountpoint:
    Usage()
    sys.exit(1)

  # We use multiple inheritance with GRRFuse and fuse.Operations. If fuse is
  # present, we run the actual FUSE layer, since we have fuse.Operations. If
  # fuse is not present, we have already exited by now when run from the
  # command line; otherwise we have been imported, and we run the tests using
  # a mock fuse.

  class FuseOperation(GRRFuse, fuse.Operations):
    pass

  root = flags.FLAGS.aff4path

  username = flags.FLAGS.username or getpass.getuser()
  data_store.default_token = access_control.ACLToken(
      username=username, reason=flags.FLAGS.reason or "fusemount")

  logging.info("fuse_mount.py is mounting %s at %s....", root,
               flags.FLAGS.mountpoint)

  refresh_policy = flags.FLAGS.refresh_policy

  if refresh_policy == "always":
    max_age_before_refresh = datetime.timedelta(0)
  elif refresh_policy == "never":
    # Set the max age to be the maximum possible time difference.
    max_age_before_refresh = datetime.timedelta.max
  elif refresh_policy == "if_older_than_max_age":
    max_age_before_refresh = datetime.timedelta(
        seconds=flags.FLAGS.max_age_before_refresh)
  else:
    # Otherwise, a flag outside the enum was given and the flag validator threw
    # an exception.
    pass

  fuse_operation = FuseOperation(
      root=root,
      token=data_store.default_token,
      max_age_before_refresh=max_age_before_refresh,
      ignore_cache=flags.FLAGS.ignore_cache,
      force_sparse_image=flags.FLAGS.force_sparse_image,
      sparse_image_threshold=flags.FLAGS.sparse_image_threshold,
      timeout=flags.FLAGS.timeout)

  fuse.FUSE(
      fuse_operation,
      flags.FLAGS.mountpoint,
      foreground=not flags.FLAGS.background)
Example #10
    def testUsesCollectionTimeFiles(self, db: abstract_db.Database):
        context = _CreateContext(db)
        client_id = db_test_utils.InitializeClient(db)

        snapshot = rdf_objects.ClientSnapshot()
        snapshot.client_id = client_id
        snapshot.knowledge_base.os = "redox"
        db.WriteClientSnapshot(snapshot)

        with temp.AutoTempFilePath() as temp_filepath:
            fake_artifact_source = rdf_artifacts.ArtifactSource(
                type=rdf_artifacts.ArtifactSource.SourceType.FILE,
                attributes={
                    "paths": [temp_filepath],
                })

            fake_artifact = rdf_artifacts.Artifact(
                name="FakeArtifact",
                doc="Lorem ipsum.",
                sources=[fake_artifact_source])

            flow_args = rdf_artifacts.ArtifactCollectorFlowArgs()
            flow_args.artifact_list = [fake_artifact.name]
            flow_args.apply_parsers = False

            with io.open(temp_filepath, mode="wb") as temp_filedesc:
                temp_filedesc.write(b"OLD")

            with mock.patch.object(
                    artifact_registry, "REGISTRY",
                    artifact_registry.ArtifactRegistry()) as registry:
                registry.RegisterArtifact(fake_artifact)

                # First, we run the artifact collector to collect the old file and save
                # the flow id to parse the results later.
                flow_id = flow_test_lib.TestFlowHelper(
                    collectors.ArtifactCollectorFlow.__name__,
                    action_mocks.FileFinderClientMock(),
                    client_id=client_id,
                    args=flow_args,
                    token=access_control.ACLToken(username=context.username))

                flow_test_lib.FinishAllFlowsOnClient(client_id)

            with io.open(temp_filepath, mode="wb") as temp_filedesc:
                temp_filedesc.write(b"NEW")

            with mock.patch.object(
                    artifact_registry, "REGISTRY",
                    artifact_registry.ArtifactRegistry()) as registry:
                registry.RegisterArtifact(fake_artifact)

                # Now, we run the artifact collector again to collect the new
                # version of the file on the server. The parsing should be
                # performed against the previous flow.
                flow_test_lib.TestFlowHelper(
                    collectors.ArtifactCollectorFlow.__name__,
                    action_mocks.FileFinderClientMock(),
                    client_id=client_id,
                    args=flow_args,
                    token=access_control.ACLToken(username=context.username))

                flow_test_lib.FinishAllFlowsOnClient(client_id)

        class FakeFileParser(
                abstract_parser.SingleFileParser[rdfvalue.RDFBytes]):

            supported_artifacts = [fake_artifact.name]

            def ParseFile(
                self,
                knowledge_base: rdf_client.KnowledgeBase,
                pathspec: rdf_paths.PathSpec,
                filedesc: file_store.BlobStream,
            ) -> Iterable[rdfvalue.RDFBytes]:
                del knowledge_base, pathspec  # Unused.
                return [rdfvalue.RDFBytes(filedesc.Read())]

        with parser_test_lib._ParserContext("FakeFile", FakeFileParser):
            args = flow_plugin.ApiListParsedFlowResultsArgs(
                client_id=client_id, flow_id=flow_id, offset=0, count=1024)

            result = self.handler.Handle(args, context=context)

        self.assertEmpty(result.errors)
        self.assertLen(result.items, 1)

        response = result.items[0].payload
        self.assertEqual(response, b"OLD")
Example #11
    def TerminateFlow(cls,
                      flow_id,
                      reason=None,
                      status=None,
                      token=None,
                      force=False):
        """Terminate a flow.

    Args:
      flow_id: The flow session_id to terminate.
      reason: A reason to log.
      status: Status code used in the generated status message.
      token: The access token to be used for this request.
      force: If True, terminate locked flows hard.

    Raises:
      FlowError: If the flow cannot be found.
    """
        if not force:
            flow_obj = aff4.FACTORY.OpenWithLock(flow_id,
                                                 aff4_type=GRRFlow,
                                                 blocking=True,
                                                 token=token)
        else:
            flow_obj = aff4.FACTORY.Open(flow_id,
                                         aff4_type=GRRFlow,
                                         mode="rw",
                                         token=token)

        if not flow_obj:
            raise FlowError("Could not terminate flow %s" % flow_id)

        with flow_obj:
            runner = flow_obj.GetRunner()
            if not runner.IsRunning():
                return

            if token is None:
                token = access_control.ACLToken()

            if reason is None:
                reason = "Manual termination by console."

            # This calls runner.Terminate to kill the flow
            runner.Error(reason, status_code=status)

            flow_obj.Log("Terminated by user {0}. Reason: {1}".format(
                token.username, reason))

            # From now on we run with supervisor access
            super_token = token.SetUID()

            # Also terminate its children
            children_to_kill = aff4.FACTORY.MultiOpen(flow_obj.ListChildren(),
                                                      token=super_token,
                                                      aff4_type=GRRFlow)

            for child_obj in children_to_kill:
                cls.TerminateFlow(child_obj.urn,
                                  reason="Parent flow terminated.",
                                  token=super_token,
                                  force=force)
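A hedged usage sketch for TerminateFlow (the flow URN below is illustrative; per the snippet, this is a classmethod, so it is invoked on GRRFlow):

# Hypothetical: terminate a flow and its children as a named user.
token = access_control.ACLToken(username="admin", reason="Stuck flow")
GRRFlow.TerminateFlow(
    rdfvalue.RDFURN("aff4:/C.0000000000000001/flows/W:ABCDEF"),  # made-up id
    reason="Operator requested termination.",
    token=token)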
Example #12
    def testUsesKnowledgebaseFromFlow(self, db: abstract_db.Database):
        context = _CreateContext(db)

        client_id = db_test_utils.InitializeClient(db)

        # This is the snapshot that is visible to the flow and should be used for
        # parsing results.
        snapshot = rdf_objects.ClientSnapshot()
        snapshot.client_id = client_id
        snapshot.knowledge_base.os = "redox"
        db.WriteClientSnapshot(snapshot)

        with mock.patch.object(
                artifact_registry, "REGISTRY",
                artifact_registry.ArtifactRegistry()) as registry:
            registry.RegisterArtifact(self.ECHO1337_ARTIFACT)

            flow_args = rdf_artifacts.ArtifactCollectorFlowArgs()
            flow_args.artifact_list = [self.ECHO1337_ARTIFACT.name]
            flow_args.apply_parsers = False

            flow_id = flow_test_lib.TestFlowHelper(
                collectors.ArtifactCollectorFlow.__name__,
                self.FakeExecuteCommand(),
                client_id=client_id,
                args=flow_args,
                token=access_control.ACLToken(username=context.username))

        class FakeParser(
                abstract_parser.SingleResponseParser[
                    rdf_client_action.ExecuteResponse], ):

            supported_artifacts = [self.ECHO1337_ARTIFACT.name]

            def ParseResponse(
                self,
                knowledge_base: rdf_client.KnowledgeBase,
                response: rdf_client_action.ExecuteResponse,
            ) -> Iterable[rdf_client_action.ExecuteResponse]:
                precondition.AssertType(response,
                                        rdf_client_action.ExecuteResponse)

                parsed_response = rdf_client_action.ExecuteResponse()
                parsed_response.stdout = response.stdout
                parsed_response.stderr = knowledge_base.os.encode("utf-8")
                return [parsed_response]

        # This is a snapshot written to the database after the responses were
        # collected, so this should not be used for parsing.
        snapshot = rdf_objects.ClientSnapshot()
        snapshot.client_id = client_id
        snapshot.knowledge_base.os = "linux"
        db.WriteClientSnapshot(snapshot)

        with parser_test_lib._ParserContext("Fake", FakeParser):
            args = flow_plugin.ApiListParsedFlowResultsArgs(
                client_id=client_id, flow_id=flow_id, offset=0, count=1024)

            result = self.handler.Handle(args, context=context)

        self.assertEmpty(result.errors)
        self.assertLen(result.items, 1)

        response = result.items[0].payload
        self.assertIsInstance(response, rdf_client_action.ExecuteResponse)
        self.assertEqual(response.stdout, b"1337")
        self.assertEqual(response.stderr.decode("utf-8"), "redox")
Example #13
def StartFlow(client_id=None,
              cpu_limit=7200,
              creator=None,
              flow_args=None,
              flow_cls=None,
              network_bytes_limit=None,
              original_flow=None,
              output_plugins=None,
              parent_flow_obj=None,
              **kwargs):
    """The main factory function for creating and executing a new flow.

  Args:
    client_id: ID of the client this flow should run on.
    cpu_limit: CPU limit in seconds for this flow.
    creator: Username that requested this flow.
    flow_args: An arg protocol buffer which is an instance of the required
      flow's args_type class attribute.
    flow_cls: Class of the flow that should be started.
    network_bytes_limit: Limit on the network traffic this flow can generate.
    original_flow: A FlowReference object in case this flow was copied from
      another flow.
    output_plugins: An OutputPluginDescriptor object indicating what output
      plugins should be used for this flow.
    parent_flow_obj: A parent flow object. None if this is a top level flow.
    **kwargs: If args or runner_args are not specified, we construct these
      protobufs from these keywords.

  Returns:
    The flow id of the new flow.

  Raises:
    ValueError: Unknown or invalid parameters were provided.
  """
    # Is the required flow a known flow?
    try:
        registry.FlowRegistry.FlowClassByName(flow_cls.__name__)
    except ValueError:
        stats_collector_instance.Get().IncrementCounter(
            "grr_flow_invalid_flow_count")
        raise ValueError("Unable to locate flow %s" % flow_cls.__name__)

    if not client_id:
        raise ValueError("Client_id is needed to start a flow.")

    # Now parse the flow args into the new object from the keywords.
    if flow_args is None:
        flow_args = flow_cls.args_type()

    FilterArgsFromSemanticProtobuf(flow_args, kwargs)
    # At this point we should have exhausted all the keyword args. If any are
    # left over, we do not know what to do with them, so raise.
    if kwargs:
        raise type_info.UnknownArg("Unknown parameters to StartFlow: %s" %
                                   kwargs)

    # Check that the flow args are valid.
    flow_args.Validate()

    rdf_flow = rdf_flow_objects.Flow(client_id=client_id,
                                     flow_class_name=flow_cls.__name__,
                                     args=flow_args,
                                     create_time=rdfvalue.RDFDatetime.Now(),
                                     creator=creator,
                                     output_plugins=output_plugins,
                                     original_flow=original_flow,
                                     flow_state="RUNNING")

    rdf_flow.flow_id = "%08X" % random.PositiveUInt32()

    if parent_flow_obj:
        parent_rdf_flow = parent_flow_obj.rdf_flow
        rdf_flow.long_flow_id = "%s/%s" % (parent_rdf_flow.long_flow_id,
                                           rdf_flow.flow_id)
        rdf_flow.parent_flow_id = parent_rdf_flow.flow_id
        rdf_flow.parent_request_id = parent_flow_obj.GetCurrentOutboundId()
        if parent_rdf_flow.creator:
            rdf_flow.creator = parent_rdf_flow.creator
    else:
        rdf_flow.long_flow_id = "%s/%s" % (client_id, rdf_flow.flow_id)

    if output_plugins:
        rdf_flow.output_plugins_states = GetOutputPluginStates(
            output_plugins,
            rdf_flow.long_flow_id,
            token=access_control.ACLToken(username=rdf_flow.creator))

    if network_bytes_limit is not None:
        rdf_flow.network_bytes_limit = network_bytes_limit
    if cpu_limit is not None:
        rdf_flow.cpu_limit = cpu_limit

    logging.info(u"Scheduling %s(%s) on %s", rdf_flow.long_flow_id,
                 rdf_flow.flow_class_name, client_id)

    flow_obj = flow_cls(rdf_flow)
    # Just run the first state inline. NOTE: Running synchronously means
    # that this runs on the thread that starts the flow. The advantage is
    # that the Start method can raise any errors immediately.
    flow_obj.Start()
    flow_obj.PersistState()

    # The flow does not need to actually remain running.
    if not flow_obj.outstanding_requests:
        flow_obj.RunStateMethod("End")
        flow_obj.MarkDone()

    data_store.REL_DB.WriteFlowObject(flow_obj.rdf_flow)

    if parent_flow_obj is not None:
        # We can optimize here and not write requests/responses to the database
        # since we have to do this for the parent flow at some point anyways.
        parent_flow_obj.MergeQueuedMessages(flow_obj)
    else:
        flow_obj.FlushQueuedMessages()

        # Publish an audit event, only for top level flows.
        # TODO(amoser): split urn field into dedicated strings.
        events.Events.PublishEvent(
            "Audit",
            rdf_events.AuditEvent(user=creator,
                                  action="RUN_FLOW",
                                  flow_name=rdf_flow.flow_class_name,
                                  urn=rdf_flow.long_flow_id,
                                  client=client_id))

    return rdf_flow.flow_id
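A minimal invocation sketch for this StartFlow variant (hedged: the client id is a placeholder, and FileFinder with its args type is used purely as an illustrative flow class; any registered flow class with a matching args_type would do):

# Hypothetical: start a FileFinder flow for a given client.
flow_id = StartFlow(
    client_id="C.0000000000000001",
    flow_cls=file_finder.FileFinder,           # assumed flow class
    flow_args=rdf_file_finder.FileFinderArgs(  # assumed args type
        paths=["/tmp/*"]),
    creator="test")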
Example #14
  def GetApprovalForObject(object_urn, token=None, username=""):
    """Looks for approvals for an object and returns available valid tokens.

    Args:
      object_urn: URN of the object we want access to.
      token: The token to use to look up the ACLs.
      username: The user to get the approval for; if "" we get it from the
        token.

    Returns:
      A token for access to the object on success, otherwise raises.

    Raises:
      UnauthorizedAccess: If there are no valid approvals available.
    """
    if token is None:
      raise access_control.UnauthorizedAccess(
          "No token given, cannot authenticate.")

    if not username:
      username = token.username

    approvals_root_urn = aff4.ROOT_URN.Add("ACL").Add(
        object_urn.Path()).Add(username)

    children_urns = list(aff4.FACTORY.ListChildren(approvals_root_urn))
    if not children_urns:
      raise access_control.UnauthorizedAccess(
          "No approval found for user %s" % utils.SmartStr(username),
          subject=object_urn)

    last_error = None
    approvals = aff4.FACTORY.MultiOpen(
        children_urns,
        mode="r",
        aff4_type=Approval,
        age=aff4.ALL_TIMES,
        token=token)
    for approval in approvals:
      try:
        test_token = access_control.ACLToken(
            username=username, reason=approval.Get(approval.Schema.REASON))
        approval.CheckAccess(test_token)

        return test_token
      except access_control.UnauthorizedAccess as e:
        last_error = e

    if last_error:
      # We tried all possible approvals, but got no usable results.
      raise access_control.UnauthorizedAccess(last_error, subject=object_urn)
    else:
      # If last_error is None, it means that none of the URNs in children_urns
      # could be opened. This shouldn't really happen, but we have to make
      # sure to provide a meaningful error message.
      raise access_control.UnauthorizedAccess(
          "Couldn't open any of %d approvals "
          "for user %s" % (len(children_urns), utils.SmartStr(username)),
          subject=object_urn)
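A hedged usage sketch (the client URN is illustrative; per the snippet, the function raises UnauthorizedAccess unless a valid approval exists):

# Hypothetical: exchange a plain token for an approval-backed token.
token = access_control.ACLToken(username="analyst", reason="investigation")
approved_token = GetApprovalForObject(
    rdfvalue.RDFURN("aff4:/C.0000000000000001"), token=token)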
Example #15
 def ListCronJobApprovals(self, requestor=None):
   requestor = requestor or self.token.username
   handler = api_user.ApiListCronJobApprovalsHandler()
   return handler.Handle(
       api_user.ApiListCronJobApprovalsArgs(),
       token=access_control.ACLToken(username=requestor)).items
Example #16
def InitGRRRootAPI():
    return api.GrrApi(connector=api_shell_raw_access_lib.RawConnector(
        token=access_control.ACLToken(username="******"),
        page_size=_GRR_API_PAGE_SIZE)).root
Example #17
 def SecurityCheck(self, func, request, *args, **kwargs):
     """A decorator applied to protected web handlers."""
     request.user = self.username
     request.token = access_control.ACLToken(username="******",
                                             reason="Just a test")
     return func(request, *args, **kwargs)
Example #18
 def testRendersObjectForNonExistingUser(self):
     result = self.handler.Handle(
         None, token=access_control.ACLToken(username="******"))
     self.assertEqual(result.username, "foo")
Example #19
def GetToken():
    # Extend for user authorization
    # SetUID is required to create and write to various aff4 paths when updating
    # config.
    return access_control.ACLToken(username="******").SetUID()
Example #20
def StartFlow(client_id=None,
              cpu_limit=None,
              creator=None,
              flow_args=None,
              flow_cls=None,
              network_bytes_limit=None,
              original_flow=None,
              output_plugins=None,
              start_at=None,
              parent_flow_obj=None,
              parent_hunt_id=None,
              **kwargs):
    """The main factory function for creating and executing a new flow.

  Args:
    client_id: ID of the client this flow should run on.
    cpu_limit: CPU limit in seconds for this flow.
    creator: Username that requested this flow.
    flow_args: An arg protocol buffer which is an instance of the required
      flow's args_type class attribute.
    flow_cls: Class of the flow that should be started.
    network_bytes_limit: Limit on the network traffic this flow can generate.
    original_flow: A FlowReference object in case this flow was copied from
      another flow.
    output_plugins: An OutputPluginDescriptor object indicating what output
      plugins should be used for this flow.
    start_at: If specified, the flow will be started not immediately, but at
      the given time.
    parent_flow_obj: A parent flow object. None if this is a top level flow.
    parent_hunt_id: String identifying parent hunt. Can't be passed together
      with parent_flow_obj.
    **kwargs: If args or runner_args are not specified, we construct these
      protobufs from these keywords.

  Returns:
    The flow id of the new flow.

  Raises:
    ValueError: Unknown or invalid parameters were provided.
  """

    if parent_flow_obj is not None and parent_hunt_id is not None:
        raise ValueError(
            "parent_flow_obj and parent_hunt_id are mutually exclusive.")

    # Is the required flow a known flow?
    try:
        registry.FlowRegistry.FlowClassByName(flow_cls.__name__)
    except ValueError:
        stats_collector_instance.Get().IncrementCounter(
            "grr_flow_invalid_flow_count")
        raise ValueError("Unable to locate flow %s" % flow_cls.__name__)

    if not client_id:
        raise ValueError("Client_id is needed to start a flow.")

    # Now parse the flow args into the new object from the keywords.
    if flow_args is None:
        flow_args = flow_cls.args_type()

    FilterArgsFromSemanticProtobuf(flow_args, kwargs)
    # At this point we should have exhausted all the keyword args. If any are
    # left over, we do not know what to do with them, so raise.
    if kwargs:
        raise type_info.UnknownArg("Unknown parameters to StartFlow: %s" %
                                   kwargs)

    # Check that the flow args are valid.
    flow_args.Validate()

    rdf_flow = rdf_flow_objects.Flow(client_id=client_id,
                                     flow_class_name=flow_cls.__name__,
                                     args=flow_args,
                                     create_time=rdfvalue.RDFDatetime.Now(),
                                     creator=creator,
                                     output_plugins=output_plugins,
                                     original_flow=original_flow,
                                     flow_state="RUNNING")

    if parent_hunt_id is not None and parent_flow_obj is None:
        rdf_flow.flow_id = parent_hunt_id
        if IsLegacyHunt(parent_hunt_id):
            rdf_flow.flow_id = rdf_flow.flow_id[2:]
    else:
        rdf_flow.flow_id = RandomFlowId()

    # For better performance, check for conflicting IDs only for top-level
    # flows.
    if not parent_flow_obj:
        try:
            data_store.REL_DB.ReadFlowObject(client_id, rdf_flow.flow_id)
            raise CanNotStartFlowWithExistingIdError(client_id,
                                                     rdf_flow.flow_id)
        except db.UnknownFlowError:
            pass

    if parent_flow_obj:  # A flow is a nested flow.
        parent_rdf_flow = parent_flow_obj.rdf_flow
        rdf_flow.long_flow_id = "%s/%s" % (parent_rdf_flow.long_flow_id,
                                           rdf_flow.flow_id)
        rdf_flow.parent_flow_id = parent_rdf_flow.flow_id
        rdf_flow.parent_hunt_id = parent_rdf_flow.parent_hunt_id
        rdf_flow.parent_request_id = parent_flow_obj.GetCurrentOutboundId()
        if parent_rdf_flow.creator:
            rdf_flow.creator = parent_rdf_flow.creator
    elif parent_hunt_id:  # A flow is a root-level hunt-induced flow.
        rdf_flow.long_flow_id = "%s/%s" % (client_id, rdf_flow.flow_id)
        rdf_flow.parent_hunt_id = parent_hunt_id
    else:  # A flow is a root-level non-hunt flow.
        rdf_flow.long_flow_id = "%s/%s" % (client_id, rdf_flow.flow_id)

    if output_plugins:
        rdf_flow.output_plugins_states = GetOutputPluginStates(
            output_plugins,
            rdf_flow.long_flow_id,
            token=access_control.ACLToken(username=rdf_flow.creator))

    if network_bytes_limit is not None:
        rdf_flow.network_bytes_limit = network_bytes_limit
    if cpu_limit is not None:
        rdf_flow.cpu_limit = cpu_limit

    logging.info(u"Scheduling %s(%s) on %s (%s)", rdf_flow.long_flow_id,
                 rdf_flow.flow_class_name, client_id, start_at or "now")

    rdf_flow.current_state = "Start"

    flow_obj = flow_cls(rdf_flow)
    if start_at is None:

        # Store an initial version of the flow straight away. This is needed
        # so the database doesn't raise consistency errors due to missing
        # parent keys when logs / errors / results are written in Start().
        data_store.REL_DB.WriteFlowObject(flow_obj.rdf_flow)

        # Just run the first state inline. NOTE: Running synchronously means
        # that this runs on the thread that starts the flow. The advantage is
        # that the Start method can raise any errors immediately.
        flow_obj.Start()

        # The flow does not need to actually remain running.
        if not flow_obj.outstanding_requests:
            flow_obj.RunStateMethod("End")
            # Additional check for the correct state in case the End method raised and
            # terminated the flow.
            if flow_obj.IsRunning():
                flow_obj.MarkDone()
    else:
        flow_obj.CallState("Start", start_time=start_at)

    flow_obj.PersistState()

    data_store.REL_DB.WriteFlowObject(flow_obj.rdf_flow)

    if parent_flow_obj is not None:
        # We can optimize here and not write requests/responses to the database
        # since we have to do this for the parent flow at some point anyways.
        parent_flow_obj.MergeQueuedMessages(flow_obj)
    else:
        flow_obj.FlushQueuedMessages()

        # Publish an audit event, only for top level flows.
        # TODO(amoser): split urn field into dedicated strings.
        events.Events.PublishEvent(
            "Audit",
            rdf_events.AuditEvent(user=creator,
                                  action="RUN_FLOW",
                                  flow_name=rdf_flow.flow_class_name,
                                  urn=rdf_flow.long_flow_id,
                                  client=client_id))

    return rdf_flow.flow_id
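Compared with Example #13, this variant can also start the per-client top-level flow of a hunt via parent_hunt_id; a hedged sketch (the flow class and hunt id format are illustrative):

# Hypothetical: start a hunt-induced top-level flow on one client.
flow_id = StartFlow(
    client_id="C.0000000000000001",
    flow_cls=file_finder.FileFinder,  # assumed flow class
    parent_hunt_id="123456ABCDEF",    # illustrative hunt id
    creator="test")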
Example #21
 def __init__(self):
     self.token = access_control.ACLToken(username="******",
                                          reason="Implied.")
     self.token.supervisor = True
Example #22
    def setUp(self):
        super(GRRBaseTest, self).setUp()

        test_user = u"test"

        system_users_patcher = mock.patch.object(
            access_control, "SYSTEM_USERS",
            frozenset(itertools.chain(access_control.SYSTEM_USERS,
                                      [test_user])))
        system_users_patcher.start()
        self.addCleanup(system_users_patcher.stop)

        self.token = access_control.ACLToken(username=test_user,
                                             reason="Running tests")

        self.temp_dir = temp.TempDirPath()
        config.CONFIG.SetWriteBack(
            os.path.join(self.temp_dir, "writeback.yaml"))
        self.addCleanup(
            lambda: shutil.rmtree(self.temp_dir, ignore_errors=True))

        # Each datastore is wrapped with DatabaseValidationWrapper, so we have
        # to access the delegate directly (assuming it's an InMemoryDB
        # implementation).
        data_store.REL_DB.delegate.ClearTestDB()

        email_alerts.InitializeEmailAlerterOnce()

        # Stub out the email function
        self.emails_sent = []

        def SendEmailStub(to_user, from_user, subject, message,
                          **unused_kwargs):
            self.emails_sent.append((to_user, from_user, subject, message))

        self.mail_stubber = utils.MultiStubber(
            (email_alerts.EMAIL_ALERTER, "SendEmail", SendEmailStub),
            (email.utils, "make_msgid", lambda: "<message id stub>"))
        self.mail_stubber.Start()
        self.addCleanup(self.mail_stubber.Stop)

        # We don't want to send actual email in our tests
        self.smtp_patcher = mock.patch("smtplib.SMTP")
        self.mock_smtp = self.smtp_patcher.start()
        self.addCleanup(self.smtp_patcher.stop)

        def DisabledSet(*unused_args, **unused_kw):
            raise NotImplementedError(
                "Usage of Set() is disabled, please use a configoverrider in tests."
            )

        self.config_set_disable = utils.Stubber(config.CONFIG, "Set",
                                                DisabledSet)
        self.config_set_disable.Start()
        self.addCleanup(self.config_set_disable.Stop)

        self._SetupFakeStatsContext()

        # Turn off WithLimitedCallFrequency-based caching in tests. Tests that
        # need to test caching behavior should turn it on explicitly.
        with_limited_call_frequency_stubber = utils.Stubber(
            cache, "WITH_LIMITED_CALL_FREQUENCY_PASS_THROUGH", True)
        with_limited_call_frequency_stubber.Start()
        self.addCleanup(with_limited_call_frequency_stubber.Stop)
Example #23
 def testTokenWithoutUsernameIsNotValid(self):
   token = access_control.ACLToken(reason="For testing")
   with self.assertRaises(access_control.UnauthorizedAccess):
     user_managers.ValidateToken(token, "aff4:/C.0000000000000001")
Example #24
def TestHuntHelperWithMultipleMocks(client_mocks,
                                    check_flow_errors=False,
                                    token=None,
                                    iteration_limit=None,
                                    worker=None):
    """Runs a hunt with a given set of clients mocks.

  Args:
    client_mocks: Dictionary of (client_id->client_mock) pairs. Client mock
      objects are used to handle client actions. Methods names of a client mock
      object correspond to client actions names. For an example of a client mock
      object, see SampleHuntMock.
    check_flow_errors: If True, raises when one of hunt-initiated flows fails.
    token: An instance of access_control.ACLToken security token.
    iteration_limit: If None, hunt will run until it's finished. Otherwise,
      worker_mock.Next() will be called iteration_limit number of times. Every
      iteration processes worker's message queue. If new messages are sent to
      the queue during the iteration processing, they will be processed on next
      iteration.
    worker: flow_test_lib.TestWorker object to use.

  Returns:
    A number of iterations complete.
  """

    if token is None:
        token = access_control.ACLToken(username="******")

    total_flows = set()

    # The worker always runs with absolute privileges, so make the token
    # SetUID().
    token = token.SetUID()

    client_mocks = [
        flow_test_lib.MockClient(client_id, client_mock, token=token)
        for client_id, client_mock in iteritems(client_mocks)
    ]

    if worker is None:
        rel_db_worker = flow_test_lib.TestWorker(threadpool_size=0, token=True)
        data_store.REL_DB.RegisterFlowProcessingHandler(
            rel_db_worker.ProcessFlow)
    else:
        rel_db_worker = worker

    num_iterations = 0

    try:
        worker_mock = worker_test_lib.MockWorker(
            check_flow_errors=check_flow_errors, token=token)

        # Run the clients and worker until nothing changes any more.
        while iteration_limit is None or num_iterations < iteration_limit:
            worker_processed = []
            if data_store.RelationalDBEnabled():
                data_store.REL_DB.delegate.WaitUntilNoFlowsToProcess(
                    timeout=10)
                worker_processed = rel_db_worker.ResetProcessedFlows()

            client_processed = 0

            for client_mock in client_mocks:
                client_processed += client_mock.Next()

            flows_run = []

            for flow_run in worker_mock.Next():
                total_flows.add(flow_run)
                flows_run.append(flow_run)

            flows_run.extend(worker_processed)

            num_iterations += 1

            if client_processed == 0 and not flows_run and not worker_processed:
                break

        if check_flow_errors:
            flow_test_lib.CheckFlowErrors(total_flows, token=token)
    finally:
        if worker is None:
            data_store.REL_DB.UnregisterFlowProcessingHandler(timeout=60)
            rel_db_worker.Shutdown()

    return num_iterations
Example #25
 def CheckHuntAccess(self, username, hunt_id):
     token = access_control.ACLToken(username=username)
     self.legacy_manager.CheckHuntAccess(token, hunt_id.ToURN())
Example #26
def GetToken():
    user = getpass.getuser()
    return access_control.ACLToken(username=user)
Example #27
 def CheckIfCanStartClientFlow(self, username, flow_name):
     token = access_control.ACLToken(username=username)
     self.legacy_manager.CheckIfCanStartFlow(token, flow_name)
Example #28
def StartFlow(client_id=None,
              cpu_limit=None,
              creator=None,
              flow_args=None,
              flow_cls=None,
              network_bytes_limit=None,
              original_flow=None,
              output_plugins=None,
              start_at=None,
              parent=None,
              runtime_limit=None,
              **kwargs):
  """The main factory function for creating and executing a new flow.

  Args:
    client_id: ID of the client this flow should run on.
    cpu_limit: CPU limit in seconds for this flow.
    creator: Username that requested this flow.
    flow_args: An arg protocol buffer which is an instance of the required
      flow's args_type class attribute.
    flow_cls: Class of the flow that should be started.
    network_bytes_limit: Limit on the network traffic this flow can generate.
    original_flow: A FlowReference object in case this flow was copied from
      another flow.
    output_plugins: An OutputPluginDescriptor object indicating what output
      plugins should be used for this flow.
    start_at: If specified, the flow will be started not immediately, but at
      the given time.
    parent: A FlowParent referencing the parent, or None for top-level flows.
    runtime_limit: Runtime limit as Duration for all ClientActions.
    **kwargs: If args or runner_args are not specified, we construct these
      protobufs from these keywords.

  Returns:
    The flow id of the new flow.

  Raises:
    ValueError: Unknown or invalid parameters were provided.
  """
  # Is the required flow a known flow?
  try:
    registry.FlowRegistry.FlowClassByName(flow_cls.__name__)
  except ValueError:
    GRR_FLOW_INVALID_FLOW_COUNT.Increment()
    raise ValueError("Unable to locate flow %s" % flow_cls.__name__)

  if not client_id:
    raise ValueError("Client_id is needed to start a flow.")

  # Now parse the flow args into the new object from the keywords.
  if flow_args is None:
    flow_args = flow_cls.args_type()

  FilterArgsFromSemanticProtobuf(flow_args, kwargs)
  # At this point we should have exhausted all the keyword args. If any are
  # left over, we do not know what to do with them, so raise.
  if kwargs:
    raise type_info.UnknownArg("Unknown parameters to StartFlow: %s" % kwargs)

  # Check that the flow args are valid.
  flow_args.Validate()

  rdf_flow = rdf_flow_objects.Flow(
      client_id=client_id,
      flow_class_name=flow_cls.__name__,
      args=flow_args,
      create_time=rdfvalue.RDFDatetime.Now(),
      creator=creator,
      output_plugins=output_plugins,
      original_flow=original_flow,
      flow_state="RUNNING")

  if parent is None:
    parent = FlowParent.FromRoot()

  if parent.is_hunt or parent.is_scheduled_flow:
    # When starting a flow from a hunt or ScheduledFlow, re-use the parent's id
    # to make it easy to find flows. For hunts, every client has a top-level
    # flow with the hunt's id.
    rdf_flow.flow_id = parent.id
  else:  # For new top-level and child flows, assign a random ID.
    rdf_flow.flow_id = RandomFlowId()

  # For better performance, check for conflicting IDs only for top-level
  # flows.
  if not parent.is_flow:
    try:
      data_store.REL_DB.ReadFlowObject(client_id, rdf_flow.flow_id)
      raise CanNotStartFlowWithExistingIdError(client_id, rdf_flow.flow_id)
    except db.UnknownFlowError:
      pass

  if parent.is_flow:  # A flow is a nested flow.
    parent_rdf_flow = parent.flow_obj.rdf_flow
    rdf_flow.long_flow_id = "%s/%s" % (parent_rdf_flow.long_flow_id,
                                       rdf_flow.flow_id)
    rdf_flow.parent_flow_id = parent_rdf_flow.flow_id
    rdf_flow.parent_hunt_id = parent_rdf_flow.parent_hunt_id
    rdf_flow.parent_request_id = parent.flow_obj.GetCurrentOutboundId()
    if parent_rdf_flow.creator:
      rdf_flow.creator = parent_rdf_flow.creator
  elif parent.is_hunt:  # Root-level hunt-induced flow.
    rdf_flow.long_flow_id = "%s/%s" % (client_id, rdf_flow.flow_id)
    rdf_flow.parent_hunt_id = parent.id
  elif parent.is_root or parent.is_scheduled_flow:
    # A flow is a root-level non-hunt flow.
    rdf_flow.long_flow_id = "%s/%s" % (client_id, rdf_flow.flow_id)
  else:
    raise ValueError(f"Unknown flow parent type {parent}")

  if output_plugins:
    rdf_flow.output_plugins_states = GetOutputPluginStates(
        output_plugins,
        rdf_flow.long_flow_id,
        token=access_control.ACLToken(username=rdf_flow.creator))

  if network_bytes_limit is not None:
    rdf_flow.network_bytes_limit = network_bytes_limit
  if cpu_limit is not None:
    rdf_flow.cpu_limit = cpu_limit
  if runtime_limit is not None:
    rdf_flow.runtime_limit_us = runtime_limit

  logging.info(u"Starting %s(%s) on %s (%s)", rdf_flow.long_flow_id,
               rdf_flow.flow_class_name, client_id, start_at or "now")

  rdf_flow.current_state = "Start"

  flow_obj = flow_cls(rdf_flow)

  # Prevent a race condition where a flow is scheduled twice because one
  # worker inserts the row and another worker silently updates the existing
  # row.
  allow_update = False

  if start_at is None:
    # Store an initial version of the flow straight away. This is needed so
    # the database doesn't raise consistency errors due to missing parent keys
    # when logs / errors / results are written in Start().
    try:
      data_store.REL_DB.WriteFlowObject(flow_obj.rdf_flow, allow_update=False)
    except db.FlowExistsError:
      raise CanNotStartFlowWithExistingIdError(client_id, rdf_flow.flow_id)

    allow_update = True

    try:
      # Just run the first state inline. NOTE: Running synchronously means
      # that this runs on the thread that starts the flow. The advantage is
      # that the Start method can raise any errors immediately.
      flow_obj.Start()

      # The flow does not need to actually remain running.
      if not flow_obj.outstanding_requests:
        flow_obj.RunStateMethod("End")
        # Additional check for the correct state in case the End method raised
        # and terminated the flow.
        if flow_obj.IsRunning():
          flow_obj.MarkDone()
    except Exception as e:  # pylint: disable=broad-except
      # We catch all exceptions that happen in Start() and mark the flow as
      # failed.
      msg = compatibility.NativeStr(e)
      if compatibility.PY2:
        msg = msg.decode("utf-8", "replace")

      flow_obj.Error(error_message=msg, backtrace=traceback.format_exc())

  else:
    flow_obj.CallState("Start", start_time=start_at)

  flow_obj.PersistState()

  try:
    data_store.REL_DB.WriteFlowObject(
        flow_obj.rdf_flow, allow_update=allow_update)
  except db.FlowExistsError:
    raise CanNotStartFlowWithExistingIdError(client_id, rdf_flow.flow_id)

  if parent.is_flow:
    # We can optimize here and not write requests/responses to the database
    # since we have to do this for the parent flow at some point anyways.
    parent.flow_obj.MergeQueuedMessages(flow_obj)
  else:
    flow_obj.FlushQueuedMessages()

  return rdf_flow.flow_id
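This newest revision adds start_at scheduling and a runtime limit; a hedged sketch of a delayed start (the flow class is a placeholder and the duration helper call reflects newer GRR rdfvalue APIs, so treat it as an assumption):

# Hypothetical: schedule a flow to start in one hour instead of running now.
flow_id = StartFlow(
    client_id="C.0000000000000001",
    flow_cls=file_finder.FileFinder,  # assumed flow class
    start_at=rdfvalue.RDFDatetime.Now() +
    rdfvalue.Duration.From(1, rdfvalue.HOURS),  # assumed duration helper
    creator="test")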