Example 1
  def End(self):
    """Finalize client registration."""
    # Update summary and publish to the Discovery queue.

    if data_store.RelationalDBWriteEnabled():
      try:
        data_store.REL_DB.WriteClient(self.state.client)
      except db.UnknownClientError:
        pass

    if data_store.RelationalDBReadEnabled():
      summary = self.state.client.GetSummary()
      summary.client_id = self.client_id
    else:
      client = self._OpenClient()
      summary = client.GetSummary()

    self.Publish("Discovery", summary)
    self.SendReply(summary)

    # Update the AFF4 client index (the AFF4 client object is only opened in
    # the non-relational read path above).
    if not data_store.RelationalDBReadEnabled():
      client_index.CreateClientIndex(token=self.token).AddClient(client)
    if data_store.RelationalDBWriteEnabled():
      try:
        index = client_index.ClientIndex()
        index.AddClient(self.client_id.Basename(), self.state.client)
      except db.UnknownClientError:
        # TODO(amoser): Remove after data migration.
        pass
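
The guarded dual-write in this example is the pattern that recurs throughout the listing: data is written to AFF4 as before, mirrored to the relational store only when data_store.RelationalDBWriteEnabled() is true, and db.UnknownClientError is swallowed until the data migration has completed. A minimal, hypothetical sketch of that guard, reusing the GRR names from the example and assuming the same module imports (not verbatim GRR code):

def MirrorClientSnapshot(client_id, snapshot):
  # Hypothetical helper illustrating the dual-write guard above; snapshot
  # corresponds to self.state.client and client_id to
  # self.client_id.Basename().
  if not data_store.RelationalDBWriteEnabled():
    return
  try:
    data_store.REL_DB.WriteClient(snapshot)
    client_index.ClientIndex().AddClient(client_id, snapshot)
  except db.UnknownClientError:
    # The client may not exist in the relational store yet; ignored until
    # the data migration has finished (see the TODO above).
    pass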
Example 2
  def Start(self):
    """Sign the CSR from the client."""

    if self.args.csr.type != rdf_crypto.Certificate.Type.CSR:
      raise ValueError("Must be called with CSR")

    csr = rdf_crypto.CertificateSigningRequest(self.args.csr.pem)
    # Verify the CSR. This is not strictly necessary but doesn't harm either.
    try:
      csr.Verify(csr.GetPublicKey())
    except rdf_crypto.VerificationError:
      raise flow.FlowError("CSR for client %s did not verify: %s" %
                           (self.client_id, csr.AsPEM()))

    # Verify that the CN is of the correct form. The common name should refer
    # to a client URN.
    self.cn = rdf_client.ClientURN.FromPublicKey(csr.GetPublicKey())
    if self.cn != csr.GetCN():
      raise ValueError("CSR CN %s does not match public key %s." % (csr.GetCN(),
                                                                    self.cn))

    logging.info("Will sign CSR for: %s", self.cn)

    cert = rdf_crypto.RDFX509Cert.ClientCertFromCSR(csr)

    # This check is important to ensure that the client id reported in the
    # source of the enrollment request is the same as the one in the
    # certificate. We use the ClientURN to ensure this is also of the correct
    # form for a client name.
    if self.cn != self.client_id:
      raise flow.FlowError("Certificate name %s mismatch for client %s",
                           self.cn, self.client_id)

    with aff4.FACTORY.Create(
        self.client_id, aff4_grr.VFSGRRClient, mode="rw",
        token=self.token) as client:
      # Set and write the certificate to the client record.
      now = rdfvalue.RDFDatetime.Now()
      client.Set(client.Schema.CERT, cert)
      client.Set(client.Schema.FIRST_SEEN, now)
      if data_store.RelationalDBWriteEnabled():
        data_store.REL_DB.WriteClientMetadata(
            self.client_id.Basename(),
            certificate=cert,
            first_seen=now,
            fleetspeak_enabled=False)

      index = client_index.CreateClientIndex(token=self.token)
      index.AddClient(client)
      if data_store.RelationalDBWriteEnabled():
        index = client_index.ClientIndex()
        index.AddClient(self.client_id.Basename(),
                        data_migration.ConvertVFSGRRClient(client))

    # Publish the client enrollment message.
    self.Publish("ClientEnrollment", self.client_id)

    self.Log("Enrolled %s successfully", self.client_id)
Example 3
    def EnrolFleetspeakClient(self, client_id):
        """Enrols a Fleetspeak-enabled client for use with GRR."""
        client_urn = rdf_client.ClientURN(client_id)

        # If already enrolled, return.
        if data_store.RelationalDBReadEnabled():
            if data_store.REL_DB.ReadClientMetadata(client_id):
                return
        else:
            if aff4.FACTORY.ExistsWithType(client_urn,
                                           aff4_type=aff4_grr.VFSGRRClient,
                                           token=self.token):
                return

        logging.info("Enrolling a new Fleetspeak client: %r", client_id)

        if data_store.RelationalDBWriteEnabled():
            data_store.REL_DB.WriteClientMetadata(client_id,
                                                  fleetspeak_enabled=True)

        # TODO(fleetspeak-team,grr-team): If aff4 isn't reliable enough, we can
        # catch exceptions from it and forward them to Fleetspeak by failing its
        # gRPC call. Fleetspeak will then retry with a random, perhaps healthier,
        # instance of the GRR frontend.
        with aff4.FACTORY.Create(client_urn,
                                 aff4_type=aff4_grr.VFSGRRClient,
                                 mode="rw",
                                 token=self.token) as client:

            client.Set(client.Schema.FLEETSPEAK_ENABLED,
                       rdfvalue.RDFBool(True))

            index = client_index.CreateClientIndex(token=self.token)
            index.AddClient(client)
            if data_store.RelationalDBWriteEnabled():
                index = client_index.ClientIndex()
                index.AddClient(client_id,
                                data_migration.ConvertVFSGRRClient(client))

        enrollment_session_id = rdfvalue.SessionID(queue=queues.ENROLLMENT,
                                                   flow_name="Enrol")

        publish_msg = rdf_flows.GrrMessage(
            payload=client_urn,
            session_id=enrollment_session_id,
            # Fleetspeak ensures authentication.
            auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED,
            source=enrollment_session_id,
            priority=rdf_flows.GrrMessage.Priority.MEDIUM_PRIORITY)

        # Publish the client enrollment message.
        events.Events.PublishEvent("ClientEnrollment",
                                   publish_msg,
                                   token=self.token)
Example 4
    def ProcessMessage(self, message=None, event=None):
        """Handle a startup event."""
        _ = event
        # We accept unauthenticated messages so there are no errors but we don't
        # store the results.
        if (message.auth_state !=
                rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED):
            return

        client_id = message.source
        new_si = message.payload
        drift = rdfvalue.Duration("5m")

        if data_store.RelationalDBReadEnabled():
            current_si = data_store.REL_DB.ReadClientStartupInfo(
                client_id.Basename())

            # We write the updated record if the client_info has any changes
            # or the boot time is more than 5 minutes different.
            if (not current_si or current_si.client_info != new_si.client_info
                    or not current_si.boot_time
                    or abs(current_si.boot_time - new_si.boot_time) > drift):
                data_store.REL_DB.WriteClientStartupInfo(
                    client_id.Basename(), new_si)

        else:
            changes = False
            with aff4.FACTORY.Create(client_id,
                                     aff4_grr.VFSGRRClient,
                                     mode="rw",
                                     token=self.token) as client:
                old_info = client.Get(client.Schema.CLIENT_INFO)
                old_boot = client.Get(client.Schema.LAST_BOOT_TIME, 0)

                info = new_si.client_info

                # Only write to the datastore if we have new information.
                if info != old_info:
                    client.Set(client.Schema.CLIENT_INFO(info))
                    changes = True

                client.AddLabels(info.labels, owner="GRR")

                # Allow for some drift in the boot times (5 minutes).
                if not old_boot or abs(old_boot - new_si.boot_time) > drift:
                    client.Set(client.Schema.LAST_BOOT_TIME(new_si.boot_time))
                    changes = True

            if data_store.RelationalDBWriteEnabled() and changes:
                try:
                    data_store.REL_DB.WriteClientStartupInfo(
                        client_id.Basename(), new_si)
                except db.UnknownClientError:
                    pass

        events.Events.PublishEventInline("ClientStartup",
                                         message,
                                         token=self.token)
Example 5
    def RecordFleetspeakClientPing(self, client_id):
        """Records the last client contact in the datastore."""
        ping = rdfvalue.RDFDatetime.Now()
        with aff4.FACTORY.Create(rdf_client.ClientURN(client_id),
                                 aff4_type=aff4_grr.VFSGRRClient,
                                 mode="w",
                                 token=self.token,
                                 force_new_version=False) as client:
            client.Set(client.Schema.PING, ping)

        if data_store.RelationalDBWriteEnabled():
            data_store.REL_DB.WriteClientMetadata(client_id, last_ping=ping)
Example 6
    def ProcessKnowledgeBase(self, responses):
        """Collect and store any extra non-kb artifacts."""
        if not responses.success:
            raise flow.FlowError(
                "Error while collecting the knowledge base: %s" %
                responses.status)

        kb = responses.First()
        # AFF4 client.
        client = self._OpenClient(mode="rw")
        client.Set(client.Schema.KNOWLEDGE_BASE, kb)

        # Copy usernames.
        usernames = [user.username for user in kb.users if user.username]
        client.AddAttribute(client.Schema.USERNAMES(" ".join(usernames)))

        self.CopyOSReleaseFromKnowledgeBase(kb, client)
        client.Flush()

        # objects.ClientSnapshot.

        # Information already present in the knowledge base takes precedence.
        if not kb.os:
            kb.os = self.state.system

        if not kb.fqdn:
            kb.fqdn = self.state.fqdn
        self.state.client.knowledge_base = kb

        artifact_list = [
            "WMILogicalDisks", "RootDiskVolumeUsage",
            "WMIComputerSystemProduct", "LinuxHardwareInfo",
            "OSXSPHardwareDataType"
        ]
        self.CallFlow(collectors.ArtifactCollectorFlow.__name__,
                      artifact_list=artifact_list,
                      next_state="ProcessArtifactResponses")

        # Update the client index for the AFF4 client.
        client_index.CreateClientIndex(token=self.token).AddClient(client)

        if data_store.RelationalDBWriteEnabled():
            try:
                # Update the client index for the objects.ClientSnapshot.
                client_index.ClientIndex().AddClient(self.client_id.Basename(),
                                                     self.state.client)
            except db.UnknownClientError:
                pass
Example 7
    def Handle(self, args, token=None):
        if args.username or args.HasField("interface_traits"):
            raise ValueError("Only user settings can be updated.")

        with aff4.FACTORY.Create(aff4.ROOT_URN.Add("users").Add(
                token.username),
                                 aff4_type=aff4_users.GRRUser,
                                 mode="w",
                                 token=token) as user_fd:
            user_fd.Set(user_fd.Schema.GUI_SETTINGS(args.settings))

        if data_store.RelationalDBWriteEnabled():
            data_store.REL_DB.WriteGRRUser(
                token.username,
                ui_mode=args.settings.mode,
                canary_mode=args.settings.canary_mode)
Example 8
    def WriteAllCrashDetails(self,
                             client_id,
                             crash_details,
                             flow_session_id=None,
                             hunt_session_id=None):
        # Update last crash attribute of the client.

        # AFF4.
        with aff4.FACTORY.Create(client_id,
                                 aff4_grr.VFSGRRClient,
                                 token=self.token) as client_obj:
            client_obj.Set(client_obj.Schema.LAST_CRASH(crash_details))

        # Relational db.
        if data_store.RelationalDBWriteEnabled():
            try:
                data_store.REL_DB.WriteClientCrashInfo(client_id.Basename(),
                                                       crash_details)
            except db.UnknownClientError:
                pass

        # Duplicate the crash information in a number of places so we can find it
        # easily.
        client_crashes = aff4_grr.VFSGRRClient.CrashCollectionURNForCID(
            client_id)
        self._AppendCrashDetails(client_crashes, crash_details)

        if flow_session_id:
            with aff4.FACTORY.Open(flow_session_id,
                                   flow.GRRFlow,
                                   mode="rw",
                                   age=aff4.NEWEST_TIME,
                                   token=self.token) as aff4_flow:
                aff4_flow.Set(aff4_flow.Schema.CLIENT_CRASH(crash_details))

            hunt_session_id = self._ExtractHuntId(flow_session_id)
            if hunt_session_id and hunt_session_id != flow_session_id:
                hunt_obj = aff4.FACTORY.Open(hunt_session_id,
                                             aff4_type=implementation.GRRHunt,
                                             mode="rw",
                                             token=self.token)
                hunt_obj.RegisterCrash(crash_details)
Example 9
    def Handle(self, args, token=None):
        audit_description = ",".join([
            token.username + u"." + utils.SmartUnicode(name)
            for name in args.labels
        ])
        audit_events = []

        try:
            index = client_index.CreateClientIndex(token=token)
            client_objs = aff4.FACTORY.MultiOpen(
                [cid.ToClientURN() for cid in args.client_ids],
                aff4_type=aff4_grr.VFSGRRClient,
                mode="rw",
                token=token)
            for client_obj in client_objs:
                if data_store.RelationalDBWriteEnabled():
                    cid = client_obj.urn.Basename()
                    data_store.REL_DB.RemoveClientLabels(
                        cid, token.username, args.labels)
                    labels_to_remove = set(args.labels)
                    existing_labels = data_store.REL_DB.ReadClientLabels(cid)
                    for label in existing_labels:
                        labels_to_remove.discard(label.name)
                    if labels_to_remove:
                        idx = client_index.ClientIndex()
                        idx.RemoveClientLabels(cid, labels_to_remove)

                index.RemoveClientLabels(client_obj)
                self.RemoveClientLabels(client_obj, args.labels)
                index.AddClient(client_obj)
                client_obj.Close()

                audit_events.append(
                    events.AuditEvent(
                        user=token.username,
                        action="CLIENT_REMOVE_LABEL",
                        flow_name="handler.ApiRemoveClientsLabelsHandler",
                        client=client_obj.urn,
                        description=audit_description))
        finally:
            events.Events.PublishMultipleEvents(
                {audit.AUDIT_EVENT: audit_events}, token=token)
Example 10
    def Handle(self, args, token=None):
        audit_description = ",".join([
            token.username + u"." + utils.SmartUnicode(name)
            for name in args.labels
        ])
        audit_events = []

        try:
            index = client_index.CreateClientIndex(token=token)
            client_objs = aff4.FACTORY.MultiOpen(
                [cid.ToClientURN() for cid in args.client_ids],
                aff4_type=aff4_grr.VFSGRRClient,
                mode="rw",
                token=token)
            for client_obj in client_objs:
                if data_store.RelationalDBWriteEnabled():
                    cid = client_obj.urn.Basename()
                    try:
                        data_store.REL_DB.AddClientLabels(
                            cid, token.username, args.labels)
                        idx = client_index.ClientIndex()
                        idx.AddClientLabels(cid, args.labels)
                    except db.UnknownClientError:
                        # TODO(amoser): Remove after data migration.
                        pass

                client_obj.AddLabels(args.labels)
                index.AddClient(client_obj)
                client_obj.Close()

                audit_events.append(
                    events.AuditEvent(
                        user=token.username,
                        action="CLIENT_ADD_LABEL",
                        flow_name="handler.ApiAddClientsLabelsHandler",
                        client=client_obj.urn,
                        description=audit_description))
        finally:
            events.Events.PublishMultipleEvents(
                {audit.AUDIT_EVENT: audit_events}, token=token)
Example 11
  def Execute(self, thread_count):
    """Runs the migration procedure.

    Args:
      thread_count: A number of threads to execute the migration with.

    Raises:
      AssertionError: If not all clients have been migrated.
      ValueError: If the relational database backend is not available.
    """
    if not data_store.RelationalDBWriteEnabled():
      raise ValueError("No relational database available.")

    sys.stdout.write("Collecting clients...\n")
    client_urns = _GetClientUrns()

    sys.stdout.write("Clients to migrate: {}\n".format(len(client_urns)))
    sys.stdout.write("Threads to use: {}\n".format(thread_count))

    self._total_count = len(client_urns)
    self._migrated_count = 0
    self._start_time = rdfvalue.RDFDatetime.Now()

    batches = utils.Grouper(client_urns, _CLIENT_BATCH_SIZE)

    self._Progress()
    tp = pool.ThreadPool(processes=thread_count)
    tp.map(self._MigrateBatch, list(batches))
    self._Progress()

    if self._migrated_count == self._total_count:
      message = "\nMigration has been finished (migrated {} clients).\n".format(
          self._migrated_count)
      sys.stdout.write(message)
    else:
      message = "Not all clients have been migrated ({}/{})".format(
          self._migrated_count, self._total_count)
      raise AssertionError(message)
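
Example 11 fans the migration out over a thread pool in fixed-size batches. A small standalone sketch of that batching pattern using only the standard library (grouper below is a simplified stand-in for utils.Grouper, and migrate_batch is a placeholder for the per-batch work done by _MigrateBatch):

from multiprocessing import pool

def grouper(items, size):
  # Simplified stand-in for utils.Grouper: yield successive fixed-size
  # batches of the input list.
  for i in range(0, len(items), size):
    yield items[i:i + size]

def migrate_batch(batch):
  # Placeholder for the per-batch migration work.
  print("migrating %d clients" % len(batch))

urns = ["C.%016x" % i for i in range(10)]
tp = pool.ThreadPool(processes=4)
tp.map(migrate_batch, list(grouper(urns, 3)))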
Example 12
    def __init__(self):
        if not data_store.RelationalDBWriteEnabled():
            raise ValueError("No relational database available.")
Example 13
    def VerifyMessageSignature(self, response_comms, packed_message_list,
                               cipher, cipher_verified, api_version,
                               remote_public_key):
        """Verifies the message list signature.

    In the server we check that the timestamp is later than the ping timestamp
    stored with the client. This ensures that client responses can not be
    replayed.

    Args:
      response_comms: The raw response_comms rdfvalue.
      packed_message_list: The PackedMessageList rdfvalue from the server.
      cipher: The cipher object that should be used to verify the message.
      cipher_verified: If True, the cipher's signature is not verified again.
      api_version: The api version we should use.
      remote_public_key: The public key of the source.
    Returns:
      An rdf_flows.GrrMessage.AuthorizationState.
    """
        if (not cipher_verified
                and not cipher.VerifyCipherSignature(remote_public_key)):
            stats.STATS.IncrementCounter("grr_unauthenticated_messages")
            return rdf_flows.GrrMessage.AuthorizationState.UNAUTHENTICATED

        try:
            client_id = cipher.cipher_metadata.source
            try:
                client = self.client_cache.Get(client_id)
            except KeyError:
                client = aff4.FACTORY.Create(
                    client_id,
                    aff4.AFF4Object.classes["VFSGRRClient"],
                    mode="rw",
                    token=self.token)
                self.client_cache.Put(client_id, client)
                stats.STATS.SetGaugeValue(
                    "grr_frontendserver_client_cache_size",
                    len(self.client_cache))

            ip = response_comms.orig_request.source_ip
            client.Set(client.Schema.CLIENT_IP(ip))

            # For the very first packet from the client we do not yet have its clock.
            remote_time = client.Get(
                client.Schema.CLOCK) or rdfvalue.RDFDatetime(0)
            client_time = packed_message_list.timestamp or rdfvalue.RDFDatetime(
                0)

            # This used to be a strict check here so absolutely no out of
            # order messages would be accepted ever. Turns out that some
            # proxies can send your request with some delay even if the
            # client has already timed out (and sent another request in
            # the meantime, making the first one out of order). In that
            # case we would just kill the whole flow as a
            # precaution. Given the behavior of those proxies, this seems
            # now excessive and we have changed the replay protection to
            # only trigger on messages that are more than one hour old.

            if client_time < long(remote_time - rdfvalue.Duration("1h")):
                logging.warning("Message desynchronized for %s: %s >= %s",
                                client_id, long(remote_time), int(client_time))
                # This is likely an old message
                return rdf_flows.GrrMessage.AuthorizationState.DESYNCHRONIZED

            stats.STATS.IncrementCounter("grr_authenticated_messages")

            # Update the client and server timestamps only if the client
            # time moves forward.
            if client_time > long(remote_time):
                client.Set(client.Schema.CLOCK,
                           rdfvalue.RDFDatetime(client_time))
                client.Set(client.Schema.PING, rdfvalue.RDFDatetime.Now())

                clock = client_time
                ping = rdfvalue.RDFDatetime.Now()

                for label in client.Get(client.Schema.LABELS, []):
                    stats.STATS.IncrementCounter("client_pings_by_label",
                                                 fields=[label.name])
            else:
                clock = None
                ping = None
                logging.warning("Out of order message for %s: %s >= %s",
                                client_id, long(remote_time), int(client_time))

            client.Flush()
            if data_store.RelationalDBWriteEnabled():
                source_ip = response_comms.orig_request.source_ip
                if source_ip:
                    last_ip = rdf_client.NetworkAddress(
                        human_readable_address=response_comms.orig_request.
                        source_ip)
                else:
                    last_ip = None

                if ping or clock or last_ip:
                    data_store.REL_DB.WriteClientMetadata(
                        client_id.Basename(),
                        last_ip=last_ip,
                        last_clock=clock,
                        last_ping=ping,
                        fleetspeak_enabled=False)

        except communicator.UnknownClientCert:
            pass

        return rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED
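
The docstring in Example 13 explains the replay-protection rationale; the check itself only rejects messages whose timestamp lags the stored client clock by more than one hour. A standalone, simplified sketch of that comparison, using plain datetime objects instead of the GRR rdfvalue types:

from datetime import datetime, timedelta

REPLAY_WINDOW = timedelta(hours=1)

def is_replayed(stored_client_clock, message_timestamp):
    # Mirrors the desynchronization check above: a message is treated as a
    # replay only if it is more than one hour older than the last clock
    # value recorded for the client.
    return message_timestamp < stored_client_clock - REPLAY_WINDOW

# Example: a message timestamped 10:30 against a stored clock of 12:00 is
# more than one hour old, so it is rejected as a likely replay.
print(is_replayed(datetime(2018, 1, 1, 12, 0), datetime(2018, 1, 1, 10, 30)))  # True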
Example 14
    def HandleRequest(self, request):
        """Handles given HTTP request."""
        impersonated_username = config.CONFIG["AdminUI.debug_impersonate_user"]
        if impersonated_username:
            logging.info("Overriding user as %s", impersonated_username)
            request.user = config.CONFIG["AdminUI.debug_impersonate_user"]

        if not aff4_users.GRRUser.IsValidUsername(request.user):
            return self._BuildResponse(
                403, dict(message="Invalid username: %s" % request.user))

        try:
            router, method_metadata, args = self._router_matcher.MatchRouter(
                request)
        except access_control.UnauthorizedAccess as e:
            logging.exception("Access denied to %s (%s): %s", request.path,
                              request.method, e)

            additional_headers = {
                "X-GRR-Unauthorized-Access-Reason":
                utils.SmartStr(e.message).replace("\n", ""),
                "X-GRR-Unauthorized-Access-Subject":
                utils.SmartStr(e.subject)
            }
            return self._BuildResponse(
                403,
                dict(message="Access denied by ACL: %s" %
                     utils.SmartStr(e.message),
                     subject=utils.SmartStr(e.subject)),
                headers=additional_headers)

        except ApiCallRouterNotFoundError as e:
            return self._BuildResponse(404, dict(message=e.message))
        except werkzeug_exceptions.MethodNotAllowed as e:
            return self._BuildResponse(405, dict(message=e.message))
        except Error as e:
            logging.exception("Can't match URL to router/method: %s", e)

            return self._BuildResponse(
                500, dict(message=str(e), traceBack=traceback.format_exc()))

        # SetUID() is called here so that ACL checks done by the router do not
        # clash with datastore ACL checks.
        # TODO(user): increase token expiry time.
        token = self.BuildToken(request, 60).SetUID()

        # We send a blind-write request to ensure that the user object is created
        # for a user specified by the username.
        user_urn = rdfvalue.RDFURN("aff4:/users/").Add(request.user)
        # We can't use conventional AFF4 interface, since aff4.FACTORY.Create will
        # create a new version of the object for every call.
        with data_store.DB.GetMutationPool() as pool:
            pool.MultiSet(user_urn, {
                aff4_users.GRRUser.SchemaCls.TYPE:
                [aff4_users.GRRUser.__name__],
                aff4_users.GRRUser.SchemaCls.LAST:
                [rdfvalue.RDFDatetime.Now().SerializeToDataStore()]
            },
                          replace=True)

        if data_store.RelationalDBWriteEnabled():
            data_store.REL_DB.WriteGRRUser(request.user)

        handler = None
        try:
            # ACL checks are done here by the router. If this method succeeds (i.e.
            # does not raise), then handlers run without further ACL checks (they're
            # free to do some in their own implementations, though).
            handler = getattr(router, method_metadata.name)(args, token=token)

            if handler.args_type != method_metadata.args_type:
                raise RuntimeError(
                    "Handler args type doesn't match "
                    "method args type: %s vs %s" %
                    (handler.args_type, method_metadata.args_type))

            binary_result_type = (
                api_call_router.RouterMethodMetadata.BINARY_STREAM_RESULT_TYPE)

            if (handler.result_type != method_metadata.result_type and
                    not (handler.result_type is None and
                         method_metadata.result_type == binary_result_type)):
                raise RuntimeError(
                    "Handler result type doesn't match "
                    "method result type: %s vs %s" %
                    (handler.result_type, method_metadata.result_type))

            # HEAD method is only used for checking the ACLs for particular API
            # methods.
            if request.method == "HEAD":
                # If the request would return a stream, we add the Content-Length
                # header to the response.
                if (method_metadata.result_type ==
                        method_metadata.BINARY_STREAM_RESULT_TYPE):
                    binary_stream = handler.Handle(args, token=token)
                    return self._BuildResponse(
                        200, {"status": "OK"},
                        method_name=method_metadata.name,
                        no_audit_log=method_metadata.no_audit_log_required,
                        content_length=binary_stream.content_length,
                        token=token)
                else:
                    return self._BuildResponse(
                        200, {"status": "OK"},
                        method_name=method_metadata.name,
                        no_audit_log=method_metadata.no_audit_log_required,
                        token=token)

            if (method_metadata.result_type ==
                    method_metadata.BINARY_STREAM_RESULT_TYPE):
                binary_stream = handler.Handle(args, token=token)
                return self._BuildStreamingResponse(
                    binary_stream, method_name=method_metadata.name)
            else:
                format_mode = GetRequestFormatMode(request, method_metadata)
                result = self.CallApiHandler(handler, args, token=token)
                rendered_data = self._FormatResultAsJson(
                    result, format_mode=format_mode)

                return self._BuildResponse(
                    200,
                    rendered_data,
                    method_name=method_metadata.name,
                    no_audit_log=method_metadata.no_audit_log_required,
                    token=token)
        except access_control.UnauthorizedAccess as e:
            logging.exception("Access denied to %s (%s) with %s: %s",
                              request.path, request.method,
                              method_metadata.name, e)

            additional_headers = {
                "X-GRR-Unauthorized-Access-Reason":
                utils.SmartStr(e.message).replace("\n", ""),
                "X-GRR-Unauthorized-Access-Subject":
                utils.SmartStr(e.subject)
            }
            return self._BuildResponse(
                403,
                dict(message="Access denied by ACL: %s" % e.message,
                     subject=utils.SmartStr(e.subject)),
                headers=additional_headers,
                method_name=method_metadata.name,
                no_audit_log=method_metadata.no_audit_log_required,
                token=token)
        except api_call_handler_base.ResourceNotFoundError as e:
            return self._BuildResponse(
                404,
                dict(message=e.message),
                method_name=method_metadata.name,
                no_audit_log=method_metadata.no_audit_log_required,
                token=token)
        except NotImplementedError as e:
            return self._BuildResponse(
                501,
                dict(message=e.message),
                method_name=method_metadata.name,
                no_audit_log=method_metadata.no_audit_log_required,
                token=token)
        except Exception as e:  # pylint: disable=broad-except
            logging.exception("Error while processing %s (%s) with %s: %s",
                              request.path, request.method,
                              handler.__class__.__name__, e)
            return self._BuildResponse(
                500,
                dict(message=str(e), traceBack=traceback.format_exc()),
                method_name=method_metadata.name,
                no_audit_log=method_metadata.no_audit_log_required,
                token=token)
Example 15
  def Platform(self, responses):
    """Stores information about the platform."""
    if responses.success:
      response = responses.First()
      # AFF4 client.

      # These need to be in separate attributes because they get searched on in
      # the GUI
      with self._OpenClient(mode="rw") as client:
        # For backwards compatibility.
        client.Set(client.Schema.HOSTNAME(response.fqdn))
        client.Set(client.Schema.SYSTEM(response.system))
        client.Set(client.Schema.OS_RELEASE(response.release))
        client.Set(client.Schema.OS_VERSION(response.version))
        client.Set(client.Schema.KERNEL(response.kernel))
        client.Set(client.Schema.FQDN(response.fqdn))

        # response.machine is the machine value of platform.uname()
        # On Windows this is the value of:
        # HKEY_LOCAL_MACHINE\System\CurrentControlSet\Control\Session
        # Manager\Environment\PROCESSOR_ARCHITECTURE
        # "AMD64", "IA64" or "x86"
        client.Set(client.Schema.ARCH(response.machine))
        client.Set(
            client.Schema.UNAME("%s-%s-%s" % (response.system, response.release,
                                              response.version)))

        # Update the client index
        client_index.CreateClientIndex(token=self.token).AddClient(client)

      # objects.Client.
      client = self.state.client
      client.os_release = response.release
      client.os_version = response.version
      client.kernel = response.kernel
      client.arch = response.machine
      # Store these for later, there might be more accurate data
      # coming in from the artifact collector.
      self.state.fqdn = response.fqdn
      self.state.os = response.system

      if data_store.RelationalDBWriteEnabled():
        try:
          # Update the client index
          client_index.ClientIndex().AddClient(self.client_id.Basename(),
                                               client)
        except db.UnknownClientError:
          pass

      if response.system == "Windows":
        with aff4.FACTORY.Create(
            self.client_id.Add("registry"),
            standard.VFSDirectory,
            token=self.token) as fd:
          fd.Set(fd.Schema.PATHSPEC,
                 fd.Schema.PATHSPEC(
                     path="/", pathtype=rdf_paths.PathSpec.PathType.REGISTRY))

      # No support for OS X cloud machines as yet.
      if response.system in ["Linux", "Windows"]:
        self.CallClient(
            server_stubs.GetCloudVMMetadata,
            cloud.BuildCloudMetadataRequests(),
            next_state="CloudMetadata")

      known_system_type = True
    else:
      # We failed to get the Platform info, maybe there is a stored
      # system we can use to get at least some data.
      if data_store.RelationalDBReadEnabled():
        client = data_store.REL_DB.ReadClient(self.client_id.Basename())
        known_system_type = client and client.knowledge_base.os
      else:
        client = self._OpenClient()
        known_system_type = client.Get(client.Schema.SYSTEM)

      self.Log("Could not retrieve Platform info.")

    if known_system_type:
      # We will accept a partial KBInit rather than raise, so pass
      # require_complete=False.
      self.CallFlow(
          artifact.KnowledgeBaseInitializationFlow.__name__,
          require_complete=False,
          lightweight=self.args.lightweight,
          next_state="ProcessKnowledgeBase")
    else:
      self.Log("Unknown system type, skipping KnowledgeBaseInitializationFlow")
Example 16
    def AssignTasksToClient(self, client_id):
        """Examines our rules and starts up flows based on the client.

    Args:
      client_id: Client id of the client for tasks to be assigned.

    Returns:
      Number of assigned tasks.
    """
        rules = self.Get(self.Schema.RULES)
        if not rules:
            return 0

        if data_store.RelationalDBReadEnabled():
            last_foreman_run = self._GetLastForemanRunRelational(client_id)
        else:
            last_foreman_run = self._GetLastForemanRun(client_id)

        latest_rule = max(rule.created for rule in rules)

        if latest_rule <= last_foreman_run:
            return 0

        # Update the latest checked rule on the client.
        if data_store.RelationalDBWriteEnabled():
            try:
                self._SetLastForemanRunRelational(client_id, latest_rule)
            except db.UnknownClientError:
                pass

        # If the relational db is used for reads, we don't have to update the
        # aff4 object.
        if not data_store.RelationalDBReadEnabled():
            self._SetLastForemanRun(client_id, latest_rule)

        relevant_rules = []
        expired_rules = False

        now = time.time() * 1e6

        for rule in rules:
            if rule.expires < now:
                expired_rules = True
                continue
            if rule.created <= int(last_foreman_run):
                continue

            relevant_rules.append(rule)

        if data_store.RelationalDBReadEnabled():
            client_data = data_store.REL_DB.ReadClientFullInfo(client_id)
        else:
            client_data = aff4.FACTORY.Open(client_id,
                                            mode="rw",
                                            token=self.token)

        actions_count = 0
        for rule in relevant_rules:
            if self._EvaluateRules(rule, client_data):
                actions_count += self._RunActions(rule, client_id)

        if expired_rules:
            self.ExpireRules()

        return actions_count