Example #1
  def End(self):
    """Finalize client registration."""
    # Update summary and publish to the Discovery queue.

    if data_store.RelationalDBWriteEnabled():
      try:
        data_store.REL_DB.WriteClientSnapshot(self.state.client)
      except db.UnknownClientError:
        pass

    client = self._OpenClient()

    if data_store.RelationalDBReadEnabled():
      summary = self.state.client.GetSummary()
      summary.client_id = self.client_id
      summary.timestamp = rdfvalue.RDFDatetime.Now()
    else:
      summary = client.GetSummary()

    self.Publish("Discovery", summary)
    self.SendReply(summary)

    # Update the client index
    client_index.CreateClientIndex(token=self.token).AddClient(client)
    if data_store.RelationalDBWriteEnabled():
      try:
        index = client_index.ClientIndex()
        index.AddClient(self.state.client)
        labels = self.state.client.startup_info.client_info.labels
        if labels:
          data_store.REL_DB.AddClientLabels(self.state.client.client_id, "GRR",
                                            labels)
      except db.UnknownClientError:
        # TODO(amoser): Remove after data migration.
        pass
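Most of the examples on this page share one migration idiom: the legacy AFF4 write always runs, and when the relational backend is enabled the same data is mirrored into REL_DB, with db.UnknownClientError tolerated for clients that have not been migrated yet. A minimal sketch of that idiom, reusing only calls shown above (imports omitted, as in the snippets; the helper name MirrorClientWrite is hypothetical):

def MirrorClientWrite(aff4_client, snapshot, token=None):
  """Sketch: mirror a client write into the relational store when enabled."""
  # Legacy AFF4 path: always executed.
  client_index.CreateClientIndex(token=token).AddClient(aff4_client)

  # Relational path: gated on the new backend being enabled.
  if data_store.RelationalDBWriteEnabled():
    try:
      data_store.REL_DB.WriteClientSnapshot(snapshot)
      client_index.ClientIndex().AddClient(snapshot)
    except db.UnknownClientError:
      # The client may not exist in the new store yet; tolerated during
      # the migration period.
      pass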
Example #2
    def Start(self):
        """Sign the CSR from the client."""

        if self.args.csr.type != rdf_crypto.Certificate.Type.CSR:
            raise ValueError("Must be called with CSR")

        csr = rdf_crypto.CertificateSigningRequest(self.args.csr.pem)
        # Verify the CSR. This is not strictly necessary but it doesn't hurt either.
        try:
            csr.Verify(csr.GetPublicKey())
        except rdf_crypto.VerificationError:
            raise flow.FlowError("CSR for client %s did not verify: %s" %
                                 (self.client_id, csr.AsPEM()))

        # Verify that the CN is of the correct form. The common name should refer
        # to a client URN.
        self.cn = rdf_client.ClientURN.FromPublicKey(csr.GetPublicKey())
        if self.cn != csr.GetCN():
            raise ValueError("CSR CN %s does not match public key %s." %
                             (csr.GetCN(), self.cn))

        logging.info("Will sign CSR for: %s", self.cn)

        cert = rdf_crypto.RDFX509Cert.ClientCertFromCSR(csr)

        # This check is important to ensure that the client id reported in the
        # source of the enrollment request is the same as the one in the
        # certificate. We use the ClientURN to ensure this is also of the correct
        # form for a client name.
        if self.cn != self.client_id:
            raise flow.FlowError("Certificate name %s mismatch for client %s" %
                                 (self.cn, self.client_id))

        with aff4.FACTORY.Create(self.client_id,
                                 aff4_grr.VFSGRRClient,
                                 mode="rw",
                                 token=self.token) as client:
            # Set and write the certificate to the client record.
            now = rdfvalue.RDFDatetime.Now()
            client.Set(client.Schema.CERT, cert)
            client.Set(client.Schema.FIRST_SEEN, now)
            if data_store.RelationalDBWriteEnabled():
                data_store.REL_DB.WriteClientMetadata(
                    self.client_id.Basename(),
                    certificate=cert,
                    first_seen=now,
                    fleetspeak_enabled=False)

            index = client_index.CreateClientIndex(token=self.token)
            index.AddClient(client)
            if data_store.RelationalDBWriteEnabled():
                index = client_index.ClientIndex()
                index.AddClient(data_migration.ConvertVFSGRRClient(client))
        # Publish the client enrollment message.
        self.Publish("ClientEnrollment", self.client_id)

        self.Log("Enrolled %s successfully", self.client_id)
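Stripped of error handling, the flow above enforces three identity checks before any certificate is written. A condensed restatement with the same aliases (the bare asserts stand in for the ValueError/FlowError raises above):

# Condensed restatement of the enrollment identity checks.
csr = rdf_crypto.CertificateSigningRequest(self.args.csr.pem)
csr.Verify(csr.GetPublicKey())                       # 1. CSR signature is valid.
cn = rdf_client.ClientURN.FromPublicKey(csr.GetPublicKey())
assert cn == csr.GetCN()                             # 2. CN derives from the key.
assert cn == self.client_id                          # 3. CN matches the requester.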
Example #3
  def EnrolFleetspeakClient(self, client_id):
    """Enrols a Fleetspeak-enabled client for use with GRR."""
    client_urn = rdf_client.ClientURN(client_id)

    # If already enrolled, return.
    if data_store.RelationalDBReadEnabled():
      if data_store.REL_DB.ReadClientMetadata(client_id):
        return
    else:
      if aff4.FACTORY.ExistsWithType(
          client_urn, aff4_type=aff4_grr.VFSGRRClient, token=self.token):
        return

    logging.info("Enrolling a new Fleetspeak client: %r", client_id)

    if data_store.RelationalDBWriteEnabled():
      data_store.REL_DB.WriteClientMetadata(client_id, fleetspeak_enabled=True)

    # TODO(fleetspeak-team,grr-team): If aff4 isn't reliable enough, we can
    # catch exceptions from it and forward them to Fleetspeak by failing its
    # gRPC call. Fleetspeak will then retry with a random, perhaps healthier,
    # instance of the GRR frontend.
    with aff4.FACTORY.Create(
        client_urn,
        aff4_type=aff4_grr.VFSGRRClient,
        mode="rw",
        token=self.token) as client:

      client.Set(client.Schema.FLEETSPEAK_ENABLED, rdfvalue.RDFBool(True))

      index = client_index.CreateClientIndex(token=self.token)
      index.AddClient(client)
      if data_store.RelationalDBWriteEnabled():
        index = client_index.ClientIndex()
        index.AddClient(data_migration.ConvertVFSGRRClient(client))

    enrollment_session_id = rdfvalue.SessionID(
        queue=queues.ENROLLMENT, flow_name="Enrol")

    publish_msg = rdf_flows.GrrMessage(
        payload=client_urn,
        session_id=enrollment_session_id,
        # Fleetspeak ensures authentication.
        auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED,
        source=enrollment_session_id,
        priority=rdf_flows.GrrMessage.Priority.MEDIUM_PRIORITY)

    # Publish the client enrollment message.
    events.Events.PublishEvent(
        "ClientEnrollment", publish_msg, token=self.token)
Example #4
    def ProcessFingerprint(self, responses):
        """Store the fingerprint response."""
        if not responses.success:
            # It's better to raise than merely log, since the error will make it
            # into the flow's protobuf and users can inspect why this flow failed.
            raise flow.FlowError("Could not fingerprint file: %s" %
                                 responses.status)

        response = responses.First()
        if response.pathspec.path:
            pathspec = response.pathspec
        else:
            pathspec = self.args.pathspec

        self.state.urn = pathspec.AFF4Path(self.client_id)

        with aff4.FACTORY.Create(self.state.urn,
                                 aff4_grr.VFSFile,
                                 mode="w",
                                 token=self.token) as fd:
            hash_obj = response.hash
            fd.Set(fd.Schema.HASH, hash_obj)

        if data_store.RelationalDBWriteEnabled():
            path_info = rdf_objects.PathInfo.FromPathSpec(pathspec)
            path_info.hash_entry = response.hash

            data_store.REL_DB.WritePathInfos(self.client_id.Basename(),
                                             [path_info])

        self.ReceiveFileFingerprint(self.state.urn,
                                    hash_obj,
                                    request_data=responses.request_data)
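The relational mirror in these examples always ends in the same WritePathInfos call; what varies is how the PathInfo is built. The three constructions that appear on this page, side by side (aliases as above; the literal path is illustrative):

# 1. From a pathspec (this example).
path_info = rdf_objects.PathInfo.FromPathSpec(pathspec)

# 2. From a stat entry (Examples #5, #6, #7 and others).
path_info = rdf_objects.PathInfo.FromStatEntry(stat_entry)

# 3. By hand, from a categorized VFS path (Examples #15, #18, #19).
path_type, components = rdf_objects.ParseCategorizedPath("fs/os/c/Downloads/a.txt")
path_info = rdf_objects.PathInfo()
path_info.path_type = path_type
path_info.components = components
path_info.directory = False

data_store.REL_DB.WritePathInfos(client_id, [path_info])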
Example #5
    def SetupTestTimeline(self):
        self.client_id = self.SetupClient(0)
        fixture_test_lib.ClientFixture(self.client_id, token=self.token)

        # Choose some directory with pathspec in the ClientFixture.
        self.category_path = "fs/os"
        self.folder_path = self.category_path + "/Users/中国新闻网新闻中/Shared"
        self.file_path = self.folder_path + "/a.txt"

        file_urn = self.client_id.Add(self.file_path)
        for i in range(0, 5):
            with test_lib.FakeTime(i):
                stat_entry = rdf_client.StatEntry()
                stat_entry.st_mtime = rdfvalue.RDFDatetimeSeconds.Now()
                stat_entry.pathspec.path = self.folder_path[
                    len(self.category_path):]
                stat_entry.pathspec.pathtype = rdf_paths.PathSpec.PathType.OS

                with aff4.FACTORY.Create(file_urn,
                                         aff4_grr.VFSFile,
                                         mode="w",
                                         token=self.token) as fd:
                    fd.Set(fd.Schema.STAT, stat_entry)

                if data_store.RelationalDBWriteEnabled():
                    client_id = self.client_id.Basename()
                    path_info = rdf_objects.PathInfo.FromStatEntry(stat_entry)
                    data_store.REL_DB.WritePathInfos(client_id, [path_info])
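The loop above leans on test_lib.FakeTime pinning the clock: every write made inside the context manager is stamped with the faked instant, so the five iterations yield five versions of the same AFF4 object, one second apart. A restatement of the loop with the stamping made explicit:

# Each FakeTime(i) context stamps the enclosed write at epoch second i, so
# repeating the block with increasing i produces successive versions 0..4
# of file_urn.
for i in range(0, 5):
    with test_lib.FakeTime(i):
        with aff4.FACTORY.Create(file_urn, aff4_grr.VFSFile, mode="w",
                                 token=self.token) as fd:
            fd.Set(fd.Schema.STAT, stat_entry)  # version stamped at time i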
Example #6
    def List(self, responses):
        """Collect the directory listing and store in the datastore."""
        if not responses.success:
            raise flow.FlowError(str(responses.status))

        self.Status("Listed %s", self.state.urn)

        with data_store.DB.GetMutationPool() as pool:
            with aff4.FACTORY.Create(self.state.urn,
                                     standard.VFSDirectory,
                                     mode="w",
                                     mutation_pool=pool,
                                     token=self.token) as fd:
                fd.Set(fd.Schema.PATHSPEC(self.state.stat.pathspec))
                fd.Set(fd.Schema.STAT(self.state.stat))

            if data_store.RelationalDBWriteEnabled():
                path_info = rdf_objects.PathInfo.FromStatEntry(self.state.stat)
                data_store.REL_DB.WritePathInfos(self.client_id.Basename(),
                                                 [path_info])

            stat_entries = map(rdf_client.StatEntry, responses)
            WriteStatEntries(stat_entries,
                             client_id=self.client_id,
                             mutation_pool=pool,
                             token=self.token)

            for stat_entry in stat_entries:
                self.SendReply(stat_entry)  # Send Stats to parent flows.
Example #7
  def WriteBuffer(self, responses):
    """Write the hash received to the blob image."""

    # Note that hashes must arrive at this state in the correct order since they
    # are sent in the correct order (either via CallState or CallClient).
    index = responses.request_data["index"]
    if index not in self.state.pending_files:
      return

    # Failed to read the file - ignore it.
    if not responses.success:
      self._FileFetchFailed(index, responses.request.request.name)
      return

    response = responses.First()
    file_tracker = self.state.pending_files.get(index)
    if file_tracker:
      file_tracker.setdefault("blobs", []).append((response.data,
                                                   response.length))

      download_size = file_tracker["size_to_download"]
      if (response.length < self.CHUNK_SIZE or
          response.offset + response.length >= download_size):

        # Write the file to the data store.
        stat_entry = file_tracker["stat_entry"]
        urn = stat_entry.pathspec.AFF4Path(self.client_id)

        with aff4.FACTORY.Create(
            urn, aff4_grr.VFSBlobImage, mode="w", token=self.token) as fd:

          fd.SetChunksize(self.CHUNK_SIZE)
          fd.Set(fd.Schema.STAT(stat_entry))
          fd.Set(fd.Schema.PATHSPEC(stat_entry.pathspec))
          fd.Set(fd.Schema.CONTENT_LAST(rdfvalue.RDFDatetime().Now()))

          for digest, length in file_tracker["blobs"]:
            fd.AddBlob(digest, length)

          # Save some space.
          del file_tracker["blobs"]

        if data_store.RelationalDBWriteEnabled():
          client_id = self.client_id.Basename()
          path_info = rdf_objects.PathInfo.FromStatEntry(stat_entry)
          data_store.REL_DB.WritePathInfos(client_id, [path_info])

        # File done, remove from the store and close it.
        self._ReceiveFetchedFile(file_tracker)

        # Publish the new file event to cause the file to be added to the
        # filestore.
        self.Publish("FileStore.AddFileToStore", urn)

        self.state.files_fetched += 1

        if not self.state.files_fetched % 100:
          self.Log("Fetched %d of %d files.", self.state.files_fetched,
                   self.state.files_to_fetch)
Example #8
    def testSwitchingBetweenFileVersionsRefreshesDownloadTab(self):
        urn_a = rdfvalue.RDFURN("%s/fs/os/c/Downloads/a.txt" % self.client_id)
        path_info = rdf_objects.PathInfo.OS(
            components=["c", "Downloads", "a.txt"])

        # Test files are set up by the self.CreateFileVersions call in the
        # test's setUp method. Amend the created file versions by adding
        # different hashes to versions corresponding to different times.
        # Note that a string passed to the fd.Schema.HASH constructor will be
        # printed as a hexadecimal bytestring, since the UI hex-encodes the raw
        # digest bytes. Thus "111" (three 0x31 bytes) will become "313131" and
        # "222" (three 0x32 bytes) will become "323232".
        with test_lib.FakeTime(gui_test_lib.TIME_0):
            with aff4.FACTORY.Create(urn_a,
                                     aff4_type=aff4_grr.VFSFile,
                                     force_new_version=False,
                                     object_exists=True) as fd:
                fd.Set(fd.Schema.HASH(sha256="111"))

            if data_store.RelationalDBWriteEnabled():
                path_info.hash_entry.sha256 = b"111"
                data_store.REL_DB.WritePathInfos(self.client_id, [path_info])

        with test_lib.FakeTime(gui_test_lib.TIME_1):
            with aff4.FACTORY.Create(urn_a,
                                     aff4_type=aff4_grr.VFSFile,
                                     force_new_version=False,
                                     object_exists=True) as fd:
                fd.Set(fd.Schema.HASH(sha256="222"))

            if data_store.RelationalDBWriteEnabled():
                path_info.hash_entry.sha256 = b"222"
                data_store.REL_DB.WritePathInfos(self.client_id, [path_info])

        # Open a URL corresponding to a HEAD version of the file.
        self.Open("/#/clients/%s/vfs/fs/os/c/Downloads/a.txt?tab=download" %
                  self.client_id)
        # Make sure displayed hash value is correct.
        self.WaitUntil(self.IsElementPresent,
                       "css=tr:contains('Sha256') td:contains('323232')")

        # Select the previous file version.
        self.Click("css=select.version-dropdown > option:contains(\"%s\")" %
                   gui_test_lib.DateString(gui_test_lib.TIME_0))
        # Make sure displayed hash value gets updated.
        self.WaitUntil(self.IsElementPresent,
                       "css=tr:contains('Sha256') td:contains('313131')")
Example #9
    def ProcessMessage(self, message=None, event=None):
        """Handle a startup event."""
        _ = event
        # We accept unauthenticated messages so that no error is raised, but we
        # don't store the results.
        if (message.auth_state !=
                rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED):
            return

        client_id = message.source
        new_si = message.payload
        drift = rdfvalue.Duration("5m")

        if data_store.RelationalDBReadEnabled():
            current_si = data_store.REL_DB.ReadClientStartupInfo(
                client_id.Basename())

            # We write the updated record if the client_info has any changes
            # or the boot time is more than 5 minutes different.
            if (not current_si or current_si.client_info != new_si.client_info
                    or not current_si.boot_time
                    or abs(current_si.boot_time - new_si.boot_time) > drift):
                data_store.REL_DB.WriteClientStartupInfo(
                    client_id.Basename(), new_si)

        else:
            changes = False
            with aff4.FACTORY.Create(client_id,
                                     aff4_grr.VFSGRRClient,
                                     mode="rw",
                                     token=self.token) as client:
                old_info = client.Get(client.Schema.CLIENT_INFO)
                old_boot = client.Get(client.Schema.LAST_BOOT_TIME, 0)

                info = new_si.client_info

                # Only write to the datastore if we have new information.
                if info != old_info:
                    client.Set(client.Schema.CLIENT_INFO(info))
                    changes = True

                client.AddLabels(info.labels, owner="GRR")

                # Allow for some drift in the boot times (5 minutes).
                if not old_boot or abs(old_boot - new_si.boot_time) > drift:
                    client.Set(client.Schema.LAST_BOOT_TIME(new_si.boot_time))
                    changes = True

            if data_store.RelationalDBWriteEnabled() and changes:
                try:
                    data_store.REL_DB.WriteClientStartupInfo(
                        client_id.Basename(), new_si)
                except db.UnknownClientError:
                    pass

        events.Events.PublishEventInline("ClientStartup",
                                         message,
                                         token=self.token)
Example #10
    def EnrolFleetspeakClient(self, client_id):
        """Enrols a Fleetspeak-enabled client for use with GRR."""
        client_urn = rdf_client.ClientURN(client_id)

        # If already enrolled, return.
        if data_store.RelationalDBReadEnabled():
            if data_store.REL_DB.ReadClientMetadata(client_id):
                return
        else:
            if aff4.FACTORY.ExistsWithType(client_urn,
                                           aff4_type=aff4_grr.VFSGRRClient,
                                           token=self.token):
                return

        logging.info("Enrolling a new Fleetspeak client: %r", client_id)

        if data_store.RelationalDBWriteEnabled():
            data_store.REL_DB.WriteClientMetadata(client_id,
                                                  fleetspeak_enabled=True)

        # TODO(fleetspeak-team,grr-team): If aff4 isn't reliable enough, we can
        # catch exceptions from it and forward them to Fleetspeak by failing its
        # gRPC call. Fleetspeak will then retry with a random, perhaps healthier,
        # instance of the GRR frontend.
        with aff4.FACTORY.Create(client_urn,
                                 aff4_type=aff4_grr.VFSGRRClient,
                                 mode="rw",
                                 token=self.token) as client:

            client.Set(client.Schema.FLEETSPEAK_ENABLED,
                       rdfvalue.RDFBool(True))

            index = client_index.CreateClientIndex(token=self.token)
            index.AddClient(client)
            if data_store.RelationalDBWriteEnabled():
                index = client_index.ClientIndex()
                index.AddClient(data_migration.ConvertVFSGRRClient(client))

        # Publish the client enrollment message.
        events.Events.PublishEvent("ClientEnrollment",
                                   client_urn,
                                   token=self.token)
Example #11
    def IterateFind(self, responses):
        """Iterate in this state until no more results are available."""
        if not responses.success:
            raise IOError(responses.status)

        with data_store.DB.GetMutationPool() as pool:
            for response in responses:
                # Create the file in the VFS
                vfs_urn = response.hit.pathspec.AFF4Path(self.client_id)

                if stat.S_ISDIR(response.hit.st_mode):
                    fd = aff4.FACTORY.Create(vfs_urn,
                                             standard.VFSDirectory,
                                             mutation_pool=pool,
                                             token=self.token)
                else:
                    fd = aff4.FACTORY.Create(vfs_urn,
                                             aff4_grr.VFSFile,
                                             mutation_pool=pool,
                                             token=self.token)

                with fd:
                    stat_response = fd.Schema.STAT(response.hit)
                    fd.Set(stat_response)
                    fd.Set(fd.Schema.PATHSPEC(response.hit.pathspec))

                if data_store.RelationalDBWriteEnabled():
                    client_id = self.client_id.Basename()
                    path_info = rdf_objects.PathInfo.FromStatEntry(
                        response.hit)
                    data_store.REL_DB.WritePathInfos(client_id, [path_info])

                # Send the stat to the parent flow.
                self.SendReply(stat_response)

        self.state.received_count += len(responses)

        # Exit if we hit the max result count we wanted or we're finished. Note
        # that we may exceed max_results if the iteration yielded too many
        # results; we simply will not return to the client for another iteration.
        if (self.state.received_count < self.args.max_results and
                responses.iterator.state != responses.iterator.State.FINISHED):

            self.args.findspec.iterator = responses.iterator

            # If we are close to max_results reduce the iterator.
            self.args.findspec.iterator.number = min(
                self.args.findspec.iterator.number,
                self.args.max_results - self.state.received_count)

            self.CallClient(server_stubs.Find,
                            self.args.findspec,
                            next_state="IterateFind")
            self.Log("%d files processed.", self.state.received_count)
Example #12
    def Execute(self):
        """Runs the migration procedure."""
        if not data_store.RelationalDBWriteEnabled():
            raise ValueError("No relational database available.")

        sys.stdout.write("Collecting users...\n")
        users = self._GetUsers()

        sys.stdout.write("Users to migrate: {}\n".format(len(users)))
        for u in users:
            self._MigrateUser(u)
Example #13
    def RecordFleetspeakClientPing(self, client_id):
        """Records the last client contact in the datastore."""
        ping = rdfvalue.RDFDatetime.Now()
        with aff4.FACTORY.Create(rdf_client.ClientURN(client_id),
                                 aff4_type=aff4_grr.VFSGRRClient,
                                 mode="w",
                                 token=self.token,
                                 force_new_version=False) as client:
            client.Set(client.Schema.PING, ping)

        if data_store.RelationalDBWriteEnabled():
            data_store.REL_DB.WriteClientMetadata(client_id, last_ping=ping)
Example #14
    def ProcessMessage(self, message=None):
        """Handle a startup event."""

        client_id = message.source
        new_si = message.payload
        drift = rdfvalue.Duration("5m")

        if data_store.RelationalDBReadEnabled():
            current_si = data_store.REL_DB.ReadClientStartupInfo(
                client_id.Basename())

            # We write the updated record if the client_info has any changes
            # or the boot time is more than 5 minutes different.
            if (not current_si or current_si.client_info != new_si.client_info
                    or not current_si.boot_time
                    or abs(current_si.boot_time - new_si.boot_time) > drift):
                try:
                    data_store.REL_DB.WriteClientStartupInfo(
                        client_id.Basename(), new_si)
                except db.UnknownClientError:
                    # On first contact with a new client, this write will fail.
                    logging.info(
                        "Can't write StartupInfo for unknown client %s",
                        client_id)
        else:
            changes = False
            with aff4.FACTORY.Create(client_id,
                                     aff4_grr.VFSGRRClient,
                                     mode="rw",
                                     token=self.token) as client:
                old_info = client.Get(client.Schema.CLIENT_INFO)
                old_boot = client.Get(client.Schema.LAST_BOOT_TIME, 0)

                info = new_si.client_info

                # Only write to the datastore if we have new information.
                if info != old_info:
                    client.Set(client.Schema.CLIENT_INFO(info))
                    changes = True

                client.AddLabels(info.labels, owner="GRR")

                # Allow for some drift in the boot times (5 minutes).
                if not old_boot or abs(old_boot - new_si.boot_time) > drift:
                    client.Set(client.Schema.LAST_BOOT_TIME(new_si.boot_time))
                    changes = True

            if data_store.RelationalDBWriteEnabled() and changes:
                try:
                    data_store.REL_DB.WriteClientStartupInfo(
                        client_id.Basename(), new_si)
                except db.UnknownClientError:
                    pass
Example #15
    def CreateFileVersions(self, client_id, file_path):
        """Add a new version for a file."""
        path_type, components = rdf_objects.ParseCategorizedPath(file_path)

        with test_lib.FakeTime(self.time_1):
            token = access_control.ACLToken(username="******")
            fd = aff4.FACTORY.Create(client_id.Add(file_path),
                                     aff4.AFF4MemoryStream,
                                     mode="w",
                                     token=token)
            fd.Write("Hello World")
            fd.Close()

            if data_store.RelationalDBWriteEnabled():
                path_info = rdf_objects.PathInfo()
                path_info.path_type = path_type
                path_info.components = components
                path_info.directory = False

                data_store.REL_DB.WritePathInfos(client_id.Basename(),
                                                 [path_info])

        with test_lib.FakeTime(self.time_2):
            fd = aff4.FACTORY.Create(client_id.Add(file_path),
                                     aff4.AFF4MemoryStream,
                                     mode="w",
                                     token=token)
            fd.Write("Goodbye World")
            fd.Close()

            if data_store.RelationalDBWriteEnabled():
                path_info = rdf_objects.PathInfo()
                path_info.path_type = path_type
                path_info.components = components
                path_info.directory = False

                data_store.REL_DB.WritePathInfos(client_id.Basename(),
                                                 [path_info])
Example #16
    def ProcessKnowledgeBase(self, responses):
        """Collect and store any extra non-kb artifacts."""
        if not responses.success:
            raise flow.FlowError(
                "Error while collecting the knowledge base: %s" %
                responses.status)

        kb = responses.First()
        # AFF4 client.
        client = self._OpenClient(mode="rw")
        client.Set(client.Schema.KNOWLEDGE_BASE, kb)

        # Copy usernames.
        usernames = [user.username for user in kb.users if user.username]
        client.AddAttribute(client.Schema.USERNAMES(" ".join(usernames)))

        self.CopyOSReleaseFromKnowledgeBase(kb, client)
        client.Flush()

        # objects.ClientSnapshot.

        # Information already present in the knowledge base takes precedence.
        if not kb.os:
            kb.os = self.state.system

        if not kb.fqdn:
            kb.fqdn = self.state.fqdn
        self.state.client.knowledge_base = kb

        artifact_list = [
            "WMILogicalDisks", "RootDiskVolumeUsage",
            "WMIComputerSystemProduct", "LinuxHardwareInfo",
            "OSXSPHardwareDataType"
        ]
        self.CallFlow(collectors.ArtifactCollectorFlow.__name__,
                      artifact_list=artifact_list,
                      next_state="ProcessArtifactResponses")

        # Update the client index for the AFF4 client.
        client_index.CreateClientIndex(token=self.token).AddClient(client)

        if data_store.RelationalDBWriteEnabled():
            try:
                # Update the client index for the objects.ClientSnapshot.
                client_index.ClientIndex().AddClient(self.state.client)
            except db.UnknownClientError:
                pass
Example #17
def WriteStatEntries(stat_entries, client_id, mutation_pool, token=None):
    """Persists information about stat entries.

  Args:
    stat_entries: A list of `StatEntry` instances.
    client_id: An id of a client the stat entries come from.
    mutation_pool: A mutation pool used for writing into the AFF4 data store.
    token: A token used for writing into the AFF4 data store.
  """
    for stat_entry in stat_entries:
        CreateAFF4Object(stat_entry,
                         client_id=client_id,
                         mutation_pool=mutation_pool,
                         token=token)

    if data_store.RelationalDBWriteEnabled():
        path_infos = map(rdf_objects.PathInfo.FromStatEntry, stat_entries)
        data_store.REL_DB.WritePathInfos(client_id.Basename(), path_infos)
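A hypothetical call site for the helper above, following the flow pattern of Example #6: client responses are converted to StatEntry objects and written while a mutation pool is open.

# Hypothetical call site; `self` and `responses` stand in for a flow context
# as in Example #6.
stat_entries = map(rdf_client.StatEntry, responses)
with data_store.DB.GetMutationPool() as pool:
    WriteStatEntries(stat_entries,
                     client_id=self.client_id,
                     mutation_pool=pool,
                     token=self.token)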
Example #18
def CreateFolder(client_id, path, timestamp, token=None):
    """Creates a VFS folder."""
    with test_lib.FakeTime(timestamp):
        with aff4.FACTORY.Create(client_id.Add(path),
                                 aff4_type=aff4_standard.VFSDirectory,
                                 mode="w",
                                 token=token) as _:
            pass

        if data_store.RelationalDBWriteEnabled():
            path_type, components = rdf_objects.ParseCategorizedPath(path)

            path_info = rdf_objects.PathInfo()
            path_info.path_type = path_type
            path_info.components = components
            path_info.directory = True

            data_store.REL_DB.WritePathInfos(client_id.Basename(), [path_info])
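A hypothetical use of this test helper; the path and timestamp are illustrative, and client_id is assumed to be a ClientURN as elsewhere on this page.

# Illustrative only: creates the folder at fake epoch second 42 and mirrors
# it into the relational store when that backend is enabled.
CreateFolder(client_id, "fs/os/c/Downloads", 42, token=token)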
Example #19
def CreateFileVersion(client_id, path, content, timestamp, token=None):
  """Add a new version for a file."""
  with test_lib.FakeTime(timestamp):
    with aff4.FACTORY.Create(
        client_id.Add(path), aff4_type=aff4_grr.VFSFile, mode="w",
        token=token) as fd:
      fd.Write(content)
      fd.Set(fd.Schema.CONTENT_LAST, rdfvalue.RDFDatetime.Now())

    if data_store.RelationalDBWriteEnabled():
      path_type, components = rdf_objects.ParseCategorizedPath(path)

      path_info = rdf_objects.PathInfo()
      path_info.path_type = path_type
      path_info.components = components
      path_info.directory = False

      data_store.REL_DB.WritePathInfos(client_id.Basename(), [path_info])
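Likewise, a hypothetical use of CreateFileVersion, mirroring the two-version setup of Example #15 (paths and timestamps illustrative):

# Illustrative only: two versions of the same file at different fake times.
CreateFileVersion(client_id, "fs/os/c/Downloads/a.txt", "Hello World", 100,
                  token=token)
CreateFileVersion(client_id, "fs/os/c/Downloads/a.txt", "Goodbye World", 200,
                  token=token)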
Example #20
    def ReadBuffer(self, responses):
        """Read the buffer and write to the file."""
        # Did it work?
        if responses.success:
            response = responses.First()
            if not response:
                raise IOError("Missing hash for offset %s" % response.offset)

            if response.offset <= self.state.max_chunk_number * self.CHUNK_SIZE:
                # Response.data is the hash of the block (32 bytes) and
                # response.length is the length of the block.
                self.state.blobs.append((response.data, response.length))
                self.Log("Received blob hash %s", response.data.encode("hex"))

                # Add one more chunk to the window.
                self.FetchWindow(1)

            if response.offset + response.length >= self.state.file_size:
                # File is complete.
                stat_entry = self.state.stat_entry
                urn = self.state.stat_entry.AFF4Path(self.client_id)

                with aff4.FACTORY.Create(urn,
                                         aff4_grr.VFSBlobImage,
                                         token=self.token) as fd:
                    fd.SetChunksize(self.CHUNK_SIZE)
                    fd.Set(fd.Schema.STAT(stat_entry))

                    for data, length in self.state.blobs:
                        fd.AddBlob(data, length)
                        fd.Set(fd.Schema.CONTENT_LAST,
                               rdfvalue.RDFDatetime.Now())

                    # Save some space.
                    del self.state.blobs

                if data_store.RelationalDBWriteEnabled():
                    client_id = self.client_id.Basename()
                    path_info = rdf_objects.PathInfo.FromStatEntry(stat_entry)
                    data_store.REL_DB.WritePathInfos(client_id, [path_info])

                self.state.success = True
Example #21
    def _CreateFile(self, path, content, hashing=False):
        with aff4.FACTORY.Create(path, aff4.AFF4MemoryStream,
                                 token=self.token) as fd:
            fd.Write(content)

            if hashing:
                digest = hashlib.sha256(content).digest()
                fd.Set(fd.Schema.HASH, rdf_crypto.Hash(sha256=digest))

                if data_store.RelationalDBWriteEnabled():
                    client_id, vfs_path = path.Split(2)
                    path_type, components = rdf_objects.ParseCategorizedPath(
                        vfs_path)

                    path_info = rdf_objects.PathInfo()
                    path_info.path_type = path_type
                    path_info.components = components
                    path_info.hash_entry.sha256 = digest
                    data_store.REL_DB.WritePathInfos(client_id, [path_info])
Example #22
    def WriteAllCrashDetails(self,
                             client_id,
                             crash_details,
                             flow_session_id=None,
                             hunt_session_id=None,
                             token=None):
        """Updates the last crash attribute of the client."""

        # AFF4.
        with aff4.FACTORY.Create(client_id, aff4_grr.VFSGRRClient,
                                 token=token) as client_obj:
            client_obj.Set(client_obj.Schema.LAST_CRASH(crash_details))

        # Relational db.
        if data_store.RelationalDBWriteEnabled():
            try:
                data_store.REL_DB.WriteClientCrashInfo(client_id.Basename(),
                                                       crash_details)
            except db.UnknownClientError:
                pass

        # Duplicate the crash information in a number of places so we can find it
        # easily.
        client_crashes = aff4_grr.VFSGRRClient.CrashCollectionURNForCID(
            client_id)
        self._AppendCrashDetails(client_crashes, crash_details)

        if flow_session_id:
            with aff4.FACTORY.Open(flow_session_id,
                                   flow.GRRFlow,
                                   mode="rw",
                                   age=aff4.NEWEST_TIME,
                                   token=token) as aff4_flow:
                aff4_flow.Set(aff4_flow.Schema.CLIENT_CRASH(crash_details))

            hunt_session_id = self._ExtractHuntId(flow_session_id)
            if hunt_session_id and hunt_session_id != flow_session_id:
                hunt_obj = aff4.FACTORY.Open(hunt_session_id,
                                             aff4_type=implementation.GRRHunt,
                                             mode="rw",
                                             token=token)
                hunt_obj.RegisterCrash(crash_details)
Example #23
    def Handle(self, args, token=None):
        audit_description = ",".join([
            token.username + u"." + utils.SmartUnicode(name)
            for name in args.labels
        ])
        audit_events = []

        try:
            index = client_index.CreateClientIndex(token=token)
            client_objs = aff4.FACTORY.MultiOpen(
                [cid.ToClientURN() for cid in args.client_ids],
                aff4_type=aff4_grr.VFSGRRClient,
                mode="rw",
                token=token)
            for client_obj in client_objs:
                if data_store.RelationalDBWriteEnabled():
                    cid = client_obj.urn.Basename()
                    data_store.REL_DB.RemoveClientLabels(
                        cid, token.username, args.labels)
                    labels_to_remove = set(args.labels)
                    existing_labels = data_store.REL_DB.ReadClientLabels(cid)
                    for label in existing_labels:
                        labels_to_remove.discard(label.name)
                    if labels_to_remove:
                        idx = client_index.ClientIndex()
                        idx.RemoveClientLabels(cid, labels_to_remove)

                index.RemoveClientLabels(client_obj)
                self.RemoveClientLabels(client_obj, args.labels)
                index.AddClient(client_obj)
                client_obj.Close()

                audit_events.append(
                    rdf_events.AuditEvent(
                        user=token.username,
                        action="CLIENT_REMOVE_LABEL",
                        flow_name="handler.ApiRemoveClientsLabelsHandler",
                        client=client_obj.urn,
                        description=audit_description))
        finally:
            events.Events.PublishMultipleEvents(
                {audit.AUDIT_EVENT: audit_events}, token=token)
Example #24
    def Handle(self, args, token=None):
        audit_description = ",".join([
            token.username + u"." + utils.SmartUnicode(name)
            for name in args.labels
        ])
        audit_events = []

        try:
            index = client_index.CreateClientIndex(token=token)
            client_objs = aff4.FACTORY.MultiOpen(
                [cid.ToClientURN() for cid in args.client_ids],
                aff4_type=aff4_grr.VFSGRRClient,
                mode="rw",
                token=token)
            for client_obj in client_objs:
                if data_store.RelationalDBWriteEnabled():
                    cid = client_obj.urn.Basename()
                    try:
                        data_store.REL_DB.AddClientLabels(
                            cid, token.username, args.labels)
                        idx = client_index.ClientIndex()
                        idx.AddClientLabels(cid, args.labels)
                    except db.UnknownClientError:
                        # TODO(amoser): Remove after data migration.
                        pass

                client_obj.AddLabels(args.labels)
                index.AddClient(client_obj)
                client_obj.Close()

                audit_events.append(
                    rdf_events.AuditEvent(
                        user=token.username,
                        action="CLIENT_ADD_LABEL",
                        flow_name="handler.ApiAddClientsLabelsHandler",
                        client=client_obj.urn,
                        description=audit_description))
        finally:
            events.Events.PublishMultipleEvents(
                {audit.AUDIT_EVENT: audit_events}, token=token)
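This handler and its counterpart in Example #23 exercise a symmetric relational label API; reduced to the calls themselves (aliases as above; cid, owner, and labels are illustrative placeholders):

# Reduced view of the relational label surface used by Examples #23 and #24.
data_store.REL_DB.AddClientLabels(cid, owner, labels)
data_store.REL_DB.RemoveClientLabels(cid, owner, labels)
labels_read = data_store.REL_DB.ReadClientLabels(cid)  # label objects with .name

idx = client_index.ClientIndex()
idx.AddClientLabels(cid, labels)
idx.RemoveClientLabels(cid, labels)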
Example #25
    def testSwitchingBetweenFilesRefreshesFileHashes(self):
        # Create 2 files and set their HASH attributes to different values.
        # Note that a string passed to the fd.Schema.HASH constructor will be
        # printed as a hexadecimal bytestring. Thus "111" will become "313131"
        # and "222" will become "323232".
        urn_a = rdfvalue.RDFURN("%s/fs/os/c/Downloads/a.txt" % self.client_id)
        with aff4.FACTORY.Open(urn_a, mode="rw") as fd:
            fd.Set(fd.Schema.HASH(sha256="111"))

        urn_b = rdfvalue.RDFURN("%s/fs/os/c/Downloads/b.txt" % self.client_id)
        with aff4.FACTORY.Open(urn_b, mode="rw") as fd:
            fd.Set(fd.Schema.HASH(sha256="222"))

        if data_store.RelationalDBWriteEnabled():
            path_info_a = rdf_objects.PathInfo()
            path_info_a.path_type = rdf_objects.PathInfo.PathType.OS
            path_info_a.components = ["c", "Downloads", "a.txt"]
            path_info_a.hash_entry.sha256 = b"111"

            path_info_b = rdf_objects.PathInfo()
            path_info_b.path_type = rdf_objects.PathInfo.PathType.OS
            path_info_b.components = ["c", "Downloads", "b.txt"]
            path_info_b.hash_entry.sha256 = b"222"

            data_store.REL_DB.WritePathInfos(self.client_id,
                                             [path_info_a, path_info_b])

        # Open a URL pointing to file "a".
        self.Open("/#/clients/%s/vfs/fs/os/c/Downloads/a.txt?tab=download" %
                  self.client_id)
        self.WaitUntil(self.IsElementPresent,
                       "css=tr:contains('Sha256') td:contains('313131')")

        # Click on a file table row with file "b". Information in the download
        # tab should get rerendered and we should see Sha256 value corresponding
        # to file "b".
        self.Click("css=tr:contains(\"b.txt\")")
        self.WaitUntil(self.IsElementPresent,
                       "css=tr:contains('Sha256') td:contains('323232')")
Example #26
    def Execute(self, thread_count):
        """Runs the migration procedure.

    Args:
      thread_count: A number of threads to execute the migration with.

    Raises:
      AssertionError: If not all clients have been migrated.
      ValueError: If the relational database backend is not available.
    """
        if not data_store.RelationalDBWriteEnabled():
            raise ValueError("No relational database available.")

        sys.stdout.write("Collecting clients...\n")
        client_urns = _GetClientUrns()

        sys.stdout.write("Clients to migrate: {}\n".format(len(client_urns)))
        sys.stdout.write("Threads to use: {}\n".format(thread_count))

        self._total_count = len(client_urns)
        self._migrated_count = 0
        self._start_time = rdfvalue.RDFDatetime.Now()

        batches = utils.Grouper(client_urns, _CLIENT_BATCH_SIZE)

        self._Progress()
        tp = pool.ThreadPool(processes=thread_count)
        tp.map(self._MigrateBatch, list(batches))
        self._Progress()

        if self._migrated_count == self._total_count:
            message = "\nMigration finished (migrated {} clients).\n".format(
                self._migrated_count)
            sys.stdout.write(message)
        else:
            message = "Not all clients have been migrated ({}/{})".format(
                self._migrated_count, self._total_count)
            raise AssertionError(message)
Example #27
    def ProcessStat(self, responses):
        # Did it work?
        if not responses.success:
            # It's better to raise than merely log, since the error will make
            # it into the flow's protobuf and users can inspect why this flow
            # failed.
            raise IOError("Could not stat file: %s" % responses.status)

        client_stat = responses.First()

        # Update the pathspec to the one we got from the client.
        self.state.pathspec = client_stat.pathspec

        # If the file was big enough, we'll store it as an AFF4SparseImage
        if client_stat.st_size > self.args.size_threshold:
            urn = self.state.pathspec.AFF4Path(self.client_id)

            # TODO(user) When we can check the last update time of the
            # contents of a file, raise if the contents have been updated before here.

            fd = aff4.FACTORY.Create(urn,
                                     aff4_type=standard.AFF4SparseImage,
                                     token=self.token,
                                     mode="rw")
            fd.Set(fd.Schema.PATHSPEC, self.state.pathspec)
            fd.Set(fd.Schema.STAT, client_stat)
            fd.Flush()

            if data_store.RelationalDBWriteEnabled():
                path_info = rdf_objects.PathInfo.FromStatEntry(client_stat)
                data_store.REL_DB.WritePathInfos(self.client_id.Basename(),
                                                 [path_info])
        else:
            # Otherwise, just get the whole file.
            self.CallFlow(transfer.MultiGetFile.__name__,
                          pathspecs=[self.state.pathspec],
                          next_state="End")
Example #28
  def _CheckHashesWithFileStore(self):
    """Check all queued up hashes for existence in file store.

    Hashes which do not exist in the file store will be downloaded. This
    function flushes the entire queue (self.state.pending_hashes) in order to
    minimize the round trips to the file store.

    If a file was found in the file store it is copied from there into the
    client's VFS namespace. Otherwise, we request the client to hash every block
    in the file, and add it to the file tracking queue
    (self.state.pending_files).
    """
    if not self.state.pending_hashes:
      return

    # This map represents all the hashes in the pending urns.
    file_hashes = {}

    # Store a mapping of hash to tracker. Keys are hashdigest objects,
    # values are arrays of tracker dicts.
    hash_to_tracker = {}
    for index, tracker in self.state.pending_hashes.iteritems():

      # We might not have gotten this hash yet
      if tracker.get("hash_obj") is None:
        continue

      hash_obj = tracker["hash_obj"]
      digest = hash_obj.sha256
      file_hashes[index] = hash_obj
      hash_to_tracker.setdefault(digest, []).append(tracker)

    # First we get all the files which are present in the file store.
    files_in_filestore = {}

    # TODO(amoser): This object never changes, could this be a class attribute?
    filestore_obj = aff4.FACTORY.Open(
        filestore.FileStore.PATH,
        filestore.FileStore,
        mode="r",
        token=self.token)

    for file_store_urn, hash_obj in filestore_obj.CheckHashes(
        file_hashes.values(), external=self.state.use_external_stores):

      self.HeartBeat()

      # Since CheckHashes only returns one digest per unique hash, we need to
      # find any other files pending download with the same hash.
      for tracker in hash_to_tracker[hash_obj.sha256]:
        self.state.files_skipped += 1
        file_hashes.pop(tracker["index"])
        files_in_filestore[file_store_urn] = hash_obj
        # Remove this tracker from the pending_hashes store since we no longer
        # need to process it.
        self.state.pending_hashes.pop(tracker["index"])

    # Now that the check is done, reset our counter
    self.state.files_hashed_since_check = 0
    # Now copy all existing files to the client aff4 space.
    for filestore_file_urn, hash_obj in files_in_filestore.iteritems():

      for file_tracker in hash_to_tracker.get(hash_obj.sha256, []):
        stat_entry = file_tracker["stat_entry"]
        # Copy the existing file from the filestore to the client namespace.
        target_urn = stat_entry.pathspec.AFF4Path(self.client_id)

        aff4.FACTORY.Copy(
            filestore_file_urn, target_urn, update_timestamps=True)

        with aff4.FACTORY.Open(
            target_urn, mode="rw", token=self.token) as new_fd:
          new_fd.Set(new_fd.Schema.STAT, stat_entry)
          # Due to potential filestore corruption, the existing files
          # can have 0 size.
          if new_fd.size == 0:
            new_fd.size = (file_tracker["bytes_read"] or stat_entry.st_size)

        if data_store.RelationalDBWriteEnabled():
          client_id = self.client_id.Basename()
          path_info = rdf_objects.PathInfo.FromStatEntry(stat_entry)
          data_store.REL_DB.WritePathInfos(client_id, [path_info])

        # Add this file to the filestore index.
        filestore_obj.AddURNToIndex(str(hash_obj.sha256), target_urn)

        # Report this hit to the flow's caller.
        self._ReceiveFetchedFile(file_tracker)

    # Now we iterate over all the files which are not in the store and arrange
    # for them to be copied.
    for index in file_hashes:

      # Move the tracker from the pending hashes store to the pending files
      # store - it will now be downloaded.
      file_tracker = self.state.pending_hashes.pop(index)
      self.state.pending_files[index] = file_tracker

      # If we already know how big the file is we use that, otherwise fall back
      # to the size reported by stat.
      if file_tracker["bytes_read"] > 0:
        file_tracker["size_to_download"] = file_tracker["bytes_read"]
      else:
        file_tracker["size_to_download"] = file_tracker["stat_entry"].st_size

      # We do not have the file here yet - we need to retrieve it.
      expected_number_of_hashes = (
          file_tracker["size_to_download"] / self.CHUNK_SIZE + 1)

      # We just hash ALL the chunks in the file now. NOTE: This maximizes client
      # VFS cache hit rate and is far more efficient than launching multiple
      # GetFile flows.
      self.state.files_to_fetch += 1

      for i in range(expected_number_of_hashes):
        if i == expected_number_of_hashes - 1:
          # The last chunk is short.
          length = file_tracker["size_to_download"] % self.CHUNK_SIZE
        else:
          length = self.CHUNK_SIZE
        self.CallClient(
            server_stubs.HashBuffer,
            pathspec=file_tracker["stat_entry"].pathspec,
            offset=i * self.CHUNK_SIZE,
            length=length,
            next_state="CheckHash",
            request_data=dict(index=index))

    if self.state.files_hashed % 100 == 0:
      self.Log("Hashed %d files, skipped %s already stored.",
               self.state.files_hashed, self.state.files_skipped)
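The chunk arithmetic above relies on Python 2 integer division, so a worked instance may help: with illustrative sizes, a 1300 KiB file under a 512 KiB CHUNK_SIZE yields three HashBuffer requests, the last one short. (As written, a file whose size is an exact multiple of CHUNK_SIZE gets a final zero-length request.)

# Worked instance of the chunk plan (Python 2: "/" is integer division).
CHUNK_SIZE = 512 * 1024
size_to_download = 1300 * 1024

expected_number_of_hashes = size_to_download / CHUNK_SIZE + 1  # == 3
for i in range(expected_number_of_hashes):
  if i == expected_number_of_hashes - 1:
    length = size_to_download % CHUNK_SIZE  # short last chunk: 276 KiB
  else:
    length = CHUNK_SIZE
  # Request i covers offset i * CHUNK_SIZE for `length` bytes.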
Example #29
    def HandleRequest(self, request):
        """Handles given HTTP request."""
        impersonated_username = config.CONFIG["AdminUI.debug_impersonate_user"]
        if impersonated_username:
            logging.info("Overriding user as %s", impersonated_username)
            request.user = impersonated_username

        if not aff4_users.GRRUser.IsValidUsername(request.user):
            return self._BuildResponse(
                403, dict(message="Invalid username: %s" % request.user))

        try:
            router, method_metadata, args = self._router_matcher.MatchRouter(
                request)
        except access_control.UnauthorizedAccess as e:
            logging.exception("Access denied to %s (%s): %s", request.path,
                              request.method, e)

            additional_headers = {
                "X-GRR-Unauthorized-Access-Reason":
                utils.SmartStr(e.message).replace("\n", ""),
                "X-GRR-Unauthorized-Access-Subject":
                utils.SmartStr(e.subject)
            }
            return self._BuildResponse(
                403,
                dict(message="Access denied by ACL: %s" %
                     utils.SmartStr(e.message),
                     subject=utils.SmartStr(e.subject)),
                headers=additional_headers)

        except ApiCallRouterNotFoundError as e:
            return self._BuildResponse(404, dict(message=e.message))
        except werkzeug_exceptions.MethodNotAllowed as e:
            return self._BuildResponse(405, dict(message=e.message))
        except Error as e:
            logging.exception("Can't match URL to router/method: %s", e)

            return self._BuildResponse(
                500, dict(message=str(e), traceBack=traceback.format_exc()))

        request.method_metadata = method_metadata
        request.parsed_args = args

        # SetUID() is called here so that ACL checks done by the router do not
        # clash with datastore ACL checks.
        # TODO(user): increase token expiry time.
        token = self.BuildToken(request, 60).SetUID()

        # We send a blind-write request to ensure that the user object is created
        # for a user specified by the username.
        user_urn = rdfvalue.RDFURN("aff4:/users/").Add(request.user)
        # We can't use the conventional AFF4 interface, since aff4.FACTORY.Create
        # would create a new version of the object on every call.
        with data_store.DB.GetMutationPool() as pool:
            pool.MultiSet(user_urn, {
                aff4_users.GRRUser.SchemaCls.TYPE:
                [aff4_users.GRRUser.__name__],
                aff4_users.GRRUser.SchemaCls.LAST:
                [rdfvalue.RDFDatetime.Now().SerializeToDataStore()]
            },
                          replace=True)

        if data_store.RelationalDBWriteEnabled():
            data_store.REL_DB.WriteGRRUser(request.user)

        handler = None
        try:
            # ACL checks are done here by the router. If this method succeeds (i.e.
            # does not raise), then handlers run without further ACL checks (they're
            # free to do some in their own implementations, though).
            handler = getattr(router, method_metadata.name)(args, token=token)

            if handler.args_type != method_metadata.args_type:
                raise RuntimeError(
                    "Handler args type doesn't match "
                    "method args type: %s vs %s" %
                    (handler.args_type, method_metadata.args_type))

            binary_result_type = (
                api_call_router.RouterMethodMetadata.BINARY_STREAM_RESULT_TYPE)

            if (handler.result_type != method_metadata.result_type and
                    not (handler.result_type is None and
                         method_metadata.result_type == binary_result_type)):
                raise RuntimeError(
                    "Handler result type doesn't match "
                    "method result type: %s vs %s" %
                    (handler.result_type, method_metadata.result_type))

            # HEAD method is only used for checking the ACLs for particular API
            # methods.
            if request.method == "HEAD":
                # If the request would return a stream, we add the Content-Length
                # header to the response.
                if (method_metadata.result_type ==
                        method_metadata.BINARY_STREAM_RESULT_TYPE):
                    binary_stream = handler.Handle(args, token=token)
                    return self._BuildResponse(
                        200, {"status": "OK"},
                        method_name=method_metadata.name,
                        no_audit_log=method_metadata.no_audit_log_required,
                        content_length=binary_stream.content_length,
                        token=token)
                else:
                    return self._BuildResponse(
                        200, {"status": "OK"},
                        method_name=method_metadata.name,
                        no_audit_log=method_metadata.no_audit_log_required,
                        token=token)

            if (method_metadata.result_type ==
                    method_metadata.BINARY_STREAM_RESULT_TYPE):
                binary_stream = handler.Handle(args, token=token)
                return self._BuildStreamingResponse(
                    binary_stream, method_name=method_metadata.name)
            else:
                format_mode = GetRequestFormatMode(request, method_metadata)
                result = self.CallApiHandler(handler, args, token=token)
                rendered_data = self._FormatResultAsJson(
                    result, format_mode=format_mode)

                return self._BuildResponse(
                    200,
                    rendered_data,
                    method_name=method_metadata.name,
                    no_audit_log=method_metadata.no_audit_log_required,
                    token=token)
        except access_control.UnauthorizedAccess as e:
            logging.exception("Access denied to %s (%s) with %s: %s",
                              request.path, request.method,
                              method_metadata.name, e)

            additional_headers = {
                "X-GRR-Unauthorized-Access-Reason":
                utils.SmartStr(e.message).replace("\n", ""),
                "X-GRR-Unauthorized-Access-Subject":
                utils.SmartStr(e.subject)
            }
            return self._BuildResponse(
                403,
                dict(message="Access denied by ACL: %s" % e.message,
                     subject=utils.SmartStr(e.subject)),
                headers=additional_headers,
                method_name=method_metadata.name,
                no_audit_log=method_metadata.no_audit_log_required,
                token=token)
        except api_call_handler_base.ResourceNotFoundError as e:
            return self._BuildResponse(
                404,
                dict(message=e.message),
                method_name=method_metadata.name,
                no_audit_log=method_metadata.no_audit_log_required,
                token=token)
        except NotImplementedError as e:
            return self._BuildResponse(
                501,
                dict(message=e.message),
                method_name=method_metadata.name,
                no_audit_log=method_metadata.no_audit_log_required,
                token=token)
        except Exception as e:  # pylint: disable=broad-except
            logging.exception("Error while processing %s (%s) with %s: %s",
                              request.path, request.method,
                              handler.__class__.__name__, e)
            return self._BuildResponse(
                500,
                dict(message=str(e), traceBack=traceback.format_exc()),
                method_name=method_metadata.name,
                no_audit_log=method_metadata.no_audit_log_required,
                token=token)
Example #30
  def VerifyMessageSignature(self, response_comms, packed_message_list, cipher,
                             cipher_verified, api_version, remote_public_key):
    """Verifies the message list signature.

    In the server we check that the timestamp is later than the ping timestamp
    stored with the client. This ensures that client responses cannot be
    replayed.

    Args:
      response_comms: The raw response_comms rdfvalue.
      packed_message_list: The PackedMessageList rdfvalue from the server.
      cipher: The cipher object that should be used to verify the message.
      cipher_verified: If True, the cipher's signature is not verified again.
      api_version: The api version we should use.
      remote_public_key: The public key of the source.
    Returns:
      An rdf_flows.GrrMessage.AuthorizationState.
    """
    if (not cipher_verified and
        not cipher.VerifyCipherSignature(remote_public_key)):
      stats.STATS.IncrementCounter("grr_unauthenticated_messages")
      return rdf_flows.GrrMessage.AuthorizationState.UNAUTHENTICATED

    try:
      client_id = cipher.cipher_metadata.source
      try:
        client = self.client_cache.Get(client_id)
      except KeyError:
        client = aff4.FACTORY.Create(
            client_id,
            aff4.AFF4Object.classes["VFSGRRClient"],
            mode="rw",
            token=self.token)
        self.client_cache.Put(client_id, client)
        stats.STATS.SetGaugeValue("grr_frontendserver_client_cache_size",
                                  len(self.client_cache))

      ip = response_comms.orig_request.source_ip
      client.Set(client.Schema.CLIENT_IP(ip))

      # For the very first packet we see from the client, we do not yet have
      # its clock.
      remote_time = client.Get(client.Schema.CLOCK) or rdfvalue.RDFDatetime(0)
      client_time = packed_message_list.timestamp or rdfvalue.RDFDatetime(0)

      # This used to be a strict check here so absolutely no out of
      # order messages would be accepted ever. Turns out that some
      # proxies can send your request with some delay even if the
      # client has already timed out (and sent another request in
      # the meantime, making the first one out of order). In that
      # case we would just kill the whole flow as a
      # precaution. Given the behavior of those proxies, this seems
      # now excessive and we have changed the replay protection to
      # only trigger on messages that are more than one hour old.

      if client_time < long(remote_time - rdfvalue.Duration("1h")):
        logging.warning("Message desynchronized for %s: %s >= %s", client_id,
                        long(remote_time), int(client_time))
        # This is likely an old message
        return rdf_flows.GrrMessage.AuthorizationState.DESYNCHRONIZED

      stats.STATS.IncrementCounter("grr_authenticated_messages")

      # Update the client and server timestamps only if the client
      # time moves forward.
      if client_time > long(remote_time):
        client.Set(client.Schema.CLOCK, rdfvalue.RDFDatetime(client_time))
        client.Set(client.Schema.PING, rdfvalue.RDFDatetime.Now())

        clock = client_time
        ping = rdfvalue.RDFDatetime.Now()

        for label in client.Get(client.Schema.LABELS, []):
          stats.STATS.IncrementCounter(
              "client_pings_by_label", fields=[label.name])
      else:
        clock = None
        ping = None
        logging.warning("Out of order message for %s: %s >= %s", client_id,
                        long(remote_time), int(client_time))

      client.Flush()
      if data_store.RelationalDBWriteEnabled():
        source_ip = response_comms.orig_request.source_ip
        if source_ip:
          last_ip = rdf_client.NetworkAddress(
              human_readable_address=response_comms.orig_request.source_ip)
        else:
          last_ip = None

        if ping or clock or last_ip:
          try:
            data_store.REL_DB.WriteClientMetadata(
                client_id.Basename(),
                last_ip=last_ip,
                last_clock=clock,
                last_ping=ping,
                fleetspeak_enabled=False)
          except db.UnknownClientError:
            pass

    except communicator.UnknownClientCert:
      pass

    return rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED
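A worked instance of the one-hour replay window above (timestamps illustrative, and RDFDatetime.FromHumanReadable parsing assumed, as in GRR tests): with a stored client clock of 10:30, a message stamped 09:00 falls outside the window and would be flagged DESYNCHRONIZED, while 09:45 is still accepted.

# Illustrative replay-window check, mirroring the comparison above.
remote_time = rdfvalue.RDFDatetime.FromHumanReadable("2018-01-01 10:30:00")
window_start = long(remote_time - rdfvalue.Duration("1h"))  # 09:30:00 in us

stale = rdfvalue.RDFDatetime.FromHumanReadable("2018-01-01 09:00:00")
fresh = rdfvalue.RDFDatetime.FromHumanReadable("2018-01-01 09:45:00")

assert stale < window_start        # would be treated as DESYNCHRONIZED
assert not (fresh < window_start)  # accepted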