Example #1
    def End(self):
        """Finalize client registration."""
        # Update summary and publish to the Discovery queue.

        if data_store.RelationalDBWriteEnabled():
            try:
                data_store.REL_DB.WriteClientSnapshot(self.state.client)
            except db.UnknownClientError:
                pass

        client = self._OpenClient()

        if data_store.RelationalDBReadEnabled():
            summary = self.state.client.GetSummary()
            summary.client_id = self.client_id
            summary.timestamp = rdfvalue.RDFDatetime.Now()
        else:
            summary = client.GetSummary()

        self.Publish("Discovery", summary)
        self.SendReply(summary)

        # Update the client index
        client_index.CreateClientIndex(token=self.token).AddClient(client)
        if data_store.RelationalDBWriteEnabled():
            try:
                index = client_index.ClientIndex()
                index.AddClient(self.state.client)
                labels = self.state.client.startup_info.client_info.labels
                if labels:
                    data_store.REL_DB.AddClientLabels(
                        self.state.client.client_id, u"GRR", labels)
            except db.UnknownClientError:
                # TODO(amoser): Remove after data migration.
                pass
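
Every example on this page follows the same dual-write shape as above: the legacy AFF4 store is written unconditionally, and the relational store is written only when data_store.RelationalDBWriteEnabled() returns true, with db.UnknownClientError swallowed for clients that have not been migrated yet. A minimal sketch of that guard pattern, using the hypothetical helper name WriteSnapshotEverywhere (only the data_store guards, client_index calls, and db.UnknownClientError come from the example):

def WriteSnapshotEverywhere(snapshot, aff4_client, token):
    # Legacy path: always update the AFF4 client index.
    client_index.CreateClientIndex(token=token).AddClient(aff4_client)

    # Relational path: only taken when relational writes are enabled, so
    # deployments that have not been migrated skip it entirely.
    if data_store.RelationalDBWriteEnabled():
        try:
            data_store.REL_DB.WriteClientSnapshot(snapshot)
            client_index.ClientIndex().AddClient(snapshot)
        except db.UnknownClientError:
            # The client does not exist in the relational DB yet.
            pass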
Example #2
    def _CreateFile(self,
                    path,
                    content,
                    hashing=False,
                    aff4_type=aff4.AFF4MemoryStream):
        if hashing:
            digest = hashlib.sha256(content).digest()
        else:
            digest = None

        if data_store.RelationalDBReadEnabled("filestore"):
            self.assertTrue(data_store.RelationalDBWriteEnabled())
            self.assertTrue(hashing)
        else:
            with aff4.FACTORY.Create(path, aff4_type, token=self.token) as fd:
                fd.Write(content)

                if digest:
                    fd.Set(fd.Schema.HASH, rdf_crypto.Hash(sha256=digest))

        if data_store.RelationalDBWriteEnabled() and hashing:
            client_id, vfs_path = path.Split(2)
            path_type, components = rdf_objects.ParseCategorizedPath(vfs_path)

            path_info = rdf_objects.PathInfo()
            path_info.path_type = path_type
            path_info.components = components

            blob_id = rdf_objects.BlobID.FromBytes(digest)
            data_store.BLOBS.WriteBlobs({blob_id: content})
            hash_id = file_store.AddFileWithUnknownHash([blob_id])
            path_info.hash_entry.sha256 = hash_id.AsBytes()

            data_store.REL_DB.WritePathInfos(client_id, [path_info])
Example #3
    def EnrolFleetspeakClient(self, client_id):
        """Enrols a Fleetspeak-enabled client for use with GRR.

        Args:
          client_id: GRR client-id for the client.

        Returns:
          True if the client is new, and actually got enrolled. This method
          is a no-op if the client already exists (in which case False is
          returned).
        """
        client_urn = rdf_client.ClientURN(client_id)

        # If already enrolled, return.
        if data_store.RelationalDBReadEnabled():
            try:
                data_store.REL_DB.ReadClientMetadata(client_id)
                return False
            except db.UnknownClientError:
                pass
        else:
            if aff4.FACTORY.ExistsWithType(client_urn,
                                           aff4_type=aff4_grr.VFSGRRClient,
                                           token=self.token):
                return False

        logging.info("Enrolling a new Fleetspeak client: %r", client_id)

        if data_store.RelationalDBWriteEnabled():
            now = rdfvalue.RDFDatetime.Now()
            data_store.REL_DB.WriteClientMetadata(client_id,
                                                  first_seen=now,
                                                  fleetspeak_enabled=True,
                                                  last_ping=now)

        if data_store.AFF4Enabled():
            # TODO(fleetspeak-team,grr-team): If aff4 isn't reliable enough, we can
            # catch exceptions from it and forward them to Fleetspeak by failing its
            # gRPC call. Fleetspeak will then retry with a random, perhaps healthier,
            # instance of the GRR frontend.
            with aff4.FACTORY.Create(client_urn,
                                     aff4_type=aff4_grr.VFSGRRClient,
                                     mode="rw",
                                     token=self.token) as client:

                client.Set(client.Schema.FLEETSPEAK_ENABLED,
                           rdfvalue.RDFBool(True))

                index = client_index.CreateClientIndex(token=self.token)
                index.AddClient(client)
                if data_store.RelationalDBWriteEnabled():
                    index = client_index.ClientIndex()
                    index.AddClient(data_migration.ConvertVFSGRRClient(client))

        # Publish the client enrollment message.
        events.Events.PublishEvent("ClientEnrollment",
                                   client_urn,
                                   token=self.token)
        return True
Example #4
File: ca_enroller.py Project: pettai/grr
    def Start(self):
        """Sign the CSR from the client."""

        if self.args.csr.type != rdf_crypto.Certificate.Type.CSR:
            raise ValueError("Must be called with CSR")

        csr = rdf_crypto.CertificateSigningRequest(self.args.csr.pem)
        # Verify the CSR. This is not strictly necessary but doesn't hurt either.
        try:
            csr.Verify(csr.GetPublicKey())
        except rdf_crypto.VerificationError:
            raise flow.FlowError("CSR for client %s did not verify: %s" %
                                 (self.client_id, csr.AsPEM()))

        # Verify that the CN is of the correct form. The common name should refer
        # to a client URN.
        self.cn = rdf_client.ClientURN.FromPublicKey(csr.GetPublicKey())
        if self.cn != csr.GetCN():
            raise ValueError("CSR CN %s does not match public key %s." %
                             (csr.GetCN(), self.cn))

        logging.info("Will sign CSR for: %s", self.cn)

        cert = rdf_crypto.RDFX509Cert.ClientCertFromCSR(csr)

        # This check is important to ensure that the client id reported in the
        # source of the enrollment request is the same as the one in the
        # certificate. We use the ClientURN to ensure this is also of the correct
        # form for a client name.
        if self.cn != self.client_id:
            raise flow.FlowError("Certificate name %s mismatch for client %s" %
                                 (self.cn, self.client_id))

        with aff4.FACTORY.Create(self.client_id,
                                 aff4_grr.VFSGRRClient,
                                 mode="rw",
                                 token=self.token) as client:
            # Set and write the certificate to the client record.
            now = rdfvalue.RDFDatetime.Now()
            client.Set(client.Schema.CERT, cert)
            client.Set(client.Schema.FIRST_SEEN, now)
            if data_store.RelationalDBWriteEnabled():
                data_store.REL_DB.WriteClientMetadata(self.client_id,
                                                      certificate=cert,
                                                      first_seen=now,
                                                      fleetspeak_enabled=False)

            index = client_index.CreateClientIndex(token=self.token)
            index.AddClient(client)
            if data_store.RelationalDBWriteEnabled():
                index = client_index.ClientIndex()
                index.AddClient(data_migration.ConvertVFSGRRClient(client))
        # Publish the client enrollment message.
        events.Events.PublishEvent("ClientEnrollment",
                                   self.client_urn,
                                   token=self.token)

        self.Log("Enrolled %s successfully", self.client_id)
Example #5
    def FillClientStats(self, client_id):
        with aff4.FACTORY.Create(client_id.Add("stats"),
                                 aff4_type=aff4_stats.ClientStats,
                                 token=self.token,
                                 mode="rw") as stats_fd:
            for i in range(6):
                with test_lib.FakeTime((i + 1) * 10):
                    timestamp = int((i + 1) * 10 * 1e6)
                    st = rdf_client_stats.ClientStats()

                    sample = rdf_client_stats.CpuSample(timestamp=timestamp,
                                                        user_cpu_time=10 + i,
                                                        system_cpu_time=20 + i,
                                                        cpu_percent=10 + i)
                    st.cpu_samples.Append(sample)

                    sample = rdf_client_stats.IOSample(timestamp=timestamp,
                                                       read_bytes=10 + i,
                                                       write_bytes=10 + i * 2)
                    st.io_samples.Append(sample)

                    stats_fd.AddAttribute(stats_fd.Schema.STATS(st))

                    if data_store.RelationalDBWriteEnabled():
                        data_store.REL_DB.WriteClientStats(
                            client_id=client_id.Basename(), stats=st)
Example #6
def CreateFileVersion(client_id,
                      path,
                      content=b"",
                      timestamp=None,
                      token=None):
    """Add a new version for a file."""
    if timestamp is None:
        timestamp = rdfvalue.RDFDatetime.Now()

    with test_lib.FakeTime(timestamp):
        with aff4.FACTORY.Create(client_id.Add(path),
                                 aff4_type=aff4_grr.VFSFile,
                                 mode="w",
                                 token=token) as fd:
            fd.Write(content)
            fd.Set(fd.Schema.CONTENT_LAST, rdfvalue.RDFDatetime.Now())

        if data_store.RelationalDBWriteEnabled():
            path_type, components = rdf_objects.ParseCategorizedPath(path)

            path_info = rdf_objects.PathInfo()
            path_info.path_type = path_type
            path_info.components = components
            path_info.directory = False

            data_store.REL_DB.WritePathInfos(client_id.Basename(), [path_info])
Example #7
    def _WriteFileContent(self, response, mutation_pool=None):
        urn = response.stat_entry.pathspec.AFF4Path(self.client_urn)

        filedesc = aff4.FACTORY.Create(urn,
                                       aff4_grr.VFSBlobImage,
                                       token=self.token,
                                       mutation_pool=mutation_pool)

        with filedesc:
            filedesc.SetChunksize(response.transferred_file.chunk_size)
            filedesc.Set(filedesc.Schema.STAT, response.stat_entry)

            chunks = sorted(response.transferred_file.chunks,
                            key=lambda _: _.offset)
            for chunk in chunks:
                filedesc.AddBlob(chunk.digest, chunk.length)

            filedesc.Set(filedesc.Schema.CONTENT_LAST,
                         rdfvalue.RDFDatetime.Now())

        if data_store.RelationalDBWriteEnabled():
            path_info = rdf_objects.PathInfo.FromStatEntry(response.stat_entry)

            # Adding files to filestore requires reading data from RELDB,
            # thus protecting this code with a filestore-read-enabled check.
            if data_store.RelationalDBReadEnabled("filestore"):
                blob_ids = [
                    rdf_objects.BlobID.FromBytes(c.digest) for c in chunks
                ]
                hash_id = file_store.AddFileWithUnknownHash(blob_ids)
                path_info.hash_entry.sha256 = hash_id.AsBytes()

            data_store.REL_DB.WritePathInfos(self.client_id, [path_info])
Example #8
def DeleteSignedBinary(binary_urn, token=None):
    """Deletes the binary with the given urn from the datastore.

    Args:
      binary_urn: RDFURN that serves as a unique identifier for the binary.
      token: ACL token to use with the legacy (non-relational) datastore.

    Raises:
      SignedBinaryNotFoundError: If the signed binary does not exist.
    """
    if _ShouldUseLegacyDatastore():
        try:
            aff4.FACTORY.Open(binary_urn,
                              aff4_type=aff4.AFF4Stream,
                              mode="r",
                              token=token)
        except aff4.InstantiationError:
            raise SignedBinaryNotFoundError(binary_urn)
        aff4.FACTORY.Delete(binary_urn, token=token)

    if data_store.RelationalDBWriteEnabled():
        try:
            data_store.REL_DB.ReadSignedBinaryReferences(
                _SignedBinaryIDFromURN(binary_urn))
        except db.UnknownSignedBinaryError:
            if _ShouldUseLegacyDatastore():
                # Migration of data isn't complete yet (we haven't started reading
                # exclusively from the relational DB), so this is probably ok.
                return
            else:
                raise SignedBinaryNotFoundError(binary_urn)
        data_store.REL_DB.DeleteSignedBinaryReferences(
            _SignedBinaryIDFromURN(binary_urn))
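
A hedged usage sketch for DeleteSignedBinary; the URN path below is made up for illustration, and token is assumed to be a valid ACL token:

# Hypothetical caller: remove a previously uploaded binary and surface a
# missing one to the user. The URN path is illustrative only.
binary_urn = rdfvalue.RDFURN("aff4:/config/executables/windows/tools/example.exe")
try:
    DeleteSignedBinary(binary_urn, token=token)
except SignedBinaryNotFoundError:
    print("No signed binary stored under %s" % binary_urn)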
Example #9
def WriteSignedBinaryBlobs(binary_urn, blobs, token=None):
    """Saves signed blobs to the datastore.

    If a signed binary with the given URN already exists, its contents will
    get overwritten.

    Args:
      binary_urn: RDFURN that should serve as a unique identifier for the
        binary.
      blobs: An Iterable of signed blobs to write to the datastore.
      token: ACL token to use with the legacy (non-relational) datastore.
    """
    if _ShouldUseLegacyDatastore():
        aff4.FACTORY.Delete(binary_urn, token=token)
        with data_store.DB.GetMutationPool() as mutation_pool:
            with aff4.FACTORY.Create(binary_urn,
                                     collects.GRRSignedBlob,
                                     mode="w",
                                     mutation_pool=mutation_pool,
                                     token=token) as fd:
                for blob in blobs:
                    fd.Add(blob, mutation_pool=mutation_pool)

    if data_store.RelationalDBWriteEnabled():
        blob_references = rdf_objects.BlobReferences()
        current_offset = 0
        for blob in blobs:
            blob_id = data_store.BLOBS.WriteBlobWithUnknownHash(
                blob.SerializeToString())
            blob_references.items.Append(
                rdf_objects.BlobReference(offset=current_offset,
                                          size=len(blob.data),
                                          blob_id=blob_id))
            current_offset += len(blob.data)
        data_store.REL_DB.WriteSignedBinaryReferences(
            _SignedBinaryIDFromURN(binary_urn), blob_references)
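
The offset bookkeeping above simply lays the blobs end to end; a standalone illustration of the same accounting, with no GRR dependencies:

# Two payloads of 3 and 5 bytes get offsets 0 and 3; the running total
# ends at the full binary size.
payloads = [b"abc", b"defgh"]
offsets, current_offset = [], 0
for payload in payloads:
    offsets.append(current_offset)
    current_offset += len(payload)
assert offsets == [0, 3]
assert current_offset == 8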
Example #10
File: vfs_test.py Project: slad99/grr
  def SetupTestTimeline(self):
    client_id = self.SetupClient(0)
    fixture_test_lib.ClientFixture(client_id, token=self.token)

    # Choose some directory with pathspec in the ClientFixture.
    self.category_path = "fs/os"
    self.folder_path = self.category_path + "/Users/中国新闻网新闻中/Shared"
    self.file_path = self.folder_path + "/a.txt"

    file_urn = client_id.Add(self.file_path)
    for i in range(0, 5):
      with test_lib.FakeTime(i):
        stat_entry = rdf_client.StatEntry()
        stat_entry.st_mtime = rdfvalue.RDFDatetimeSeconds.Now()
        stat_entry.pathspec.path = self.file_path[len(self.category_path):]
        stat_entry.pathspec.pathtype = rdf_paths.PathSpec.PathType.OS

        hash_entry = rdf_crypto.Hash(
            sha256=("0e8dc93e150021bb4752029ebbff51394aa36f069cf19901578"
                    "e4f06017acdb5").decode("hex"))

        with aff4.FACTORY.Create(
            file_urn, aff4_grr.VFSFile, mode="w", token=self.token) as fd:
          fd.Set(fd.Schema.STAT, stat_entry)
          fd.Set(fd.Schema.HASH, hash_entry)

        if data_store.RelationalDBWriteEnabled():
          cid = client_id.Basename()
          path_info = rdf_objects.PathInfo.FromStatEntry(stat_entry)
          path_info.hash_entry = hash_entry
          data_store.REL_DB.WritePathInfos(cid, [path_info])

    return client_id
Example #11
def WriteStatEntries(stat_entries, client_id, mutation_pool, token=None):
    """Persists information about stat entries.

    Args:
      stat_entries: A list of `StatEntry` instances.
      client_id: An id of a client the stat entries come from.
      mutation_pool: A mutation pool used for writing into the AFF4 data
        store.
      token: A token used for writing into the AFF4 data store.
    """

    # Mask covering every file-type bit in st_mode. The snippet referenced
    # stat_type_mask without defining it; this definition is reconstructed
    # from the standard stat file-type constants.
    stat_type_mask = (stat.S_IFREG | stat.S_IFDIR | stat.S_IFLNK | stat.S_IFBLK
                      | stat.S_IFCHR | stat.S_IFIFO | stat.S_IFSOCK)

    for stat_response in stat_entries:
        if stat_response.pathspec.last.stream_name:
            # This is an ADS (alternate data stream). In that case we always
            # need to create a file or we won't be able to access the data.
            # New clients already send the correct mode, but to make sure we
            # set this to a regular file anyway. Clear all file type bits:
            stat_response.st_mode &= ~stat_type_mask
            stat_response.st_mode |= stat.S_IFREG

    if data_store.AFF4Enabled():
        for stat_entry in stat_entries:
            CreateAFF4Object(stat_entry,
                             client_id_urn=rdf_client.ClientURN(client_id),
                             mutation_pool=mutation_pool,
                             token=token)

    if data_store.RelationalDBWriteEnabled():
        path_infos = list(map(rdf_objects.PathInfo.FromStatEntry,
                              stat_entries))
        data_store.REL_DB.WritePathInfos(client_id, path_infos)
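
The ADS branch above rewrites only the file-type bits of st_mode while preserving the permission bits; a standalone illustration of that bit arithmetic:

import stat

# Same mask as stat_type_mask above: every file-type bit in st_mode.
type_mask = (stat.S_IFREG | stat.S_IFDIR | stat.S_IFLNK | stat.S_IFBLK
             | stat.S_IFCHR | stat.S_IFIFO | stat.S_IFSOCK)

st_mode = stat.S_IFDIR | 0o755   # pretend the client reported a directory
st_mode &= ~type_mask            # clear all file-type bits
st_mode |= stat.S_IFREG          # force "regular file"
assert stat.S_ISREG(st_mode)
assert st_mode & 0o777 == 0o755  # permission bits are untouched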
Example #12
    def StoreResults(self, responses):
        """Stores the results returned by the client to the db."""
        if not responses.success:
            raise flow.FlowError(responses.status)

        self.state.files_found = len(responses)
        files_to_publish = []
        with data_store.DB.GetMutationPool() as pool:
            transferred_file_responses = []
            stat_entries = []
            for response in responses:
                if response.HasField("transferred_file"):
                    transferred_file_responses.append(response)
                elif response.HasField("stat_entry"):
                    stat_entries.append(response.stat_entry)

            if data_store.AFF4Enabled():
                self._WriteFilesContentAff4(transferred_file_responses,
                                            mutation_pool=pool)
            if data_store.RelationalDBWriteEnabled():
                self._WriteFilesContentRel(transferred_file_responses)

            self._WriteStatEntries(stat_entries, mutation_pool=pool)

            for response in responses:
                self.SendReply(response)

                if stat.S_ISREG(response.stat_entry.st_mode):
                    files_to_publish.append(
                        response.stat_entry.pathspec.AFF4Path(self.client_urn))

        if files_to_publish and not data_store.RelationalDBReadEnabled(
                "filestore"):
            events.Events.PublishMultipleEvents(
                {"LegacyFileStore.AddFileToStore": files_to_publish})
Example #13
    def ProcessClients(self, responses):
        """Does the work."""
        del responses

        end = rdfvalue.RDFDatetime.Now() - db.CLIENT_STATS_RETENTION
        client_urns = export_utils.GetAllClients(token=self.token)

        for batch in collection.Batch(client_urns, 10000):
            with data_store.DB.GetMutationPool() as mutation_pool:
                for client_urn in batch:
                    mutation_pool.DeleteAttributes(
                        client_urn.Add("stats"), [u"aff4:stats"],
                        start=0,
                        end=end.AsMicrosecondsSinceEpoch())
            self.HeartBeat()

        if data_store.RelationalDBWriteEnabled():
            total_deleted_count = 0
            for deleted_count in data_store.REL_DB.DeleteOldClientStats(
                    yield_after_count=_STATS_DELETION_BATCH_SIZE,
                    retention_time=end):
                self.HeartBeat()
                total_deleted_count += deleted_count
            self.Log("Deleted %d ClientStats that expired before %s",
                     total_deleted_count, end)
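
collection.Batch above chunks the URN list so that each mutation pool handles at most 10000 clients; an equivalent standalone generator, shown for illustration only (the real collection.Batch may differ in detail):

def Batch(items, size):
    """Yields successive chunks of at most `size` items."""
    batch = []
    for item in items:
        batch.append(item)
        if len(batch) == size:
            yield batch
            batch = []
    if batch:
        yield batch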
Example #14
def WriteAllCrashDetails(client_id,
                         crash_details,
                         flow_session_id=None,
                         hunt_session_id=None,
                         token=None):
    """Updates the last crash attribute of the client."""
    # AFF4.
    if data_store.AFF4Enabled():
        with aff4.FACTORY.Create(client_id, aff4_grr.VFSGRRClient,
                                 token=token) as client_obj:
            client_obj.Set(client_obj.Schema.LAST_CRASH(crash_details))

        # Duplicate the crash information in a number of places so we can find it
        # easily.
        client_urn = rdf_client.ClientURN(client_id)
        client_crashes = aff4_grr.VFSGRRClient.CrashCollectionURNForCID(
            client_urn)
        with data_store.DB.GetMutationPool() as pool:
            grr_collections.CrashCollection.StaticAdd(client_crashes,
                                                      crash_details,
                                                      mutation_pool=pool)

    # Relational db.
    if data_store.RelationalDBWriteEnabled():
        try:
            data_store.REL_DB.WriteClientCrashInfo(client_id, crash_details)
        except db.UnknownClientError:
            pass

    if not flow_session_id:
        return

    if data_store.RelationalDBFlowsEnabled():
        flow_id = flow_session_id.Basename()
        data_store.REL_DB.UpdateFlow(client_id,
                                     flow_id,
                                     client_crash_info=crash_details)

        flow_obj = data_store.REL_DB.ReadFlowObject(client_id, flow_id)
        if flow_obj.parent_hunt_id:
            db_compat.ProcessHuntClientCrash(flow_obj,
                                             client_crash_info=crash_details)

    # TODO(amoser): Registering crashes in hunts is currently not implemented for
    # the relational db.
    if not data_store.RelationalDBFlowsEnabled():
        with aff4.FACTORY.Open(flow_session_id,
                               flow.GRRFlow,
                               mode="rw",
                               age=aff4.NEWEST_TIME,
                               token=token) as aff4_flow:
            aff4_flow.Set(aff4_flow.Schema.CLIENT_CRASH(crash_details))

        hunt_session_id = ExtractHuntId(flow_session_id)
        if hunt_session_id and hunt_session_id != flow_session_id:
            hunt_obj = aff4.FACTORY.Open(hunt_session_id,
                                         aff4_type=implementation.GRRHunt,
                                         mode="rw",
                                         token=token)
            hunt_obj.RegisterCrash(crash_details)
Example #15
    def List(self, responses):
        """Collect the directory listing and store in the datastore."""
        if not responses.success:
            raise flow.FlowError(str(responses.status))

        self.Log("Listed %s", self.state.urn)

        with data_store.DB.GetMutationPool() as pool:
            with aff4.FACTORY.Create(self.state.urn,
                                     standard.VFSDirectory,
                                     mode="w",
                                     mutation_pool=pool,
                                     token=self.token) as fd:
                fd.Set(fd.Schema.PATHSPEC(self.state.stat.pathspec))
                fd.Set(fd.Schema.STAT(self.state.stat))

            if data_store.RelationalDBWriteEnabled():
                path_info = rdf_objects.PathInfo.FromStatEntry(self.state.stat)
                data_store.REL_DB.WritePathInfos(self.client_id, [path_info])

            stat_entries = list(map(rdf_client_fs.StatEntry, responses))
            WriteStatEntries(stat_entries,
                             client_id=self.client_id,
                             mutation_pool=pool,
                             token=self.token)

            for stat_entry in stat_entries:
                self.SendReply(stat_entry)  # Send Stats to parent flows.
Example #16
  def ProcessStat(self, responses):
    # Did it work?
    if not responses.success:
      # It's better to raise rather than merely logging since it will
      # make it to the flow's protobuf and users can
      # inspect the reason this flow failed.
      raise IOError("Could not stat file: %s" % responses.status)

    client_stat = responses.First()

    # Update the pathspec to the one we got from the client.
    self.state.pathspec = client_stat.pathspec

    # If the file was big enough, we'll store it as an AFF4SparseImage
    if client_stat.st_size > self.args.size_threshold:
      urn = self.state.pathspec.AFF4Path(self.client_urn)

      # TODO(user) When we can check the last update time of the
      # contents of a file, raise if the contents have been updated before here.

      fd = aff4.FACTORY.Create(
          urn, aff4_type=standard.AFF4SparseImage, token=self.token, mode="rw")
      fd.Set(fd.Schema.PATHSPEC, self.state.pathspec)
      fd.Set(fd.Schema.STAT, client_stat)
      fd.Flush()

      if data_store.RelationalDBWriteEnabled():
        path_info = rdf_objects.PathInfo.FromStatEntry(client_stat)
        data_store.REL_DB.WritePathInfos(self.client_id, [path_info])
    else:
      # Otherwise, just get the whole file.
      self.CallFlow(
          transfer.MultiGetFile.__name__,
          pathspecs=[self.state.pathspec],
          next_state="End")
Example #17
def AddFakeAuditLog(description=None,
                    client=None,
                    user=None,
                    action=None,
                    flow_name=None,
                    urn=None,
                    router_method_name=None,
                    http_request_path=None,
                    token=None):
  events.Events.PublishEvent(
      "Audit",
      rdf_events.AuditEvent(
          description=description,
          client=client,
          urn=urn,
          user=user,
          action=action,
          flow_name=flow_name),
      token=token)

  if data_store.RelationalDBWriteEnabled():
    data_store.REL_DB.WriteAPIAuditEntry(
        rdf_objects.APIAuditEntry(
            username=user,
            router_method_name=router_method_name,
            http_request_path=http_request_path,
        ))
Example #18
    def StoreResults(self, responses):
        """Stores the results returned from the client."""
        if not responses.success:
            raise IOError(responses.status)

        with data_store.DB.GetMutationPool() as pool:
            for response in responses:
                # Create the file in the VFS
                vfs_urn = response.hit.pathspec.AFF4Path(self.client_urn)

                if stat.S_ISDIR(response.hit.st_mode):
                    fd = aff4.FACTORY.Create(vfs_urn,
                                             standard.VFSDirectory,
                                             mutation_pool=pool,
                                             token=self.token)
                else:
                    fd = aff4.FACTORY.Create(vfs_urn,
                                             aff4_grr.VFSFile,
                                             mutation_pool=pool,
                                             token=self.token)

                with fd:
                    stat_response = fd.Schema.STAT(response.hit)
                    fd.Set(stat_response)
                    fd.Set(fd.Schema.PATHSPEC(response.hit.pathspec))

                if data_store.RelationalDBWriteEnabled():
                    path_info = rdf_objects.PathInfo.FromStatEntry(
                        response.hit)
                    data_store.REL_DB.WritePathInfos(self.client_id,
                                                     [path_info])

                # Send the stat to the parent flow.
                self.SendReply(stat_response)
Example #19
File: fingerprint.py Project: vismid86/grr
    def ProcessFingerprint(self, responses):
        """Store the fingerprint response."""
        if not responses.success:
            # It's better to raise rather than merely logging since it will
            # make it to the flow's protobuf and users can inspect the reason
            # this flow failed.
            raise flow.FlowError("Could not fingerprint file: %s" %
                                 responses.status)

        response = responses.First()
        if response.pathspec.path:
            pathspec = response.pathspec
        else:
            pathspec = self.args.pathspec

        self.state.urn = pathspec.AFF4Path(self.client_urn)

        with aff4.FACTORY.Create(self.state.urn,
                                 aff4_grr.VFSFile,
                                 mode="w",
                                 token=self.token) as fd:
            hash_obj = response.hash
            fd.Set(fd.Schema.HASH, hash_obj)

        if data_store.RelationalDBWriteEnabled():
            path_info = rdf_objects.PathInfo.FromPathSpec(pathspec)
            path_info.hash_entry = response.hash

            data_store.REL_DB.WritePathInfos(self.client_id, [path_info])

        self.ReceiveFileFingerprint(self.state.urn,
                                    hash_obj,
                                    request_data=responses.request_data)
Example #20
def CreateDirectory(client_path, token=None):
    """Creates a directory in datastore-agnostic way.

    Args:
      client_path: A `ClientPath` instance specifying the location of the
        file.
      token: A GRR token for accessing the data store.
    """
    precondition.AssertType(client_path, db.ClientPath)

    stat_entry = rdf_client_fs.StatEntry(pathspec=rdf_paths.PathSpec(
        pathtype=client_path.path_type, path="/".join(client_path.components)),
                                         st_mode=16895)

    if data_store.RelationalDBWriteEnabled():

        path_info = rdf_objects.PathInfo()
        path_info.path_type = client_path.path_type
        path_info.components = client_path.components
        path_info.stat_entry = stat_entry

        data_store.REL_DB.WritePathInfos(client_path.client_id, [path_info])

    urn = aff4.ROOT_URN.Add(client_path.client_id).Add(client_path.vfs_path)
    with aff4.FACTORY.Create(urn, aff4_standard.VFSDirectory,
                             token=token) as filedesc:
        filedesc.Set(filedesc.Schema.STAT, stat_entry)
        filedesc.Set(filedesc.Schema.PATHSPEC, stat_entry.pathspec)
Example #21
  def testSwitchingBetweenFileVersionsRefreshesDownloadTab(self):
    urn_a = rdfvalue.RDFURN("%s/fs/os/c/Downloads/a.txt" % self.client_id)
    path_info = rdf_objects.PathInfo.OS(components=["c", "Downloads", "a.txt"])

    # Test files are set up using self.CreateFileVersions call in test's
    # setUp method. Amend created file versions by adding different
    # hashes to versions corresponding to different times.
    # Note that a string passed to the fd.Schema.HASH constructor will be
    # printed as a hexadecimal bytestring. Thus "111" will become "313131"
    # and "222" will become "323232".
    with test_lib.FakeTime(gui_test_lib.TIME_0):
      with aff4.FACTORY.Create(
          urn_a,
          aff4_type=aff4_grr.VFSFile,
          force_new_version=False,
          object_exists=True) as fd:
        fd.Set(fd.Schema.HASH(sha256=b"111"))

      if data_store.RelationalDBWriteEnabled():
        path_info.hash_entry.sha256 = b"111"
        data_store.REL_DB.WritePathInfos(self.client_id, [path_info])

    with test_lib.FakeTime(gui_test_lib.TIME_1):
      with aff4.FACTORY.Create(
          urn_a,
          aff4_type=aff4_grr.VFSFile,
          force_new_version=False,
          object_exists=True) as fd:
        fd.Set(fd.Schema.HASH(sha256=b"222"))

      if data_store.RelationalDBWriteEnabled():
        path_info.hash_entry.sha256 = b"222"
        data_store.REL_DB.WritePathInfos(self.client_id, [path_info])

    # Open a URL corresponding to a HEAD version of the file.
    self.Open("/#/clients/%s/vfs/fs/os/c/Downloads/a.txt?tab=download" %
              self.client_id)
    # Make sure displayed hash value is correct.
    self.WaitUntil(self.IsElementPresent,
                   "css=tr:contains('Sha256') td:contains('323232')")

    # Select the previous file version.
    self.Click("css=select.version-dropdown > option:contains(\"%s\")" %
               gui_test_lib.DateString(gui_test_lib.TIME_0))
    # Make sure displayed hash value gets updated.
    self.WaitUntil(self.IsElementPresent,
                   "css=tr:contains('Sha256') td:contains('313131')")
Example #22
  def WriteBuffer(self, responses):
    """Write the hash received to the blob image."""

    # Note that hashes must arrive at this state in the correct order since they
    # are sent in the correct order (either via CallState or CallClient).
    index = responses.request_data["index"]
    if index not in self.state.pending_files:
      return

    # Failed to read the file - ignore it.
    if not responses.success:
      self._FileFetchFailed(index, responses.request.request.name)
      return

    response = responses.First()
    file_tracker = self.state.pending_files.get(index)
    if file_tracker:
      file_tracker.setdefault("blobs", []).append((response.data,
                                                   response.length))

      download_size = file_tracker["size_to_download"]
      if (response.length < self.CHUNK_SIZE or
          response.offset + response.length >= download_size):

        # Write the file to the data store.
        stat_entry = file_tracker["stat_entry"]
        urn = stat_entry.pathspec.AFF4Path(self.client_id)

        with aff4.FACTORY.Create(
            urn, aff4_grr.VFSBlobImage, mode="w", token=self.token) as fd:

          fd.SetChunksize(self.CHUNK_SIZE)
          fd.Set(fd.Schema.STAT(stat_entry))
          fd.Set(fd.Schema.PATHSPEC(stat_entry.pathspec))
          fd.Set(fd.Schema.CONTENT_LAST(rdfvalue.RDFDatetime().Now()))

          for digest, length in file_tracker["blobs"]:
            fd.AddBlob(digest, length)

          # Save some space.
          del file_tracker["blobs"]

        if data_store.RelationalDBWriteEnabled():
          client_id = self.client_id.Basename()
          path_info = rdf_objects.PathInfo.FromStatEntry(stat_entry)
          data_store.REL_DB.WritePathInfos(client_id, [path_info])

        # File done, remove from the store and close it.
        self._ReceiveFetchedFile(file_tracker)

        # Publish the new file event to cause the file to be added to the
        # filestore.
        self.Publish("FileStore.AddFileToStore", urn)

        self.state.files_fetched += 1

        if not self.state.files_fetched % 100:
          self.Log("Fetched %d of %d files.", self.state.files_fetched,
                   self.state.files_to_fetch)
Example #23
    def ProcessKnowledgeBase(self, responses):
        """Collect and store any extra non-kb artifacts."""
        if not responses.success:
            raise flow.FlowError(
                "Error while collecting the knowledge base: %s" %
                responses.status)

        kb = responses.First()
        if data_store.AFF4Enabled():
            # AFF4 client.
            client = self._OpenClient(mode="rw")
            with client:
                client.Set(client.Schema.KNOWLEDGE_BASE, kb)

                # Copy usernames.
                usernames = [
                    user.username for user in kb.users if user.username
                ]
                client.AddAttribute(
                    client.Schema.USERNAMES(" ".join(usernames)))

                self.CopyOSReleaseFromKnowledgeBase(kb, client)

        # rdf_objects.ClientSnapshot.

        # Information already present in the knowledge base takes precedence.
        if not kb.os:
            kb.os = self.state.os

        if not kb.fqdn:
            kb.fqdn = self.state.fqdn

        self.state.client.knowledge_base = kb

        if data_store.RelationalDBReadEnabled():
            existing_client = data_store.REL_DB.ReadClientSnapshot(
                self.client_id)
            if existing_client is None:
                # This is the first time we interrogate this client. In that
                # case, we need to store basic information about this client
                # right away so follow-up flows work properly.
                data_store.REL_DB.WriteClientSnapshot(self.state.client)

        self.CallFlow(collectors.ArtifactCollectorFlow.__name__,
                      artifact_list=config.
                      CONFIG["Artifacts.non_kb_interrogate_artifacts"],
                      knowledge_base=kb,
                      next_state="ProcessArtifactResponses")

        if data_store.AFF4Enabled():
            # Update the client index for the AFF4 client.
            client_index.CreateClientIndex(token=self.token).AddClient(client)

        if data_store.RelationalDBWriteEnabled():
            try:
                # Update the client index for the rdf_objects.ClientSnapshot.
                client_index.ClientIndex().AddClient(self.state.client)
            except db.UnknownClientError:
                pass
Example #24
    def ReadBuffer(self, responses):
        """Read the buffer and write to the file."""
        # Did it work?
        if responses.success:
            response = responses.First()
            if not response:
                raise IOError("Missing hash response for read buffer.")

            if response.offset <= self.state.max_chunk_number * self.CHUNK_SIZE:
                # Response.data is the hash of the block (32 bytes) and
                # response.length is the length of the block.
                self.state.blobs.append((response.data, response.length))
                self.Log("Received blob hash %s", response.data.encode("hex"))

                # Add one more chunk to the window.
                self.FetchWindow(1)

            if response.offset + response.length >= self.state.file_size:
                # File is complete.
                stat_entry = self.state.stat_entry
                urn = self.state.stat_entry.AFF4Path(self.client_urn)

                # TODO(user): when all the code can read files from REL_DB,
                # protect this with:
                # if not data_store.RelationalDBReadEnabled(category="filestore"):
                with aff4.FACTORY.Create(urn,
                                         aff4_grr.VFSBlobImage,
                                         token=self.token) as fd:
                    fd.SetChunksize(self.CHUNK_SIZE)
                    fd.Set(fd.Schema.STAT(stat_entry))

                    for data, length in self.state.blobs:
                        fd.AddBlob(data, length)
                        fd.Set(fd.Schema.CONTENT_LAST,
                               rdfvalue.RDFDatetime.Now())

                if data_store.RelationalDBWriteEnabled():
                    path_info = rdf_objects.PathInfo.FromStatEntry(stat_entry)

                    # Adding files to filestore requires reading data from RELDB,
                    # thus protecting this code with a filestore-read-enabled check.
                    if data_store.RelationalDBReadEnabled("filestore"):
                        blob_ids = [
                            rdf_objects.BlobID.FromBytes(data)
                            for data, _ in self.state.blobs
                        ]

                        hash_id = file_store.AddFileWithUnknownHash(blob_ids)

                        path_info.hash_entry.sha256 = hash_id.AsBytes()

                    data_store.REL_DB.WritePathInfos(self.client_id,
                                                     [path_info])

                # Save some space.
                del self.state["blobs"]
                self.state.success = True
Example #25
  def WriteBuffer(self, responses):
    """Write the hash received to the blob image."""

    index = responses.request_data["index"]
    if index not in self.state.pending_files:
      return

    # Failed to read the file - ignore it.
    if not responses.success:
      self._FileFetchFailed(index, responses.request.request.name)
      return

    response = responses.First()
    file_tracker = self.state.pending_files.get(index)
    if file_tracker:
      blob_dict = file_tracker.setdefault("blobs", {})
      blob_index = responses.request_data["blob_index"]
      blob_dict[blob_index] = (response.data, response.length)

      if len(blob_dict) == len(file_tracker["hash_list"]):
        # Write the file to the data store.
        stat_entry = file_tracker["stat_entry"]
        urn = stat_entry.pathspec.AFF4Path(self.client_urn)

        with aff4.FACTORY.Create(
            urn, aff4_grr.VFSBlobImage, mode="w", token=self.token) as fd:

          fd.SetChunksize(self.CHUNK_SIZE)
          fd.Set(fd.Schema.STAT(stat_entry))
          fd.Set(fd.Schema.PATHSPEC(stat_entry.pathspec))
          fd.Set(fd.Schema.CONTENT_LAST(rdfvalue.RDFDatetime().Now()))

          for index in sorted(blob_dict):
            digest, length = blob_dict[index]
            fd.AddBlob(digest, length)

          # Save some space.
          del file_tracker["blobs"]
          del file_tracker["hash_list"]

        if data_store.RelationalDBWriteEnabled():
          path_info = rdf_objects.PathInfo.FromStatEntry(stat_entry)
          data_store.REL_DB.WritePathInfos(self.client_id, [path_info])

        # File done, remove from the store and close it.
        self._ReceiveFetchedFile(file_tracker)

        # Publish the new file event to cause the file to be added to the
        # filestore.
        events.Events.PublishEvent(
            "FileStore.AddFileToStore", urn, token=self.token)

        self.state.files_fetched += 1

        if not self.state.files_fetched % 100:
          self.Log("Fetched %d of %d files.", self.state.files_fetched,
                   self.state.files_to_fetch)
Example #26
    def EnrolFleetspeakClient(self, client_id):
        """Enrols a Fleetspeak-enabled client for use with GRR."""
        client_urn = rdf_client.ClientURN(client_id)

        # If already enrolled, return.
        if data_store.RelationalDBReadEnabled():
            if data_store.REL_DB.ReadClientMetadata(client_id):
                return
        else:
            if aff4.FACTORY.ExistsWithType(client_urn,
                                           aff4_type=aff4_grr.VFSGRRClient,
                                           token=self.token):
                return

        logging.info("Enrolling a new Fleetspeak client: %r", client_id)

        if data_store.RelationalDBWriteEnabled():
            data_store.REL_DB.WriteClientMetadata(client_id,
                                                  fleetspeak_enabled=True)

        # TODO(fleetspeak-team,grr-team): If aff4 isn't reliable enough, we can
        # catch exceptions from it and forward them to Fleetspeak by failing its
        # gRPC call. Fleetspeak will then retry with a random, perhaps healthier,
        # instance of the GRR frontend.
        with aff4.FACTORY.Create(client_urn,
                                 aff4_type=aff4_grr.VFSGRRClient,
                                 mode="rw",
                                 token=self.token) as client:

            client.Set(client.Schema.FLEETSPEAK_ENABLED,
                       rdfvalue.RDFBool(True))

            index = client_index.CreateClientIndex(token=self.token)
            index.AddClient(client)
            if data_store.RelationalDBWriteEnabled():
                index = client_index.ClientIndex()
                index.AddClient(data_migration.ConvertVFSGRRClient(client))

        # Publish the client enrollment message.
        events.Events.PublishEvent("ClientEnrollment",
                                   client_urn,
                                   token=self.token)
Example #27
def CreateFile(client_path, content=b"", token=None):
    """Creates a file in datastore-agnostic way.

    Args:
      client_path: A `ClientPath` instance specifying the location of the
        file.
      content: The content to write to the file.
      token: A GRR token for accessing the data store.
    """
    precondition.AssertType(client_path, db.ClientPath)
    precondition.AssertType(content, bytes)

    blob_id = rdf_objects.BlobID.FromBlobData(content)

    stat_entry = rdf_client_fs.StatEntry(pathspec=rdf_paths.PathSpec(
        pathtype=client_path.path_type, path="/".join(client_path.components)),
                                         st_mode=33206,
                                         st_size=len(content))

    if data_store.RelationalDBWriteEnabled():
        data_store.BLOBS.WriteBlobs({blob_id: content})
        blob_ref = rdf_objects.BlobReference(size=len(content),
                                             offset=0,
                                             blob_id=blob_id)
        hash_id = file_store.AddFileWithUnknownHash(client_path, [blob_ref])

        path_info = rdf_objects.PathInfo()
        path_info.path_type = client_path.path_type
        path_info.components = client_path.components
        path_info.hash_entry.num_bytes = len(content)
        path_info.hash_entry.sha256 = hash_id.AsBytes()
        path_info.stat_entry = stat_entry

        data_store.REL_DB.WritePathInfos(client_path.client_id, [path_info])

    if data_store.AFF4Enabled():
        urn = aff4.ROOT_URN.Add(client_path.client_id).Add(
            client_path.vfs_path)
        with aff4.FACTORY.Create(urn, aff4_grr.VFSBlobImage,
                                 token=token) as filedesc:
            bio = io.BytesIO()
            bio.write(content)
            bio.seek(0)

            filedesc.AppendContent(bio)
            filedesc.Set(filedesc.Schema.STAT, stat_entry)

            filedesc.Set(
                filedesc.Schema.HASH,
                rdf_crypto.Hash(sha256=rdf_objects.SHA256HashID.FromData(
                    content).AsBytes(),
                                num_bytes=len(content)))

            filedesc.Set(filedesc.Schema.CONTENT_LAST,
                         rdfvalue.RDFDatetime.Now())
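
A hedged usage sketch for CreateFile; the client id below is made up, and db.ClientPath.OS(...) is assumed here to build a ClientPath with the OS path type:

# Hypothetical usage of the helper above.
client_path = db.ClientPath.OS(
    client_id="C.1000000000000000", components=("tmp", "hello.txt"))
CreateFile(client_path, content=b"hello world", token=token)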
Example #28
    def ProcessMessage(self, message):
        """Begins an enrollment flow for this client.

        Args:
          message: The Certificate sent by the client. Note that this message
            is not authenticated.
        """
        cert = rdf_crypto.Certificate(message.payload)

        queue = self.well_known_session_id.Queue()

        client_id = message.source

        # It makes no sense to enrol the same client multiple times, so we
        # eliminate duplicates. Note that we can still enrol clients multiple
        # times due to cache expiration.
        try:
            enrolment_cache.Get(client_id)
            return
        except KeyError:
            enrolment_cache.Put(client_id, 1)

        # Create a new client object for this client.
        if data_store.AFF4Enabled():
            client = aff4.FACTORY.Create(client_id,
                                         aff4_grr.VFSGRRClient,
                                         mode="rw",
                                         token=self.token)
            client_cert = client.Get(client.Schema.CERT)

        if data_store.RelationalDBReadEnabled():
            try:
                md = data_store.REL_DB.ReadClientMetadata(client_id.Basename())
                client_cert = md.certificate
            except db.UnknownClientError:
                client_cert = None

        if data_store.RelationalDBWriteEnabled():
            data_store.REL_DB.WriteClientMetadata(client_id.Basename(),
                                                  fleetspeak_enabled=False)

        # Only enroll this client if it has no certificate yet.
        if not client_cert:
            # Start the enrollment flow for this client.

            # Note that the actual CAEnroler class is autogenerated from the
            # CAEnrolerMixin by the DualDBFlow decorator, which confuses the
            # linter; hence the disable directive.
            flow.StartAFF4Flow(
                client_id=client_id,
                flow_name=CAEnroler.__name__,  # pylint: disable=undefined-variable
                csr=cert,
                queue=queue,
                token=self.token)
Example #29
File: find.py Project: slad99/grr
    def IterateFind(self, responses):
        """Iterate in this state until no more results are available."""
        if not responses.success:
            raise IOError(responses.status)

        with data_store.DB.GetMutationPool() as pool:
            for response in responses:
                # Create the file in the VFS
                vfs_urn = response.hit.pathspec.AFF4Path(self.client_id)

                if stat.S_ISDIR(response.hit.st_mode):
                    fd = aff4.FACTORY.Create(vfs_urn,
                                             standard.VFSDirectory,
                                             mutation_pool=pool,
                                             token=self.token)
                else:
                    fd = aff4.FACTORY.Create(vfs_urn,
                                             aff4_grr.VFSFile,
                                             mutation_pool=pool,
                                             token=self.token)

                with fd:
                    stat_response = fd.Schema.STAT(response.hit)
                    fd.Set(stat_response)
                    fd.Set(fd.Schema.PATHSPEC(response.hit.pathspec))

                if data_store.RelationalDBWriteEnabled():
                    client_id = self.client_id.Basename()
                    path_info = rdf_objects.PathInfo.FromStatEntry(
                        response.hit)
                    data_store.REL_DB.WritePathInfos(client_id, [path_info])

                # Send the stat to the parent flow.
                self.SendReply(stat_response)

        self.state.received_count += len(responses)

        # Exit if we hit the max result count we wanted or we're finished.
        # Note that we may exceed max_results if the iteration yielded too
        # many results; we simply will not return to the client for another
        # iteration.
        if (self.state.received_count < self.args.max_results and
                responses.iterator.state != responses.iterator.State.FINISHED):

            self.args.findspec.iterator = responses.iterator

            # If we are close to max_results reduce the iterator.
            self.args.findspec.iterator.number = min(
                self.args.findspec.iterator.number,
                self.args.max_results - self.state.received_count)

            self.CallClient(server_stubs.Find,
                            self.args.findspec,
                            next_state="IterateFind")
            self.Log("%d files processed.", self.state.received_count)
Example #30
    def Execute(self):
        """Runs the migration procedure."""
        if not data_store.RelationalDBWriteEnabled():
            raise ValueError("No relational database available.")

        sys.stdout.write("Collecting clients...\n")
        users = self._GetUsers()

        sys.stdout.write("Users to migrate: {}\n".format(len(users)))
        for u in users:
            self._MigrateUser(u)