Example #1
    def Handle(self, args, token=None):
        end = args.count or sys.maxint

        keywords = shlex.split(args.query)

        api_clients = []

        if data_store.RelationalDBReadEnabled():
            index = client_index.ClientIndex()

            clients = sorted(
                index.LookupClients(keywords))[args.offset:args.offset + end]

            client_infos = data_store.REL_DB.MultiReadClientFullInfo(clients)
            for client_info in client_infos.itervalues():
                api_clients.append(ApiClient().InitFromClientInfo(client_info))

        else:
            index = client_index.CreateClientIndex(token=token)

            result_urns = sorted(
                index.LookupClients(keywords))[args.offset:args.offset + end]

            result_set = aff4.FACTORY.MultiOpen(result_urns, token=token)

            for child in sorted(result_set):
                api_clients.append(ApiClient().InitFromAff4Object(child))

        return ApiSearchClientsResult(items=api_clients)
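
A hedged sketch of driving a search handler like the one above from a test or console session. The handler and args class names (ApiSearchClientsHandler, ApiSearchClientsArgs) and the token are assumptions based on the GRR API surface rather than part of this snippet; the query and paging values are illustrative.

    handler = ApiSearchClientsHandler()
    args = ApiSearchClientsArgs(query="host-1 label:prod", offset=0, count=20)
    result = handler.Handle(args, token=token)
    for api_client in result.items:
        # Each item is an ApiClient built via InitFromClientInfo/InitFromAff4Object.
        print(api_client)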
Example #2
    def Start(self):
        inactive_client_ttl = config.CONFIG[
            "DataRetention.inactive_client_ttl"]
        if not inactive_client_ttl:
            self.Log("TTL not set - nothing to do...")
            return

        exception_label = config.CONFIG[
            "DataRetention.inactive_client_ttl_exception_label"]

        index = client_index.CreateClientIndex(token=self.token)

        client_urns = index.LookupClients(["."])

        deadline = rdfvalue.RDFDatetime.Now() - inactive_client_ttl

        for client_group in utils.Grouper(client_urns, 1000):
            inactive_client_urns = []
            for client in aff4.FACTORY.MultiOpen(
                    client_group,
                    mode="r",
                    aff4_type=aff4_grr.VFSGRRClient,
                    token=self.token):
                if exception_label in client.GetLabelsNames():
                    continue

                if client.Get(client.Schema.LAST) < deadline:
                    inactive_client_urns.append(client.urn)

            aff4.FACTORY.MultiDelete(inactive_client_urns, token=self.token)
            self.HeartBeat()
Example #3
  def Platform(self, responses):
    """Stores information about the platform."""
    if responses.success:
      response = responses.First()

      # These need to be in separate attributes because they get searched on in
      # the GUI
      with self._OpenClient(mode="rw") as client:
        client.Set(client.Schema.HOSTNAME(response.node))
        client.Set(client.Schema.SYSTEM(response.system))
        client.Set(client.Schema.OS_RELEASE(response.release))
        client.Set(client.Schema.OS_VERSION(response.version))
        client.Set(client.Schema.KERNEL(response.kernel))
        client.Set(client.Schema.FQDN(response.fqdn))

        # response.machine is the machine value of platform.uname()
        # On Windows this is the value of:
        # HKEY_LOCAL_MACHINE\System\CurrentControlSet\Control\Session
        # Manager\Environment\PROCESSOR_ARCHITECTURE
        # "AMD64", "IA64" or "x86"
        client.Set(client.Schema.ARCH(response.machine))
        client.Set(
            client.Schema.UNAME("%s-%s-%s" % (response.system, response.release,
                                              response.version)))

        # Update the client index
        client_index.CreateClientIndex(token=self.token).AddClient(client)

      if response.system == "Windows":
        with aff4.FACTORY.Create(
            self.client_id.Add("registry"),
            standard.VFSDirectory,
            token=self.token) as fd:
          fd.Set(fd.Schema.PATHSPEC,
                 fd.Schema.PATHSPEC(
                     path="/", pathtype=rdf_paths.PathSpec.PathType.REGISTRY))

      # No support for OS X cloud machines as yet.
      if response.system in ["Linux", "Windows"]:
        self.CallClient(
            server_stubs.GetCloudVMMetadata,
            cloud.BuildCloudMetadataRequests(),
            next_state="CloudMetadata")

      known_system_type = True
    else:
      client = self._OpenClient()
      known_system_type = client.Get(client.Schema.SYSTEM)
      self.Log("Could not retrieve Platform info.")

    if known_system_type:
      # We will accept a partial KBInit rather than raise, so pass
      # require_complete=False.
      self.CallFlow(
          artifact.KnowledgeBaseInitializationFlow.__name__,
          require_complete=False,
          lightweight=self.args.lightweight,
          next_state="ProcessKnowledgeBase")
    else:
      self.Log("Unknown system type, skipping KnowledgeBaseInitializationFlow")
Example #4
    def SetupClient(self,
                    client_nr,
                    index=None,
                    system=None,
                    os_version=None,
                    arch=None):
        """Prepares a test client mock to be used.

    Args:
      client_nr: int The GRR ID to be used. 0xABCD maps to C.100000000000abcd
                     in canonical representation.
      index: client_index.ClientIndex
      system: string
      os_version: string
      arch: string

    Returns:
      rdf_client.ClientURN
    """
        if index is not None:
            # A `with` block is expected to be managed by the calling function.
            client_id_urn = self._SetupClientImpl(client_nr, index, system,
                                                  os_version, arch)
        else:
            with client_index.CreateClientIndex(token=self.token) as index:
                client_id_urn = self._SetupClientImpl(client_nr, index, system,
                                                      os_version, arch)

        return client_id_urn
Example #5
    def testAddTimestamp(self):
        index = client_index.CreateClientIndex(token=self.token)

        client_urns = self.SetupClients(5)
        # 1413807132 = Mon, 20 Oct 2014 12:12:12 GMT
        with test_lib.FakeTime(1413807132):
            for urn in client_urns:
                client = aff4.FACTORY.Create(urn,
                                             aff4_type=aff4_grr.VFSGRRClient,
                                             mode="r",
                                             token=self.token)
                index.AddClient(client)

        self.assertEqual(
            len(index.LookupClients([".", "start_date:2014-10-20"])), 5)
        self.assertEqual(
            len(index.LookupClients([".", "start_date:2014-10-21"])), 0)
        self.assertEqual(
            len(
                index.LookupClients(
                    [".", "start_date:2013-10-20", "end_date:2014-10-19"])), 0)
        self.assertEqual(
            len(
                index.LookupClients(
                    [".", "start_date:2013-10-20", "end_date:2014-10-20"])), 5)

        # Ignore the keyword if the date is not readable.
        self.assertEqual(
            len(
                index.LookupClients(
                    [".", "start_date:2013-10-20", "end_date:XXXX"])), 5)
Example #6
  def ProcessKnowledgeBase(self, responses):
    """Collect and store any extra non-kb artifacts."""
    if not responses.success:
      raise flow.FlowError(
          "Error while collecting the knowledge base: %s" % responses.status)

    kb = responses.First()
    client = self._OpenClient(mode="rw")
    client.Set(client.Schema.KNOWLEDGE_BASE, kb)

    # Copy usernames.
    usernames = [user.username for user in kb.users if user.username]
    client.AddAttribute(client.Schema.USERNAMES(" ".join(usernames)))

    self.CopyOSReleaseFromKnowledgeBase(kb, client)
    client.Flush()

    artifact_list = [
        "WMILogicalDisks", "RootDiskVolumeUsage", "WMIComputerSystemProduct",
        "LinuxHardwareInfo", "OSXSPHardwareDataType"
    ]
    self.CallFlow(
        collectors.ArtifactCollectorFlow.__name__,
        artifact_list=artifact_list,
        next_state="ProcessArtifactResponses")

    # Update the client index
    client_index.CreateClientIndex(token=self.token).AddClient(client)
Example #7
    def SetupClient(self,
                    client_nr,
                    arch="x86_64",
                    kernel="4.0.0",
                    os_version="buster/sid",
                    ping=None,
                    system="Linux"):
        """Prepares a test client mock to be used.

    Args:
      client_nr: int The GRR ID to be used. 0xABCD maps to C.100000000000abcd
                     in canonical representation.
      arch: string
      kernel: string
      os_version: string
      ping: RDFDatetime
      system: string

    Returns:
      rdf_client.ClientURN
    """
        with client_index.CreateClientIndex(token=self.token) as index:
            client_id_urn = self._SetupClientImpl(client_nr,
                                                  index=index,
                                                  arch=arch,
                                                  kernel=kernel,
                                                  os_version=os_version,
                                                  ping=ping,
                                                  system=system)

        return client_id_urn
Example #8
    def Start(self):
        """Sign the CSR from the client."""
        with aff4.FACTORY.Create(self.client_id,
                                 aff4_grr.VFSGRRClient,
                                 mode="rw",
                                 token=self.token) as client:

            if self.args.csr.type != rdf_crypto.Certificate.Type.CSR:
                raise IOError("Must be called with CSR")

            csr = rdf_crypto.CertificateSigningRequest(self.args.csr.pem)
            # Verify the CSR. This is not strictly necessary but doesn't harm either.
            try:
                csr.Verify(csr.GetPublicKey())
            except rdf_crypto.VerificationError:
                raise flow.FlowError("CSR for client %s did not verify: %s" %
                                     (self.client_id, csr.AsPEM()))

            # Verify that the CN is of the correct form. The common name should refer
            # to a client URN.
            self.cn = rdf_client.ClientURN.FromPublicKey(csr.GetPublicKey())
            if self.cn != csr.GetCN():
                raise IOError("CSR CN %s does not match public key %s." %
                              (csr.GetCN(), self.cn))

            logging.info("Will sign CSR for: %s", self.cn)

            cert = rdf_crypto.RDFX509Cert.ClientCertFromCSR(csr)

            # This check is important to ensure that the client id reported in the
            # source of the enrollment request is the same as the one in the
            # certificate. We use the ClientURN to ensure this is also of the correct
            # form for a client name.
            if self.cn != self.client_id:
                raise flow.FlowError(
                    "Certificate name %s mismatch for client %s" %
                    (self.cn, self.client_id))

            # Set and write the certificate to the client record.
            now = rdfvalue.RDFDatetime.Now()
            client.Set(client.Schema.CERT, cert)
            client.Set(client.Schema.FIRST_SEEN, now)
            if data_store.RelationalDBEnabled():
                data_store.REL_DB.WriteClientMetadata(
                    self.client_id.Basename(),
                    certificate=cert,
                    first_seen=now,
                    fleetspeak_enabled=False)

            index = client_index.CreateClientIndex(token=self.token)
            index.AddClient(client)
            if data_store.RelationalDBEnabled():
                index = client_index.ClientIndex()
                index.AddClient(self.client_id.Basename(),
                                data_migration.ConvertVFSGRRClient(client))

        # Publish the client enrollment message.
        self.Publish("ClientEnrollment", self.client_id)

        self.Log("Enrolled %s successfully", self.client_id)
Example #9
    def Handle(self, args, token=None):
        audit_description = ",".join([
            token.username + u"." + utils.SmartUnicode(name)
            for name in args.labels
        ])
        audit_events = []

        try:
            index = client_index.CreateClientIndex(token=token)
            client_objs = aff4.FACTORY.MultiOpen(
                [cid.ToClientURN() for cid in args.client_ids],
                aff4_type=aff4_grr.VFSGRRClient,
                mode="rw",
                token=token)
            for client_obj in client_objs:
                index.RemoveClientLabels(client_obj)
                self.RemoveClientLabels(client_obj, args.labels)
                index.AddClient(client_obj)
                client_obj.Close()

                audit_events.append(
                    events.AuditEvent(
                        user=token.username,
                        action="CLIENT_REMOVE_LABEL",
                        flow_name="handler.ApiRemoveClientsLabelsHandler",
                        client=client_obj.urn,
                        description=audit_description))
        finally:
            events.Events.PublishMultipleEvents(
                {audit.AUDIT_EVENT: audit_events}, token=token)
Example #10
    def testUnversionedKeywords(self):
        index = client_index.CreateClientIndex(token=self.token)

        client_urns = self.SetupClients(5)

        with test_lib.FakeTime(1000000):
            for i in range(5):
                client = aff4.FACTORY.Create(client_urns[i],
                                             aff4_type=aff4_grr.VFSGRRClient,
                                             mode="rw",
                                             token=self.token)
                client.Set(client.Schema.HOST_IPS("10.1.0.%d" % i))
                client.Flush()
                index.AddClient(client)

        with test_lib.FakeTime(2000000):
            for i in range(5):
                client = aff4.FACTORY.Create(client_urns[i],
                                             aff4_type=aff4_grr.VFSGRRClient,
                                             mode="rw",
                                             token=self.token)
                client.Set(client.Schema.HOST_IPS("10.1.1.%d" % i))
                client.Flush()
                index.AddClient(client)
        with test_lib.FakeTime(3000000):
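            # A "+" prefix requires the keyword to match the client's current
            # (latest) indexed value; unprefixed keywords also match older,
            # superseded values such as the 10.1.0.x addresses set above.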
            self.assertEqual(
                index.LookupClients(["10.1.0", "Host-2"]),
                [rdf_client.ClientURN("aff4:/C.1000000000000002")])
            self.assertEqual(index.LookupClients(["+10.1.0", "Host-2"]), [])
            self.assertEqual(
                index.LookupClients(["+10.1.1", "Host-2"]),
                [rdf_client.ClientURN("aff4:/C.1000000000000002")])
Example #11
    def Handle(self, args, token=None):
        if args.count:
            end = args.offset + args.count
        else:
            end = sys.maxint

        keywords = shlex.split(args.query)

        index = client_index.CreateClientIndex(token=token)
        all_urns = set()
        for label in self.labels_whitelist:
            label_filter = ["label:" + label] + keywords
            all_urns.update(index.LookupClients(label_filter))

        all_objs = aff4.FACTORY.MultiOpen(sorted(all_urns, key=str),
                                          aff4_type=aff4_grr.VFSGRRClient,
                                          token=token)

        api_clients = []
        index = 0
        for client_obj in all_objs:
            if self._CheckClientLabels(client_obj):
                if index >= args.offset and index < end:
                    api_clients.append(
                        ApiClient().InitFromAff4Object(client_obj))

                index += 1
                if index >= end:
                    break

        return ApiSearchClientsResult(items=api_clients)
Example #12
  def End(self):
    """Finalize client registration."""
    # Update summary and publish to the Discovery queue.

    if data_store.RelationalDBWriteEnabled():
      try:
        data_store.REL_DB.WriteClient(self.state.client)
      except db.UnknownClientError:
        pass

    # The AFF4 client is also needed below to update the legacy client index,
    # so open it regardless of which data store serves the summary.
    client = self._OpenClient()
    if data_store.RelationalDBReadEnabled():
      summary = self.state.client.GetSummary()
      summary.client_id = self.client_id
    else:
      summary = client.GetSummary()

    self.Publish("Discovery", summary)
    self.SendReply(summary)

    # Update the client index
    client_index.CreateClientIndex(token=self.token).AddClient(client)
    if data_store.RelationalDBWriteEnabled():
      try:
        index = client_index.ClientIndex()
        index.AddClient(self.client_id.Basename(), self.state.client)
      except db.UnknownClientError:
        # TODO(amoser): Remove after data migration.
        pass
Example #13
    def Handle(self, args, token=None):
        if args.count:
            end = args.offset + args.count
        else:
            end = sys.maxint

        keywords = shlex.split(args.query)
        api_clients = []

        if data_store.RelationalDBReadEnabled():
            index = client_index.ClientIndex()

            # TODO(amoser): We could move the label verification into the
            # database making this method more efficient. Label restrictions
            # should be on small subsets though so this might not be worth
            # it.
            all_client_ids = set()
            for label in self.labels_whitelist:
                label_filter = ["label:" + label] + keywords
                all_client_ids.update(index.LookupClients(label_filter))

            client_infos = data_store.REL_DB.MultiReadClientFullInfo(
                all_client_ids)

            index = 0
            for _, client_info in sorted(client_infos.items()):
                if not self._VerifyLabels(client_info.labels):
                    continue
                if index >= args.offset and index < end:
                    api_clients.append(
                        ApiClient().InitFromClientInfo(client_info))
                index += 1
                if index >= end:
                    break

        else:
            index = client_index.CreateClientIndex(token=token)
            all_urns = set()
            for label in self.labels_whitelist:
                label_filter = ["label:" + label] + keywords
                all_urns.update(index.LookupClients(label_filter))

            all_objs = aff4.FACTORY.MultiOpen(sorted(all_urns, key=str),
                                              aff4_type=aff4_grr.VFSGRRClient,
                                              token=token)

            index = 0
            for client_obj in all_objs:
                if not self._CheckClientLabels(client_obj):
                    continue
                if index >= args.offset and index < end:
                    api_clients.append(
                        ApiClient().InitFromAff4Object(client_obj))

                index += 1
                if index >= end:
                    break

        return ApiSearchClientsResult(items=api_clients)
Example #14
  def testBulkLabelClients(self):
    index = client_index.CreateClientIndex(token=self.token)

    client_urns = self.SetupClients(2)
    for urn in client_urns:
      client = aff4.FACTORY.Create(
          urn, aff4_type=aff4_grr.VFSGRRClient, mode="rw", token=self.token)
      client.AddLabel("test_client")
      client.Flush()
      index.AddClient(client)

    # Maps hostnames used in the test to client urns.
    m = {"host-0": client_urns[0], "host-1": client_urns[1]}

    # No hostname.
    client_index.BulkLabel(
        "label-0", ["host-3"], token=self.token, client_index=index)
    self._HostsHaveLabel([], "label-0", index)

    # Add label.
    hosts = ["host-0", "host-1"]
    client_index.BulkLabel(
        "label-0", hosts, token=self.token, client_index=index)
    # host-0: label-0
    # host-1: label-0
    self._HostsHaveLabel(hosts, "label-0", index)
    self.assertItemsEqual(
        index.LookupClients(["label-0"]), [m[host] for host in hosts])

    # Add another label only changes the new host.
    hosts = ["host-1"]
    client_index.BulkLabel(
        "label-1", hosts, token=self.token, client_index=index)
    # host-0: label-0
    # host-1: label-0, label-1
    self._HostsHaveLabel(hosts, "label-1", index)
    self.assertItemsEqual(
        index.LookupClients(["label-1"]), [m[host] for host in hosts])
    # and other labels remain unchanged.
    hosts = ["host-0", "host-1"]
    self._HostsHaveLabel(hosts, "label-0", index)
    self.assertItemsEqual(
        index.LookupClients(["label-0"]), [m[host] for host in hosts])

    # Relabeling updates the label on already labeled hosts.
    hosts = ["host-0"]
    client_index.BulkLabel(
        "label-0", hosts, token=self.token, client_index=index)
    # host-0: label-0
    # host-1: label-1
    self._HostsHaveLabel(hosts, "label-0", index)
    self.assertItemsEqual(
        index.LookupClients(["label-0"]), [m[host] for host in hosts])
    # and other labels remain unchanged.
    hosts = ["host-1"]
    self._HostsHaveLabel(hosts, "label-1", index)
    self.assertItemsEqual(
        index.LookupClients(["label-1"]), [m[host] for host in hosts])
Example #15
  def SetupClients(self, nr_clients, system=None, os_version=None, arch=None):
    """Prepares nr_clients test client mocks to be used."""
    with client_index.CreateClientIndex(token=self.token) as index:
      client_ids = [
          self.SetupClient(client_nr, index, system, os_version, arch)
          for client_nr in xrange(nr_clients)
      ]

    return client_ids
Example #16
  def testAddLookupClients(self):
    index = client_index.CreateClientIndex(token=self.token)
    client_urns = self.SetupClients(42)
    for urn in client_urns:
      client = aff4.FACTORY.Create(
          urn, aff4_type=aff4_grr.VFSGRRClient, mode="r", token=self.token)
      index.AddClient(client)

    # Check unique identifiers.
    self.assertEqual(
        index.LookupClients(["192.168.0.1"]),
        [rdf_client.ClientURN("aff4:/C.1000000000000001")])
    self.assertEqual(
        index.LookupClients(["2001:aBcd::1"]),
        [rdf_client.ClientURN("aff4:/C.1000000000000001")])
    self.assertEqual(
        index.LookupClients(["ip:192.168.0.1"]),
        [rdf_client.ClientURN("aff4:/C.1000000000000001")])
    self.assertEqual(
        index.LookupClients(["ip:2001:abcd::1"]),
        [rdf_client.ClientURN("aff4:/C.1000000000000001")])
    self.assertEqual(
        index.LookupClients(["host-2"]),
        [rdf_client.ClientURN("aff4:/C.1000000000000002")])
    self.assertEqual(
        index.LookupClients(["C.1000000000000002"]),
        [rdf_client.ClientURN("aff4:/C.1000000000000002")])
    self.assertEqual(
        index.LookupClients(["aabbccddee01"]),
        [rdf_client.ClientURN("aff4:/C.1000000000000001")])
    self.assertEqual(
        index.LookupClients(["mac:aabbccddee01"]),
        [rdf_client.ClientURN("aff4:/C.1000000000000001")])
    self.assertEqual(
        index.LookupClients(["aa:bb:cc:dd:ee:01"]),
        [rdf_client.ClientURN("aff4:/C.1000000000000001")])
    self.assertEqual(
        index.LookupClients(["mac:aa:bb:cc:dd:ee:01"]),
        [rdf_client.ClientURN("aff4:/C.1000000000000001")])

    # IP prefixes of octets should work:
    self.assertEqual(
        sorted(index.LookupClients(["192.168.0"])), sorted(client_urns))

    # Hostname prefixes of tokens should work.
    self.assertEqual(
        index.LookupClients(["host-5.example"]),
        [rdf_client.ClientURN("aff4:/C.1000000000000005")])

    # Intersections should work.
    self.assertEqual(
        index.LookupClients(["192.168.0", "Host-2"]),
        [rdf_client.ClientURN("aff4:/C.1000000000000002")])

    # Universal keyword should find everything.
    self.assertEqual(len(index.LookupClients(["."])), 42)
Example #17
    def End(self):
        """Finalize client registration."""
        # Update summary and publish to the Discovery queue.
        client = self._OpenClient()
        summary = client.GetSummary()
        self.Publish("Discovery", summary)
        self.SendReply(summary)

        # Update the client index
        client_index.CreateClientIndex(token=self.token).AddClient(client)
Example #18
    def _CheckClientKwIndex(self, keywords, expected_count):
        # Tests that the client index has expected_count results when
        # searched for keywords.

        # AFF4 index.
        index = client_index.CreateClientIndex(token=self.token)
        self.assertEqual(len(index.LookupClients(keywords)), expected_count)

        # Relational index.
        index = client_index.ClientIndex()
        self.assertEqual(len(index.LookupClients(keywords)), expected_count)
Example #19
  def _CheckLabelIndex(self):
    """Check that label indexes are updated."""
    index = client_index.CreateClientIndex(token=self.token)

    # AFF4 index.
    self.assertEqual(
        list(index.LookupClients(["label:Label2"])), [self.client_id])

    # Relational index.
    self.assertEqual(client_index.ClientIndex().LookupClients(["label:Label2"]),
                     [self.client_id.Basename()])
Example #20
  def _MakeFixtures(self):
    token = access_control.ACLToken(username="******", reason="Make fixtures.")
    token = token.SetUID()

    for i in range(10):
      client_id = rdf_client.ClientURN("C.%016X" % i)
      with aff4.FACTORY.Create(
          client_id, aff4_grr.VFSGRRClient, mode="rw",
          token=token) as client_obj:
        index = client_index.CreateClientIndex(token=token)
        index.AddClient(client_obj)
Example #21
    def EnrolFleetspeakClient(self, client_id):
        """Enrols a Fleetspeak-enabled client for use with GRR."""
        client_urn = rdf_client.ClientURN(client_id)

        # If already enrolled, return.
        if data_store.RelationalDBReadEnabled():
            if data_store.REL_DB.ReadClientMetadata(client_id):
                return
        else:
            if aff4.FACTORY.ExistsWithType(client_urn,
                                           aff4_type=aff4_grr.VFSGRRClient,
                                           token=self.token):
                return

        logging.info("Enrolling a new Fleetspeak client: %r", client_id)

        if data_store.RelationalDBWriteEnabled():
            data_store.REL_DB.WriteClientMetadata(client_id,
                                                  fleetspeak_enabled=True)

        # TODO(fleetspeak-team,grr-team): If aff4 isn't reliable enough, we can
        # catch exceptions from it and forward them to Fleetspeak by failing its
        # gRPC call. Fleetspeak will then retry with a random, perhaps healthier,
        # instance of the GRR frontend.
        with aff4.FACTORY.Create(client_urn,
                                 aff4_type=aff4_grr.VFSGRRClient,
                                 mode="rw",
                                 token=self.token) as client:

            client.Set(client.Schema.FLEETSPEAK_ENABLED,
                       rdfvalue.RDFBool(True))

            index = client_index.CreateClientIndex(token=self.token)
            index.AddClient(client)
            if data_store.RelationalDBWriteEnabled():
                index = client_index.ClientIndex()
                index.AddClient(data_migration.ConvertVFSGRRClient(client))

        enrollment_session_id = rdfvalue.SessionID(queue=queues.ENROLLMENT,
                                                   flow_name="Enrol")

        publish_msg = rdf_flows.GrrMessage(
            payload=client_urn,
            session_id=enrollment_session_id,
            # Fleetspeak ensures authentication.
            auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED,
            source=enrollment_session_id,
            priority=rdf_flows.GrrMessage.Priority.MEDIUM_PRIORITY)

        # Publish the client enrollment message.
        events.Events.PublishEvent("ClientEnrollment",
                                   publish_msg,
                                   token=self.token)
Example #22
  def SetupClient(self,
                  client_nr,
                  arch="x86_64",
                  last_boot_time=None,
                  kernel="4.0.0",
                  os_version="buster/sid",
                  ping=None,
                  system="Linux",
                  memory_size=None,
                  add_cert=True):
    """Prepares a test client mock to be used.

    Args:
      client_nr: int The GRR ID to be used. 0xABCD maps to C.100000000000abcd
                     in canonical representation.
      arch: string
      last_boot_time: RDFDatetime
      kernel: string
      os_version: string
      ping: RDFDatetime
      system: string
      memory_size: bytes
      add_cert: boolean

    Returns:
      rdf_client.ClientURN
    """
    # Make it possible to use SetupClient for both REL_DB and legacy tests.
    self.SetupTestClientObject(
        client_nr,
        add_cert=add_cert,
        arch=arch,
        last_boot_time=last_boot_time,
        kernel=kernel,
        memory_size=memory_size,
        os_version=os_version,
        ping=ping or rdfvalue.RDFDatetime.Now(),
        system=system)

    with client_index.CreateClientIndex(token=self.token) as index:
      client_id_urn = self._SetupClientImpl(
          client_nr,
          index=index,
          arch=arch,
          last_boot_time=last_boot_time,
          kernel=kernel,
          os_version=os_version,
          ping=ping,
          system=system,
          memory_size=memory_size,
          add_cert=add_cert)

    return client_id_urn
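
A minimal sketch of how a test might call the helper above; the test method, FakeTime value, and assertion are illustrative and not part of the original file.

  def testSetupClient(self):
    with test_lib.FakeTime(42):
      client_urn = self.SetupClient(0xABCD, system="Windows", os_version="6.2")
    # 0xABCD maps to C.100000000000abcd in canonical representation.
    self.assertEqual(client_urn.Basename(), "C.100000000000abcd")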
Example #23
  def End(self):
    """Finalize client registration."""
    # Update summary and publish to the Discovery queue.
    client = self._OpenClient()
    summary = client.GetSummary()
    self.Publish("Discovery", summary)
    self.SendReply(summary)

    # Update the client index
    client_index.CreateClientIndex(token=self.token).AddClient(client)
    if data_store.RelationalDBEnabled():
      index = client_index.ClientIndex()
      index.AddClient(self.client_id.Basename(),
                      data_migration.ConvertVFSGRRClient(client))
Example #24
def CleanVacuousVersions(clients=None, dry_run=True):
  """A script to remove no-op client versions.

  This script removes a client version when it is identical to the previous
  one, in the sense that no versioned attributes were changed between the two
  versions.

  Args:
    clients: A list of ClientURNs; if empty, all clients are cleaned.
    dry_run: Whether this is a dry run.
  """

  if not clients:
    index = client_index.CreateClientIndex()
    clients = index.LookupClients(["."])
  clients.sort()
  with data_store.DB.GetMutationPool() as pool:

    logging.info("checking %d clients", len(clients))
    for batch in utils.Grouper(clients, 10000):
      # TODO(amoser): This only works on datastores that use the Bigtable
      # scheme.
      client_infos = data_store.DB.MultiResolvePrefix(
          batch, ["aff4:", "aff4:"], data_store.DB.ALL_TIMESTAMPS)

      for client, type_list in client_infos:
        cleared = 0
        kept = 0
        updates = []
        for a, _, ts in type_list:
          if ts != 0:
            updates.append((ts, a))
        updates = sorted(updates)
        dirty = True
        for ts, a in updates:
          if a == "aff4:type":
            if dirty:
              kept += 1
              dirty = False
            else:
              cleared += 1
              if not dry_run:
                pool.DeleteAttributes(client, ["aff4:type"], start=ts, end=ts)
                if pool.Size() > 1000:
                  pool.Flush()
          else:
            dirty = True
        logging.info("%s: kept %d and cleared %d", client, kept, cleared)
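
A hedged usage sketch for the maintenance script above, e.g. from a GRR console session; the specific client URNs are illustrative.

  # Dry run over every client found in the index (dry_run defaults to True).
  CleanVacuousVersions()

  # Actually delete the redundant versions for two specific clients.
  CleanVacuousVersions(
      clients=[rdf_client.ClientURN("C.1000000000000001"),
               rdf_client.ClientURN("C.1000000000000002")],
      dry_run=False)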
Example #25
    def ProcessKnowledgeBase(self, responses):
        """Collect and store any extra non-kb artifacts."""
        if not responses.success:
            raise flow.FlowError(
                "Error while collecting the knowledge base: %s" %
                responses.status)

        kb = responses.First()
        # AFF4 client.
        client = self._OpenClient(mode="rw")
        client.Set(client.Schema.KNOWLEDGE_BASE, kb)

        # Copy usernames.
        usernames = [user.username for user in kb.users if user.username]
        client.AddAttribute(client.Schema.USERNAMES(" ".join(usernames)))

        self.CopyOSReleaseFromKnowledgeBase(kb, client)
        client.Flush()

        # objects.ClientSnapshot.

        # Information already present in the knowledge base takes precedence.
        if not kb.os:
            kb.os = self.state.system

        if not kb.fqdn:
            kb.fqdn = self.state.fqdn
        self.state.client.knowledge_base = kb

        artifact_list = [
            "WMILogicalDisks", "RootDiskVolumeUsage",
            "WMIComputerSystemProduct", "LinuxHardwareInfo",
            "OSXSPHardwareDataType"
        ]
        self.CallFlow(collectors.ArtifactCollectorFlow.__name__,
                      artifact_list=artifact_list,
                      next_state="ProcessArtifactResponses")

        # Update the client index for the AFF4 client.
        client_index.CreateClientIndex(token=self.token).AddClient(client)

        if data_store.RelationalDBWriteEnabled():
            try:
                # Update the client index for the objects.ClientSnapshot.
                client_index.ClientIndex().AddClient(self.client_id.Basename(),
                                                     self.state.client)
            except db.UnknownClientError:
                pass
Example #26
def SearchClients(query_str, token=None, limit=1000):
  """Search indexes for clients. Returns list (client, hostname, os version)."""
  client_schema = aff4.AFF4Object.classes["VFSGRRClient"].SchemaCls
  index = client_index.CreateClientIndex(token=token)

  client_list = index.LookupClients([query_str])
  result_set = aff4.FACTORY.MultiOpen(client_list, token=token)
  results = []
  for result in result_set:
    results.append((result, str(result.Get(client_schema.HOSTNAME)), str(
        result.Get(client_schema.OS_VERSION)), str(
            result.Get(client_schema.PING))))
    if len(results) >= limit:
      break

  return results
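
A minimal usage sketch for the search helper above, assuming an access_control import for the token; the username, reason, and query string are illustrative.

  token = access_control.ACLToken(username="admin", reason="triage")
  for client_obj, hostname, os_version, ping in SearchClients(
      "host-1", token=token, limit=50):
    print("%s  %s  %s  %s" % (client_obj.urn, hostname, os_version, ping))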
Example #27
    def Handle(self, args, token=None):
        end = args.count or sys.maxint

        keywords = shlex.split(args.query)

        index = client_index.CreateClientIndex(token=token)
        result_urns = sorted(
            index.LookupClients(keywords))[args.offset:args.offset + end]

        result_set = aff4.FACTORY.MultiOpen(result_urns, token=token)

        api_clients = []
        for child in sorted(result_set):
            api_clients.append(ApiClient().InitFromAff4Object(child))

        return ApiSearchClientsResult(items=api_clients)
Example #28
def CleanClientVersions(clients=None, dry_run=True, token=None):
    """A script to remove excessive client versions.

  Especially when a client is heavily cloned, we sometimes write an excessive
  number of versions of it. Since these versions all go into the same database
  row and are displayed as a dropdown list in the admin UI, it is sometimes
  necessary to clear them out.

  This deletes versions from clients so that we keep at most one
  version per hour.

  Args:
    clients: A list of ClientURNs; if empty, all clients are cleaned.
    dry_run: Whether this is a dry run.
    token: Datastore token.
  """
    if not clients:
        index = client_index.CreateClientIndex(token=token)
        clients = index.LookupClients(["."])
    clients.sort()
    with data_store.DB.GetMutationPool() as pool:

        logging.info("checking %d clients", len(clients))

        # TODO(amoser): This only works on datastores that use the Bigtable scheme.
        client_infos = data_store.DB.MultiResolvePrefix(
            clients, "aff4:type", data_store.DB.ALL_TIMESTAMPS)

        for client, type_list in client_infos:
            logging.info("%s: has %d versions", client, len(type_list))
            cleared = 0
            kept = 1
            last_kept = type_list[0][2]
            for _, _, ts in type_list[1:]:
                if last_kept - ts > 60 * 60 * 1000000:  # 1 hour
                    last_kept = ts
                    kept += 1
                else:
                    if not dry_run:
                        pool.DeleteAttributes(client, ["aff4:type"],
                                              start=ts,
                                              end=ts)
                    cleared += 1
                    if pool.Size() > 10000:
                        pool.Flush()
            logging.info("%s: kept %d and cleared %d", client, kept, cleared)
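
The same dry-run-first pattern applies here; the token is assumed to come from the surrounding console session.

    # Log what would be kept/cleared without deleting anything.
    CleanClientVersions(token=token)

    # Actually trim the history down to at most one version per hour.
    CleanClientVersions(dry_run=False, token=token)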
Example #29
    def ProcessKnowledgeBase(self, responses):
        """Collect and store any extra non-kb artifacts."""
        if not responses.success:
            raise flow.FlowError("Error collecting artifacts: %s" %
                                 responses.status)

        # Collect any non-knowledgebase artifacts that will be stored in aff4.
        artifact_list = self._GetExtraArtifactsForCollection()
        if artifact_list:
            self.CallFlow(collectors.ArtifactCollectorFlow.__name__,
                          artifact_list=artifact_list,
                          next_state="ProcessArtifactResponses",
                          store_results_in_aff4=True)

        # Update the client index
        client = self._OpenClient()
        client_index.CreateClientIndex(token=self.token).AddClient(client)
Example #30
    def ProcessKnowledgeBase(self, responses):
        """Collect and store any extra non-kb artifacts."""
        if not responses.success:
            raise flow.FlowError("Error collecting artifacts: %s" %
                                 responses.status)

        artifact_list = [
            "WMILogicalDisks", "RootDiskVolumeUsage",
            "WMIComputerSystemProduct", "LinuxHardwareInfo",
            "OSXSPHardwareDataType"
        ]
        self.CallFlow(collectors.ArtifactCollectorFlow.__name__,
                      artifact_list=artifact_list,
                      next_state="ProcessArtifactResponses")

        # Update the client index
        client = self._OpenClient()
        client_index.CreateClientIndex(token=self.token).AddClient(client)
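
Across these examples the recurring pattern is the same: open the VFSGRRClient object, modify it, and re-add it to the keyword index so that searches stay current. A condensed, hedged sketch of that pattern, with an illustrative URN and token:

    with aff4.FACTORY.Create(rdf_client.ClientURN("C.1000000000000001"),
                             aff4_grr.VFSGRRClient,
                             mode="rw",
                             token=token) as client:
        client.Set(client.Schema.HOSTNAME("web-frontend-1"))
        # Re-index so keyword lookups (hostname, IPs, MACs, labels) see the change.
        client_index.CreateClientIndex(token=token).AddClient(client)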