Example 1
    def VerifyMessageSignature(self, response_comms, packed_message_list,
                               cipher, cipher_verified, api_version,
                               remote_public_key):
        """Verifies the message list signature.

        In the server we check that the timestamp is later than the ping timestamp
        stored with the client. This ensures that client responses cannot be
        replayed.

        Args:
          response_comms: The raw response_comms rdfvalue.
          packed_message_list: The PackedMessageList rdfvalue from the server.
          cipher: The cipher object that should be used to verify the message.
          cipher_verified: If True, the cipher's signature is not verified again.
          api_version: The API version we should use.
          remote_public_key: The public key of the source.

        Returns:
          An rdf_flows.GrrMessage.AuthorizationState.
        """
        if (not cipher_verified
                and not cipher.VerifyCipherSignature(remote_public_key)):
            stats.STATS.IncrementCounter("grr_unauthenticated_messages")
            return rdf_flows.GrrMessage.AuthorizationState.UNAUTHENTICATED

        try:
            client_id = cipher.cipher_metadata.source
            try:
                client = self.client_cache.Get(client_id)
            except KeyError:
                client = aff4.FACTORY.Create(
                    client_id,
                    aff4.AFF4Object.classes["VFSGRRClient"],
                    mode="rw",
                    token=self.token)
                self.client_cache.Put(client_id, client)
                stats.STATS.SetGaugeValue(
                    "grr_frontendserver_client_cache_size",
                    len(self.client_cache))

            ip = response_comms.orig_request.source_ip
            client.Set(client.Schema.CLIENT_IP(ip))

            # For the very first packet we see from the client we do not have
            # its clock yet.
            remote_time = client.Get(
                client.Schema.CLOCK) or rdfvalue.RDFDatetime(0)
            client_time = packed_message_list.timestamp or rdfvalue.RDFDatetime(
                0)

            # This used to be a strict check here so absolutely no out of
            # order messages would be accepted ever. Turns out that some
            # proxies can send your request with some delay even if the
            # client has already timed out (and sent another request in
            # the meantime, making the first one out of order). In that
            # case we would just kill the whole flow as a
            # precaution. Given the behavior of those proxies, this seems
            # now excessive and we have changed the replay protection to
            # only trigger on messages that are more than one hour old.

            if client_time < remote_time - rdfvalue.Duration("1h"):
                logging.warning("Message desynchronized for %s: %s >= %s",
                                client_id, remote_time, client_time)
                # This is likely an old message
                return rdf_flows.GrrMessage.AuthorizationState.DESYNCHRONIZED

            stats.STATS.IncrementCounter("grr_authenticated_messages")

            # Update the client and server timestamps only if the client
            # time moves forward.
            if client_time > remote_time:
                client.Set(client.Schema.CLOCK, client_time)
                client.Set(client.Schema.PING, rdfvalue.RDFDatetime.Now())

                clock = client_time
                ping = rdfvalue.RDFDatetime.Now()

                for label in client.Get(client.Schema.LABELS, []):
                    stats.STATS.IncrementCounter("client_pings_by_label",
                                                 fields=[label.name])
            else:
                clock = None
                ping = None
                logging.warning("Out of order message for %s: %s >= %s",
                                client_id, remote_time, client_time)

            client.Flush()
            if data_store.RelationalDBWriteEnabled():
                source_ip = response_comms.orig_request.source_ip
                if source_ip:
                    last_ip = rdf_client_network.NetworkAddress(
                        human_readable_address=response_comms.orig_request.
                        source_ip)
                else:
                    last_ip = None

                if ping or clock or last_ip:
                    try:
                        data_store.REL_DB.WriteClientMetadata(
                            client_id.Basename(),
                            last_ip=last_ip,
                            last_clock=clock,
                            last_ping=ping,
                            fleetspeak_enabled=False)
                    except db.UnknownClientError:
                        pass

        except communicator.UnknownClientCert:
            pass

        return rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED
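
A note on the replay check above: the one-hour window described in the comment reduces to a single timestamp comparison. Below is a minimal, dependency-free sketch of that check, assuming RDFDatetime values are microseconds since the epoch; the constant and function names are illustrative, not GRR APIs.

MICROSECONDS_PER_HOUR = 3600 * 10**6

def is_desynchronized(client_time_us, stored_client_time_us):
    """True if the message is more than one hour older than the stored clock."""
    return client_time_us < stored_client_time_us - MICROSECONDS_PER_HOUR

# A message timestamped two hours before the stored clock is rejected ...
assert is_desynchronized(0, 2 * MICROSECONDS_PER_HOUR)
# ... while one that is only 30 minutes out of order is still accepted.
assert not is_desynchronized(0, MICROSECONDS_PER_HOUR // 2)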
Example 2
 def ForceGC(self):
   self._last_gc_run = rdfvalue.RDFDatetime(0)
   self._RunGC()
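
ForceGC works by rewinding the last-run marker to the epoch so that the next garbage-collection pass runs unconditionally. A minimal sketch of that pattern is below; the class and threshold are illustrative stand-ins, not the real GRR cache internals.

import time

class _Cache(object):
    GC_INTERVAL_SECONDS = 300  # Illustrative threshold.

    def __init__(self):
        self._last_gc_run = 0.0

    def _RunGC(self):
        now = time.time()
        if now - self._last_gc_run < self.GC_INTERVAL_SECONDS:
            return  # Ran recently, skip this pass.
        self._last_gc_run = now
        # ... evict expired entries here ...

    def ForceGC(self):
        # Resetting the marker makes the next _RunGC unconditional.
        self._last_gc_run = 0.0
        self._RunGC()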
Example 3
    def WriteBuffer(self, responses):
        """Write the hash received to the blob image."""

        index = responses.request_data["index"]
        if index not in self.state.pending_files:
            return

        # Failed to read the file - ignore it.
        if not responses.success:
            self._FileFetchFailed(index, responses.request.request.name)
            return

        response = responses.First()
        file_tracker = self.state.pending_files.get(index)
        if file_tracker:
            blob_dict = file_tracker.setdefault("blobs", {})
            blob_index = responses.request_data["blob_index"]
            blob_dict[blob_index] = (response.data, response.length)

            if len(blob_dict) == len(file_tracker["hash_list"]):
                # Write the file to the data store.
                stat_entry = file_tracker["stat_entry"]
                urn = stat_entry.pathspec.AFF4Path(self.client_urn)

                if data_store.AFF4Enabled():
                    with aff4.FACTORY.Create(urn,
                                             aff4_grr.VFSBlobImage,
                                             mode="w",
                                             token=self.token) as fd:

                        fd.SetChunksize(self.CHUNK_SIZE)
                        fd.Set(fd.Schema.STAT(stat_entry))
                        fd.Set(fd.Schema.PATHSPEC(stat_entry.pathspec))
                        fd.Set(
                            fd.Schema.CONTENT_LAST(
                                rdfvalue.RDFDatetime().Now()))

                        for index in sorted(blob_dict):
                            digest, length = blob_dict[index]
                            fd.AddBlob(rdf_objects.BlobID.FromBytes(digest),
                                       length)

                if data_store.RelationalDBWriteEnabled():
                    path_info = rdf_objects.PathInfo.FromStatEntry(stat_entry)

                    # Adding files to filestore requires reading data from RELDB,
                    # thus protecting this code with a filestore-read-enabled check.
                    if data_store.RelationalDBReadEnabled("filestore"):
                        blob_ids = []
                        for index in sorted(blob_dict):
                            digest, _ = blob_dict[index]
                            blob_ids.append(
                                rdf_objects.BlobID.FromBytes(digest))

                        hash_obj = file_tracker["hash_obj"]

                        hash_id = file_store.AddFileWithUnknownHash(blob_ids)
                        # If the hash that we've calculated matches what we got from the
                        # client, then simply store the full hash entry.
                        # Otherwise store just the hash that we've calculated.
                        if hash_id.AsBytes() == hash_obj.sha256:
                            path_info.hash_entry = hash_obj
                        else:
                            path_info.hash_entry.sha256 = hash_id.AsBytes()

                        # Publish the add file event to cause the file to be added to the
                        # filestore.
                        events.Events.PublishEvent(
                            "FileStore.Add",
                            rdf_file_store.FileStoreAddEvent(
                                hash_id=hash_id, blob_ids=blob_ids),
                            token=self.token)

                    data_store.REL_DB.WritePathInfos(self.client_id,
                                                     [path_info])

                # Publish the new file event to cause the file to be added to the
                # filestore.
                events.Events.PublishEvent("LegacyFileStore.AddFileToStore",
                                           urn,
                                           token=self.token)

                # Save some space.
                del file_tracker["blobs"]
                del file_tracker["hash_list"]

                # File done, remove from the store and close it.
                self._ReceiveFetchedFile(file_tracker)

                self.state.files_fetched += 1

                if not self.state.files_fetched % 100:
                    self.Log("Fetched %d of %d files.",
                             self.state.files_fetched,
                             self.state.files_to_fetch)
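
WriteBuffer buffers each incoming blob under its index and only writes the file once a blob has arrived for every entry in hash_list. A dependency-free sketch of that completeness bookkeeping, with illustrative names:

def file_is_complete(blob_dict, hash_list):
    """True once a blob has been received for every expected hash."""
    return len(blob_dict) == len(hash_list)

hash_list = ["h0", "h1", "h2"]
blobs = {}
blobs[0] = (b"digest-0", 10)
assert not file_is_complete(blobs, hash_list)
blobs[2] = (b"digest-2", 7)   # Blobs may arrive out of order.
blobs[1] = (b"digest-1", 5)
assert file_is_complete(blobs, hash_list)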
Example 4
    def VerifyMessageSignature(self, response_comms, packed_message_list,
                               cipher, cipher_verified, api_version,
                               remote_public_key):
        """Verifies the message list signature.

        In the server we check that the timestamp is later than the ping timestamp
        stored with the client. This ensures that client responses cannot be
        replayed.

        Args:
          response_comms: The raw response_comms rdfvalue.
          packed_message_list: The PackedMessageList rdfvalue from the server.
          cipher: The cipher object that should be used to verify the message.
          cipher_verified: If True, the cipher's signature is not verified again.
          api_version: The API version we should use.
          remote_public_key: The public key of the source.

        Returns:
          An rdf_flows.GrrMessage.AuthorizationState.
        """
        if (not cipher_verified
                and not cipher.VerifyCipherSignature(remote_public_key)):
            stats.STATS.IncrementCounter("grr_unauthenticated_messages")
            return rdf_flows.GrrMessage.AuthorizationState.UNAUTHENTICATED

        try:
            client_id = cipher.cipher_metadata.source.Basename()
            metadata = data_store.REL_DB.ReadClientMetadata(client_id)
            client_time = packed_message_list.timestamp or rdfvalue.RDFDatetime(
                0)

            # This used to be a strict check here so absolutely no out of
            # order messages would be accepted ever. Turns out that some
            # proxies can send your request with some delay even if the
            # client has already timed out (and sent another request in
            # the meantime, making the first one out of order). In that
            # case we would just kill the whole flow as a
            # precaution. Given the behavior of those proxies, this seems
            # now excessive and we have changed the replay protection to
            # only trigger on messages that are more than one hour old.
            if metadata and metadata.clock:
                stored_client_time = metadata.clock

                if client_time < stored_client_time - rdfvalue.Duration("1h"):
                    logging.warning("Message desynchronized for %s: %s >= %s",
                                    client_id, stored_client_time, client_time)
                    # This is likely an old message
                    return rdf_flows.GrrMessage.AuthorizationState.DESYNCHRONIZED

                stats.STATS.IncrementCounter("grr_authenticated_messages")

                # Update the client and server timestamps only if the client
                # time moves forward.
                if client_time <= stored_client_time:
                    logging.warning("Out of order message for %s: %s >= %s",
                                    client_id, stored_client_time, client_time)
                    return rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED

            stats.STATS.IncrementCounter("grr_authenticated_messages")

            for label in data_store.REL_DB.ReadClientLabels(client_id):
                stats.STATS.IncrementCounter("client_pings_by_label",
                                             fields=[label.name])

            source_ip = response_comms.orig_request.source_ip
            if source_ip:
                last_ip = rdf_client_network.NetworkAddress(
                    human_readable_address=response_comms.orig_request.
                    source_ip)
            else:
                last_ip = None

            data_store.REL_DB.WriteClientMetadata(
                client_id,
                last_ip=last_ip,
                last_clock=client_time,
                last_ping=rdfvalue.RDFDatetime.Now(),
                fleetspeak_enabled=False)

        except communicator.UnknownClientCert:
            pass

        return rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED
Example 5
 def _GetLastForemanRunTime(self, client_id):
   md = data_store.REL_DB.ReadClientMetadata(client_id)
   return md.last_foreman_time or rdfvalue.RDFDatetime(0)
Example 6
class ApiDeletePendingUserNotificationHandlerTest(
    api_test_lib.ApiCallHandlerTest):
  """Test for ApiDeletePendingUserNotificationHandler."""

  TIME_0 = rdfvalue.RDFDatetime(42 * rdfvalue.MICROSECONDS)
  TIME_1 = TIME_0 + rdfvalue.Duration("1d")
  TIME_2 = TIME_1 + rdfvalue.Duration("1d")

  def setUp(self):
    super(ApiDeletePendingUserNotificationHandlerTest, self).setUp()
    self.handler = user_plugin.ApiDeletePendingUserNotificationHandler()
    self.client_id = self.SetupClient(0)

    with test_lib.FakeTime(self.TIME_0):
      notification.Notify(
          self.token.username,
          rdf_objects.UserNotification.Type.TYPE_CLIENT_INTERROGATED,
          "<some message>",
          rdf_objects.ObjectReference(
              reference_type=rdf_objects.ObjectReference.Type.CLIENT,
              client=rdf_objects.ClientReference(
                  client_id=self.client_id.Basename())))

      notification.Notify(
          self.token.username,
          rdf_objects.UserNotification.Type.TYPE_CLIENT_INTERROGATED,
          "<some message with identical time>",
          rdf_objects.ObjectReference(
              reference_type=rdf_objects.ObjectReference.Type.CLIENT,
              client=rdf_objects.ClientReference(
                  client_id=self.client_id.Basename())))

    with test_lib.FakeTime(self.TIME_1):
      notification.Notify(
          self.token.username,
          rdf_objects.UserNotification.Type.TYPE_CLIENT_APPROVAL_GRANTED,
          "<some other message>",
          rdf_objects.ObjectReference(
              reference_type=rdf_objects.ObjectReference.Type.CLIENT,
              client=rdf_objects.ClientReference(
                  client_id=self.client_id.Basename())))

  def _GetNotifications(self):
    user_record = aff4.FACTORY.Create(
        aff4.ROOT_URN.Add("users").Add(self.token.username),
        aff4_type=aff4_users.GRRUser,
        mode="r",
        token=self.token)

    pending = user_record.Get(user_record.Schema.PENDING_NOTIFICATIONS)
    shown = user_record.Get(user_record.Schema.SHOWN_NOTIFICATIONS)
    return (pending, shown)

  def testDeletesFromPendingAndAddsToShown(self):
    # Check that there are three pending notifications and no shown ones yet.
    (pending, shown) = self._GetNotifications()
    self.assertEqual(len(pending), 3)
    self.assertEqual(len(shown), 0)

    # Delete a pending notification.
    args = user_plugin.ApiDeletePendingUserNotificationArgs(
        timestamp=self.TIME_1)
    self.handler.Handle(args, token=self.token)

    # After the deletion, two notifications should be pending and one shown.
    (pending, shown) = self._GetNotifications()
    self.assertEqual(len(pending), 2)
    self.assertEqual(len(shown), 1)
    self.assertTrue("<some other message>" in shown[0].message)
    self.assertEqual(shown[0].timestamp, self.TIME_1)

  def testRaisesOnDeletingMultipleNotifications(self):
    # Check that there are three pending notifications and no shown ones yet.
    (pending, shown) = self._GetNotifications()
    self.assertEqual(len(pending), 3)
    self.assertEqual(len(shown), 0)

    # Delete all pending notifications on TIME_0.
    args = user_plugin.ApiDeletePendingUserNotificationArgs(
        timestamp=self.TIME_0)
    with self.assertRaises(aff4_users.UniqueKeyError):
      self.handler.Handle(args, token=self.token)

    # Check that the notifications were not changed in the process.
    (pending, shown) = self._GetNotifications()
    self.assertEqual(len(pending), 3)
    self.assertEqual(len(shown), 0)

  def testUnknownTimestampIsIgnored(self):
    # Check that there are three pending notifications and no shown ones yet.
    (pending, shown) = self._GetNotifications()
    self.assertEqual(len(pending), 3)
    self.assertEqual(len(shown), 0)

    # A timestamp not matching any pending notifications does not change any of
    # the collections.
    args = user_plugin.ApiDeletePendingUserNotificationArgs(
        timestamp=self.TIME_2)
    self.handler.Handle(args, token=self.token)

    # We should still have the same number of pending and shown notifications.
    (pending, shown) = self._GetNotifications()
    self.assertEqual(len(pending), 3)
    self.assertEqual(len(shown), 0)
Example 7
 def testAddNumber(self):
   date = rdfvalue.RDFDatetime(1e9)
   self.assertEqual(int(date + 60), 1e9 + 60e6)
   self.assertEqual(int(date + 1000.23), 1e9 + 1000230e3)
   self.assertEqual(int(date + (-10)), 1e9 - 10e6)
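
The expected values encode the unit convention the test above relies on: an RDFDatetime stores microseconds since the epoch, and a plain number added to (or subtracted from) it is interpreted as seconds. A dependency-free restatement of that arithmetic, with an illustrative helper name:

MICROSECONDS_PER_SECOND = 10**6

def add_seconds(timestamp_us, seconds):
    """Add a number of seconds to a microsecond timestamp."""
    return timestamp_us + seconds * MICROSECONDS_PER_SECOND

assert add_seconds(10**9, 60) == 10**9 + 60 * 10**6
assert add_seconds(10**9, -10) == 10**9 - 10 * 10**6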
Example 8
 def testSubNumber(self):
   date = rdfvalue.RDFDatetime(1e9)
   self.assertEqual(int(date - 60), 1e9 - 60e6)
   self.assertEqual(int(date - (-1000.23)), 1e9 + 1000230e3)
   self.assertEqual(int(date - 1e12), 1e9 - 1e18)
Example 9
    def WriteBuffer(self, responses):
        """Write the hash received to the blob image."""

        index = responses.request_data["index"]
        if index not in self.state.pending_files:
            return

        # Failed to read the file - ignore it.
        if not responses.success:
            self._FileFetchFailed(index)
            return

        response = responses.First()
        file_tracker = self.state.pending_files.get(index)
        if not file_tracker:
            return

        blob_dict = file_tracker.setdefault("blobs", {})
        blob_index = responses.request_data["blob_index"]
        blob_dict[blob_index] = (response.data, response.length)

        if len(blob_dict) != len(file_tracker["hash_list"]):
            # We need more data before we can write the file.
            return

        # Write the file to the data store.
        stat_entry = file_tracker["stat_entry"]
        urn = stat_entry.pathspec.AFF4Path(self.client_urn)

        if data_store.AFF4Enabled():
            with aff4.FACTORY.Create(urn,
                                     aff4_grr.VFSBlobImage,
                                     mode="w",
                                     token=self.token) as fd:

                fd.SetChunksize(self.CHUNK_SIZE)
                fd.Set(fd.Schema.STAT(stat_entry))
                fd.Set(fd.Schema.PATHSPEC(stat_entry.pathspec))
                fd.Set(fd.Schema.CONTENT_LAST(rdfvalue.RDFDatetime().Now()))

                for index in sorted(blob_dict):
                    digest, length = blob_dict[index]
                    fd.AddBlob(rdf_objects.BlobID.FromBytes(digest), length)

        if data_store.RelationalDBEnabled():
            path_info = rdf_objects.PathInfo.FromStatEntry(stat_entry)

            blob_refs = []
            offset = 0
            for index in sorted(blob_dict):
                digest, size = blob_dict[index]
                blob_refs.append(
                    rdf_objects.BlobReference(
                        offset=offset,
                        size=size,
                        blob_id=rdf_objects.BlobID.FromBytes(digest)))
                offset += size

            hash_obj = file_tracker["hash_obj"]

            client_path = db.ClientPath.FromPathInfo(self.client_id, path_info)
            hash_id = file_store.AddFileWithUnknownHash(
                client_path,
                blob_refs,
                use_external_stores=self.state.use_external_stores)
            # If the hash that we've calculated matches what we got from the
            # client, then simply store the full hash entry.
            # Otherwise store just the hash that we've calculated.
            if hash_id.AsBytes() == hash_obj.sha256:
                path_info.hash_entry = hash_obj
            else:
                path_info.hash_entry.sha256 = hash_id.AsBytes()

            data_store.REL_DB.WritePathInfos(self.client_id, [path_info])

        # Save some space.
        del file_tracker["blobs"]
        del file_tracker["hash_list"]

        # File done, remove from the store and close it.
        self._ReceiveFetchedFile(file_tracker)

        self.state.files_fetched += 1

        if not self.state.files_fetched % 100:
            self.Log("Fetched %d of %d files.", self.state.files_fetched,
                     self.state.files_to_fetch)
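
The blob_refs loop above assigns each blob a cumulative byte offset before handing the list to the file store. Here is a dependency-free sketch of that bookkeeping; BlobRef is an illustrative stand-in for rdf_objects.BlobReference.

import collections

BlobRef = collections.namedtuple("BlobRef", ["offset", "size", "digest"])

def build_blob_refs(blob_dict):
    """blob_dict maps blob index -> (digest, size); indices may arrive sparse."""
    refs = []
    offset = 0
    for index in sorted(blob_dict):
        digest, size = blob_dict[index]
        refs.append(BlobRef(offset=offset, size=size, digest=digest))
        offset += size
    return refs

# Three blobs of 10, 5 and 7 bytes land at offsets 0, 10 and 15.
refs = build_blob_refs({0: (b"d0", 10), 1: (b"d1", 5), 2: (b"d2", 7)})
assert [r.offset for r in refs] == [0, 10, 15]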
Example 10
def TSToRDFDatetime(ts):
    """Convert a protobuf.Timestamp to an RDFDatetime."""
    return rdfvalue.RDFDatetime(ts.seconds * 1000000 + ts.nanos // 1000)
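
The conversion relies on the protobuf Timestamp convention of separate seconds and nanos fields and on RDFDatetime storing microseconds since the epoch. The same arithmetic, restated without any GRR or protobuf dependency (function name is illustrative):

def timestamp_to_microseconds(seconds, nanos):
    """Collapse a (seconds, nanos) pair into microseconds since the epoch."""
    return seconds * 1000000 + nanos // 1000

# One second plus 500,000 nanoseconds is 1,000,500 microseconds.
assert timestamp_to_microseconds(1, 500000) == 1000500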
Example 11
 def testAddDuration(self):
     duration = rdfvalue.Duration("12h")
     date = rdfvalue.RDFDatetime(1e9)
     self.assertEqual(int(date + duration), 1e9 + 12 * 3600e6)
     duration = rdfvalue.Duration("-60s")
     self.assertEqual(int(date + duration), 1e9 - 60e6)
Example 12
class VfsTestMixin(object):
    """A helper mixin providing methods to prepare files and flows for testing.
  """

    time_0 = rdfvalue.RDFDatetime(42)
    time_1 = time_0 + rdfvalue.Duration("1d")
    time_2 = time_1 + rdfvalue.Duration("1d")

    # TODO(hanuszczak): This function not only contains a lot of code duplication
    # but is also a duplication with `gui_test_lib.CreateFileVersion(s)`. This
    # should be refactored in the near future.
    def CreateFileVersions(self, client_id, file_path):
        """Add a new version for a file."""
        path_type, components = rdf_objects.ParseCategorizedPath(file_path)

        with test_lib.FakeTime(self.time_1):
            token = access_control.ACLToken(username="******")
            fd = aff4.FACTORY.Create(client_id.Add(file_path),
                                     aff4.AFF4MemoryStream,
                                     mode="w",
                                     token=token)
            fd.Write("Hello World".encode("utf-8"))
            fd.Close()

            if data_store.RelationalDBWriteEnabled():
                path_info = rdf_objects.PathInfo()
                path_info.path_type = path_type
                path_info.components = components
                path_info.directory = False

                data_store.REL_DB.WritePathInfos(client_id.Basename(),
                                                 [path_info])

        with test_lib.FakeTime(self.time_2):
            fd = aff4.FACTORY.Create(client_id.Add(file_path),
                                     aff4.AFF4MemoryStream,
                                     mode="w",
                                     token=token)
            fd.Write("Goodbye World".encode("utf-8"))
            fd.Close()

            if data_store.RelationalDBWriteEnabled():
                path_info = rdf_objects.PathInfo()
                path_info.path_type = path_type
                path_info.components = components
                path_info.directory = False

                data_store.REL_DB.WritePathInfos(client_id.Basename(),
                                                 [path_info])

    def CreateRecursiveListFlow(self, client_id, token):
        flow_args = filesystem.RecursiveListDirectoryArgs()

        return flow.StartAFF4Flow(
            client_id=client_id,
            flow_name=filesystem.RecursiveListDirectory.__name__,
            args=flow_args,
            token=token)

    def CreateMultiGetFileFlow(self, client_id, file_path, token):
        pathspec = rdf_paths.PathSpec(path=file_path,
                                      pathtype=rdf_paths.PathSpec.PathType.OS)
        flow_args = transfer.MultiGetFileArgs(pathspecs=[pathspec])

        return flow.StartAFF4Flow(client_id=client_id,
                                  flow_name=transfer.MultiGetFile.__name__,
                                  args=flow_args,
                                  token=token)
Example 13
class ApiDeletePendingUserNotificationHandlerTest(
        db_test_lib.RelationalDBEnabledMixin, api_test_lib.ApiCallHandlerTest):
    """Test for ApiDeletePendingUserNotificationHandler."""

    TIME_0 = rdfvalue.RDFDatetime(42 * rdfvalue.MICROSECONDS)
    TIME_1 = TIME_0 + rdfvalue.DurationSeconds("1d")
    TIME_2 = TIME_1 + rdfvalue.DurationSeconds("1d")

    def setUp(self):
        super(ApiDeletePendingUserNotificationHandlerTest, self).setUp()
        self.handler = user_plugin.ApiDeletePendingUserNotificationHandler()
        self.client_id = self.SetupClient(0)

        with test_lib.FakeTime(self.TIME_0):
            notification.Notify(
                self.token.username,
                rdf_objects.UserNotification.Type.TYPE_CLIENT_INTERROGATED,
                "<some message>",
                rdf_objects.ObjectReference(
                    reference_type=rdf_objects.ObjectReference.Type.CLIENT,
                    client=rdf_objects.ClientReference(
                        client_id=self.client_id.Basename())))

            notification.Notify(
                self.token.username,
                rdf_objects.UserNotification.Type.TYPE_CLIENT_INTERROGATED,
                "<some message with identical time>",
                rdf_objects.ObjectReference(
                    reference_type=rdf_objects.ObjectReference.Type.CLIENT,
                    client=rdf_objects.ClientReference(
                        client_id=self.client_id.Basename())))

        with test_lib.FakeTime(self.TIME_1):
            notification.Notify(
                self.token.username,
                rdf_objects.UserNotification.Type.TYPE_CLIENT_APPROVAL_GRANTED,
                "<some other message>",
                rdf_objects.ObjectReference(
                    reference_type=rdf_objects.ObjectReference.Type.CLIENT,
                    client=rdf_objects.ClientReference(
                        client_id=self.client_id.Basename())))

    def _GetNotifications(self):
        pending = data_store.REL_DB.ReadUserNotifications(
            self.token.username,
            state=rdf_objects.UserNotification.State.STATE_PENDING)
        shown = data_store.REL_DB.ReadUserNotifications(
            self.token.username,
            state=rdf_objects.UserNotification.State.STATE_NOT_PENDING)
        return pending, shown

    def testDeletesFromPendingAndAddsToShown(self):
        # Check that there are three pending notifications and no shown ones yet.
        (pending, shown) = self._GetNotifications()
        self.assertLen(pending, 3)
        self.assertEmpty(shown)

        # Delete a pending notification.
        args = user_plugin.ApiDeletePendingUserNotificationArgs(
            timestamp=self.TIME_1)
        self.handler.Handle(args, token=self.token)

        # After the deletion, two notifications should be pending and one shown.
        (pending, shown) = self._GetNotifications()
        self.assertLen(pending, 2)
        self.assertLen(shown, 1)
        self.assertIn("<some other message>", shown[0].message)
        self.assertEqual(shown[0].timestamp, self.TIME_1)

    def testUnknownTimestampIsIgnored(self):
        # Check that there are three pending notifications and no shown ones yet.
        (pending, shown) = self._GetNotifications()
        self.assertLen(pending, 3)
        self.assertEmpty(shown)

        # A timestamp not matching any pending notifications does not change any of
        # the collections.
        args = user_plugin.ApiDeletePendingUserNotificationArgs(
            timestamp=self.TIME_2)
        self.handler.Handle(args, token=self.token)

        # We should still have the same number of pending and shown notifications.
        (pending, shown) = self._GetNotifications()
        self.assertLen(pending, 3)
        self.assertEmpty(shown)
Example 14
File: hunt.py Project: vismid86/grr
    def Handle(self, args, token=None):
        if not args.hunt_id:
            raise ValueError("hunt_id can't be None")

        if not args.client_id:
            raise ValueError("client_id can't be None")

        if not args.vfs_path:
            raise ValueError("vfs_path can't be None")

        if not args.timestamp:
            raise ValueError("timestamp can't be None")

        api_vfs.ValidateVfsPath(args.vfs_path)

        results = implementation.GRRHunt.ResultCollectionForHID(
            args.hunt_id.ToURN())

        expected_aff4_path = args.client_id.ToClientURN().Add(args.vfs_path)
        # TODO(user): should after_timestamp be strictly less than the desired
        # timestamp?
        timestamp = rdfvalue.RDFDatetime(int(args.timestamp) - 1)

        # If the entry corresponding to a given path is not found within
        # MAX_RECORDS_TO_CHECK from a given timestamp, we report a 404.
        for _, item in results.Scan(
                after_timestamp=timestamp.AsMicrosecondsSinceEpoch(),
                max_records=self.MAX_RECORDS_TO_CHECK):
            try:
                # Do not pass the client id we got from the caller. This will
                # get filled automatically from the hunt results and we check
                # later that the aff4_path we get is the same as the one that
                # was requested.
                aff4_path = export.CollectionItemToAff4Path(item,
                                                            client_id=None)
            except export.ItemNotExportableError:
                continue

            if aff4_path != expected_aff4_path:
                continue

            try:
                aff4_stream = aff4.FACTORY.Open(aff4_path,
                                                aff4_type=aff4.AFF4Stream,
                                                token=token)
                if not aff4_stream.GetContentAge():
                    break

                return api_call_handler_base.ApiBinaryStream(
                    "%s_%s" %
                    (args.client_id, utils.SmartStr(aff4_path.Basename())),
                    content_generator=self._GenerateFile(aff4_stream),
                    content_length=len(aff4_stream))
            except aff4.InstantiationError:
                break

        raise HuntFileNotFoundError(
            "File %s with timestamp %s and client %s "
            "wasn't found among the results of hunt %s" %
            (utils.SmartStr(args.vfs_path), utils.SmartStr(args.timestamp),
             utils.SmartStr(args.client_id), utils.SmartStr(args.hunt_id)))
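
The handler scans from int(args.timestamp) - 1 because after_timestamp appears to act as an exclusive lower bound: subtracting one microsecond makes a result written at exactly the requested timestamp visible to the scan. A small in-memory sketch of that reasoning; the scan helper below is illustrative, not the real collection API.

def scan(records, after_timestamp, max_records):
    """records is a list of (timestamp_us, item) pairs sorted by timestamp."""
    hits = [(ts, item) for ts, item in records if ts > after_timestamp]
    return hits[:max_records]

records = [(100, "a"), (200, "b"), (300, "c")]
# Scanning strictly after 200 misses the record written at 200 ...
assert scan(records, 200, 10) == [(300, "c")]
# ... while scanning after 200 - 1 includes it.
assert scan(records, 200 - 1, 10) == [(200, "b"), (300, "c")]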
Example 15
def main(argv):
  """Main."""
  del argv  # Unused.

  token = GetToken()
  grr_config.CONFIG.AddContext(contexts.COMMAND_LINE_CONTEXT)
  grr_config.CONFIG.AddContext(contexts.CONFIG_UPDATER_CONTEXT)

  if flags.FLAGS.subparser_name == "initialize":
    config_lib.ParseConfigCommandLine()
    if flags.FLAGS.noprompt:
      InitializeNoPrompt(grr_config.CONFIG, token=token)
    else:
      Initialize(grr_config.CONFIG, token=token)
    return

  server_startup.Init()

  try:
    print("Using configuration %s" % grr_config.CONFIG)
  except AttributeError:
    raise RuntimeError("No valid config specified.")

  if flags.FLAGS.subparser_name == "generate_keys":
    try:
      GenerateKeys(grr_config.CONFIG, overwrite_keys=flags.FLAGS.overwrite_keys)
    except RuntimeError as e:
      # GenerateKeys will raise if keys exist and overwrite_keys is not set.
      print("ERROR: %s" % e)
      sys.exit(1)
    grr_config.CONFIG.Write()

  elif flags.FLAGS.subparser_name == "repack_clients":
    upload = not flags.FLAGS.noupload
    repacking.TemplateRepacker().RepackAllTemplates(upload=upload, token=token)

  elif flags.FLAGS.subparser_name == "show_user":
    maintenance_utils.ShowUser(flags.FLAGS.username, token=token)

  elif flags.FLAGS.subparser_name == "update_user":
    try:
      maintenance_utils.UpdateUser(
          flags.FLAGS.username,
          flags.FLAGS.password,
          flags.FLAGS.add_labels,
          flags.FLAGS.delete_labels,
          token=token)
    except maintenance_utils.UserError as e:
      print(e)

  elif flags.FLAGS.subparser_name == "delete_user":
    maintenance_utils.DeleteUser(flags.FLAGS.username, token=token)

  elif flags.FLAGS.subparser_name == "add_user":
    labels = []
    if not flags.FLAGS.noadmin:
      labels.append("admin")

    if flags.FLAGS.labels:
      labels.extend(flags.FLAGS.labels)

    try:
      maintenance_utils.AddUser(
          flags.FLAGS.username, flags.FLAGS.password, labels, token=token)
    except maintenance_utils.UserError as e:
      print(e)

  elif flags.FLAGS.subparser_name == "upload_python":
    python_hack_root_urn = grr_config.CONFIG.Get("Config.python_hack_root")
    content = open(flags.FLAGS.file, "rb").read(1024 * 1024 * 30)
    aff4_path = flags.FLAGS.dest_path
    platform = flags.FLAGS.platform
    if not aff4_path:
      aff4_path = python_hack_root_urn.Add(platform.lower()).Add(
          os.path.basename(flags.FLAGS.file))
    if not str(aff4_path).startswith(str(python_hack_root_urn)):
      raise ValueError("AFF4 path must start with %s." % python_hack_root_urn)
    context = ["Platform:%s" % platform.title(), "Client Context"]
    maintenance_utils.UploadSignedConfigBlob(
        content, aff4_path=aff4_path, client_context=context, token=token)

  elif flags.FLAGS.subparser_name == "upload_exe":
    content = open(flags.FLAGS.file, "rb").read(1024 * 1024 * 30)
    context = ["Platform:%s" % flags.FLAGS.platform.title(), "Client Context"]

    if flags.FLAGS.dest_path:
      dest_path = rdfvalue.RDFURN(flags.FLAGS.dest_path)
    else:
      dest_path = grr_config.CONFIG.Get(
          "Executables.aff4_path", context=context).Add(
              os.path.basename(flags.FLAGS.file))

    # Now upload to the destination.
    maintenance_utils.UploadSignedConfigBlob(
        content, aff4_path=dest_path, client_context=context, token=token)

    print("Uploaded to %s" % dest_path)

  elif flags.FLAGS.subparser_name == "set_var":
    config = grr_config.CONFIG
    print("Setting %s to %s" % (flags.FLAGS.var, flags.FLAGS.val))
    if flags.FLAGS.val.startswith("["):  # Allow setting of basic lists.
      flags.FLAGS.val = flags.FLAGS.val[1:-1].split(",")
    config.Set(flags.FLAGS.var, flags.FLAGS.val)
    config.Write()

  elif flags.FLAGS.subparser_name == "upload_raw":
    if not flags.FLAGS.dest_path:
      flags.FLAGS.dest_path = aff4.ROOT_URN.Add("config").Add("raw")
    uploaded = UploadRaw(flags.FLAGS.file, flags.FLAGS.dest_path, token=token)
    print("Uploaded to %s" % uploaded)

  elif flags.FLAGS.subparser_name == "upload_artifact":
    yaml.load(open(flags.FLAGS.file, "rb"))  # Check it will parse.
    try:
      artifact.UploadArtifactYamlFile(
          open(flags.FLAGS.file, "rb").read(),
          overwrite=flags.FLAGS.overwrite_artifact)
    except rdf_artifacts.ArtifactDefinitionError as e:
      print("Error %s. You may need to set --overwrite_artifact." % e)

  elif flags.FLAGS.subparser_name == "delete_artifacts":
    artifact_list = flags.FLAGS.artifact
    if not artifact_list:
      raise ValueError("No artifact to delete given.")
    artifact_registry.DeleteArtifactsFromDatastore(artifact_list, token=token)
    print("Artifacts %s deleted." % artifact_list)

  elif flags.FLAGS.subparser_name == "download_missing_rekall_profiles":
    print("Downloading missing Rekall profiles.")
    s = rekall_profile_server.GRRRekallProfileServer()
    s.GetMissingProfiles()

  elif flags.FLAGS.subparser_name == "set_global_notification":
    notification = aff4_users.GlobalNotification(
        type=flags.FLAGS.type,
        header=flags.FLAGS.header,
        content=flags.FLAGS.content,
        link=flags.FLAGS.link)
    if flags.FLAGS.show_from:
      notification.show_from = rdfvalue.RDFDatetime().ParseFromHumanReadable(
          flags.FLAGS.show_from)
    if flags.FLAGS.duration:
      notification.duration = rdfvalue.Duration().ParseFromHumanReadable(
          flags.FLAGS.duration)

    print("Setting global notification.")
    print(notification)

    with aff4.FACTORY.Create(
        aff4_users.GlobalNotificationStorage.DEFAULT_PATH,
        aff4_type=aff4_users.GlobalNotificationStorage,
        mode="rw",
        token=token) as storage:
      storage.AddNotification(notification)
  elif flags.FLAGS.subparser_name == "rotate_server_key":
    print("""
You are about to rotate the server key. Note that:

  - Clients might experience intermittent connection problems after
    the server keys are rotated.

  - It's not possible to go back to an earlier key. Clients that see a
    new certificate will remember the cert's serial number and refuse
    to accept any certificate with a smaller serial number from that
    point on.
    """)

    if input("Continue? [yN]: ").upper() == "Y":
      if flags.FLAGS.keylength:
        keylength = int(flags.FLAGS.keylength)
      else:
        keylength = grr_config.CONFIG["Server.rsa_key_length"]

      maintenance_utils.RotateServerKey(
          cn=flags.FLAGS.common_name, keylength=keylength)
  elif flags.FLAGS.subparser_name == "migrate_data":
    data_migration.Migrate()