Exemple #1
0
 def testRdfFormatterHandlesKeyValuePair(self):
     """rdfvalue.KeyValue items need special handling to expand k and v."""
     # Build a KeyValue whose key and value are both DataBlobs.
     pair = rdf_protodict.KeyValue(
         k=rdf_protodict.DataBlob().SetValue("skynet"),
         v=rdf_protodict.DataBlob().SetValue([1997]))
     hinter = hints.Hinter(template="{k}: {v}")
     # Rendering should expand both sides of the pair into the template.
     self.assertEqual("skynet: 1997", hinter.Render(pair))
  def _MakeRegStat(self, path, value, registry_type):
    """Builds a registry StatEntry for path holding value of registry_type."""
    pathspec = rdf_paths.PathSpec(
        path=path,
        path_options=rdf_paths.PathSpec.Options.CASE_LITERAL,
        pathtype=rdf_paths.PathSpec.PathType.REGISTRY)

    if registry_type == rdf_client.StatEntry.RegistryType.REG_MULTI_SZ:
      # Multi-string values are wrapped in a BlobArray container.
      blob = rdf_protodict.DataBlob(
          list=rdf_protodict.BlobArray(
              content=rdf_protodict.DataBlob(string=value)))
    else:
      blob = rdf_protodict.DataBlob().SetValue(value)

    return rdf_client.StatEntry(
        pathspec=pathspec, registry_data=blob, registry_type=registry_type)
Exemple #3
0
    def Run(self, unused_arg):
        """Disables the nanny service and reports whether it worked."""
        logging.debug("Disabling service")

        service_name = config.CONFIG["Nanny.service_name"]
        win32serviceutil.ChangeServiceConfig(
            None, service_name, startType=win32service.SERVICE_DISABLED)

        # Confirm the start type actually changed before claiming success.
        if QueryService(service_name)[1] == win32service.SERVICE_DISABLED:
            logging.info("Disabled service successfully")
            reply = rdf_protodict.DataBlob(string="Service disabled.")
        else:
            reply = rdf_protodict.DataBlob(string="Service failed to disable.")
        self.SendReply(reply)
Exemple #4
0
    def Run(self, args):
        """Reads a buffer on the client and sends it to the server."""
        # Refuse oversized reads so a single response stays bounded.
        if args.length > constants.CLIENT_MAX_BUFFER_SIZE:
            raise RuntimeError("Can not read buffers this large.")

        data = vfs.ReadVFS(
            args.pathspec, args.offset, args.length,
            progress_callback=self.Progress)

        compressed = rdf_protodict.DataBlob(
            data=zlib.compress(data),
            compression=rdf_protodict.DataBlob.CompressionType.ZCOMPRESSION)
        digest = hashlib.sha256(data).digest()

        # Ensure that the buffer is counted against this response. Check network
        # send limit.
        self.ChargeBytesToSession(len(data))

        # Ship the compressed blob to the special TransferStore well known flow.
        self.grr_worker.SendReply(
            compressed,
            session_id=rdfvalue.SessionID(flow_name="TransferStore"))

        # Report the hash of this blob plus offset/length back to our flow.
        self.SendReply(
            rdf_client.BufferReference(
                offset=args.offset, length=len(data), data=digest))
Exemple #5
0
    def Run(self, unused_arg):
        """Disables the client: removes the binary, plist and bundle dir.

        Replies with a DataBlob message describing which cleanup steps, if
        any, failed.
        """
        logging.debug("Disabling service")

        msg = "Service disabled."

        # Locate the running binary. Under PyInstaller ("frozen") the
        # executable path is authoritative; otherwise fall back to __file__.
        grr_binary = None
        if hasattr(sys, "frozen"):
            grr_binary = os.path.abspath(sys.executable)
        elif __file__:
            grr_binary = os.path.abspath(__file__)

        # Fix: previously grr_binary could be unbound here (neither branch
        # taken when not frozen and __file__ is falsy), making os.remove
        # raise an uncaught NameError instead of reporting failure.
        if grr_binary is None:
            msg = "Could not remove binary."
        else:
            try:
                os.remove(grr_binary)
            except OSError:
                msg = "Could not remove binary."

        try:
            os.remove(config.CONFIG["Client.plist_path"])
        except OSError:
            # Append to an existing failure message, otherwise replace the
            # default success message.
            if "Could not" in msg:
                msg += " Could not remove plist file."
            else:
                msg = "Could not remove plist file."

        # Get the directory we are running in from pyinstaller. This is either the
        # GRR directory which we should delete (onedir mode) or a generated temp
        # directory which we can delete without problems in onefile mode.
        directory = getattr(sys, "_MEIPASS", None)
        if directory:
            shutil.rmtree(directory, ignore_errors=True)

        self.SendReply(rdf_protodict.DataBlob(string=msg))
Exemple #6
0
 def _ForemanOp(self):
     """Sends Foreman checks periodically."""
     self._threads["Worker"].SendReply(
         rdf_protodict.DataBlob(),
         session_id=rdfvalue.FlowSessionID(flow_name="Foreman"),
         require_fastpoll=False)
     # Wait out the configured interval before the next Foreman check.
     time.sleep(config.CONFIG["Client.foreman_check_frequency"])
    def MakeRegistryStatEntry(self, path, value):
        """Returns a REG_SZ registry StatEntry for path holding value."""
        pathspec = rdf_paths.PathSpec(
            path=path,
            path_options=rdf_paths.PathSpec.Options.CASE_LITERAL,
            pathtype=rdf_paths.PathSpec.PathType.REGISTRY)
        data = rdf_protodict.DataBlob().SetValue(value)

        return rdf_client.StatEntry(
            pathspec=pathspec,
            registry_data=data,
            registry_type=rdf_client.StatEntry.RegistryType.REG_SZ)
Exemple #8
0
    def testEqualTimestampNotifications(self):
        """Replies arriving in one poll must yield a single notification."""
        frontend_server = frontend_lib.FrontEndServer(
            certificate=config.CONFIG["Frontend.certificate"],
            private_key=config.CONFIG["PrivateKeys.server_key"],
            message_expiry_time=100,
            threadpool_prefix="notification-test")

        # This schedules 10 requests.
        session_id = flow.StartFlow(client_id=self.client_id,
                                    flow_name="WorkerSendingTestFlow",
                                    token=self.token)

        # We pretend that the client processed all the 10 requests at once and
        # sends the replies in a single http poll.
        messages = [
            rdf_flows.GrrMessage(
                request_id=i,
                response_id=1,
                session_id=session_id,
                payload=rdf_protodict.DataBlob(string="test%s" % i),
                auth_state="AUTHENTICATED",
                generate_task_id=True) for i in range(1, 11)
        ]
        status = rdf_flows.GrrStatus(
            status=rdf_flows.GrrStatus.ReturnedStatus.OK)
        # One STATUS message per request marks each of them complete.
        statuses = [
            rdf_flows.GrrMessage(request_id=i,
                                 response_id=2,
                                 session_id=session_id,
                                 payload=status,
                                 type=rdf_flows.GrrMessage.Type.STATUS,
                                 auth_state="AUTHENTICATED",
                                 generate_task_id=True) for i in range(1, 11)
        ]

        frontend_server.ReceiveMessages(self.client_id, messages + statuses)

        with queue_manager.QueueManager(token=self.token) as q:
            all_notifications = q.GetNotificationsByPriorityForAllShards(
                rdfvalue.RDFURN("aff4:/F"))
            medium_priority = rdf_flows.GrrNotification.Priority.MEDIUM_PRIORITY
            medium_notifications = all_notifications[medium_priority]
            # Only look at notifications for the flow started above.
            my_notifications = [
                n for n in medium_notifications if n.session_id == session_id
            ]
            # There must not be more than one notification.
            self.assertEqual(len(my_notifications), 1)
            notification = my_notifications[0]
            self.assertEqual(notification.first_queued, notification.timestamp)
            self.assertEqual(notification.last_status, 10)
Exemple #9
0
    def ProcessFileStats(self, responses):
        """Extract DataBlob from Stat response."""
        if not responses.success:
            return

        # Known Windows system root directory names. Paths appear to look
        # like "/C:/Windows": chars 1-2 are the drive, chars 4+ the name.
        known_roots = ["Windows", "WinNT", "WINNT35", "WTSRV", "WINDOWS"]
        for response in responses:
            tail = response.pathspec.path[4:]
            if tail not in known_roots:
                continue
            drive = response.pathspec.path[1:3]
            # Put the data back into the original format expected for the artifact
            blob = rdf_protodict.DataBlob().SetValue("%s\\%s" % (drive, tail))
            self.SendReply(rdf_client.StatEntry(registry_data=blob))
            self.state.success = True
            break
Exemple #10
0
    def testParse(self):
        """Checks pathspec extraction for a registry run key and services."""
        parser = windows_persistence.WindowsPersistenceMechanismsParser()
        path = (r"HKEY_LOCAL_MACHINE\Software\Microsoft\Windows\CurrentVersion"
                r"\Run\test")
        pathspec = rdf_paths.PathSpec(
            path=path, pathtype=rdf_paths.PathSpec.PathType.REGISTRY)
        reg_data = "C:\\blah\\some.exe /v"
        reg_type = rdf_client.StatEntry.RegistryType.REG_SZ
        stat = rdf_client.StatEntry(
            pathspec=pathspec,
            registry_type=reg_type,
            registry_data=rdf_protodict.DataBlob(string=reg_data))

        persistence = [stat]
        # Service image paths in three on-disk spellings: relative,
        # %systemroot%-prefixed and \SystemRoot-prefixed.
        image_paths = [
            "system32\\drivers\\ACPI.sys",
            "%systemroot%\\system32\\svchost.exe -k netsvcs",
            "\\SystemRoot\\system32\\drivers\\acpipmi.sys"
        ]
        reg_key = "HKEY_LOCAL_MACHINE/SYSTEM/CurrentControlSet/services/AcpiPmi"
        for path in image_paths:
            serv_info = rdf_client.WindowsServiceInformation(
                name="blah",
                display_name="GRRservice",
                image_path=path,
                registry_key=reg_key)
            persistence.append(serv_info)

        knowledge_base = rdf_client.KnowledgeBase()
        knowledge_base.environ_systemroot = "C:\\Windows"

        # Expected normalized binary paths, one per persistence item above.
        expected = [
            "C:\\blah\\some.exe", "C:\\Windows\\system32\\drivers\\ACPI.sys",
            "C:\\Windows\\system32\\svchost.exe",
            "C:\\Windows\\system32\\drivers\\acpipmi.sys"
        ]

        for index, item in enumerate(persistence):
            results = list(
                parser.Parse(item, knowledge_base,
                             rdf_paths.PathSpec.PathType.OS))
            self.assertEqual(results[0].pathspec.path, expected[index])
            self.assertEqual(len(results), 1)
Exemple #11
0
    def SendResponse(self,
                     session_id,
                     data,
                     client_id=None,
                     well_known=False,
                     request_id=None):
        """Queues a response (plus status for normal flows) on session_id."""
        if not isinstance(data, rdfvalue.RDFValue):
            # Bare strings are wrapped into a DataBlob payload.
            data = rdf_protodict.DataBlob(string=data)

        # Well known flows use fixed ids; normal flows default request_id to 1.
        if well_known:
            request_id, response_id = 0, 12345
        else:
            request_id, response_id = request_id or 1, 1

        to_queue = [
            rdf_flows.GrrMessage(source=client_id,
                                 session_id=session_id,
                                 payload=data,
                                 request_id=request_id,
                                 auth_state="AUTHENTICATED",
                                 response_id=response_id)
        ]
        if not well_known:
            # For normal flows we have to send a status as well.
            to_queue.append(
                rdf_flows.GrrMessage(
                    source=client_id,
                    session_id=session_id,
                    payload=rdf_flows.GrrStatus(
                        status=rdf_flows.GrrStatus.ReturnedStatus.OK),
                    request_id=request_id,
                    response_id=response_id + 1,
                    auth_state="AUTHENTICATED",
                    type=rdf_flows.GrrMessage.Type.STATUS))

        with queue_manager.QueueManager(token=self.token) as flow_manager:
            for message in to_queue:
                flow_manager.QueueResponse(message)
            flow_manager.QueueNotification(session_id=session_id,
                                           last_status=request_id)
            timestamp = flow_manager.frozen_timestamp

        return timestamp
Exemple #12
0
  def _Stat(self, name, value, value_type, mtime=None):
    """Builds a StatEntry for the entry `name` under self.pathspec."""
    # No matter how we got here, there is no need to do case folding from now on
    # since this is the exact filename casing.
    pathspec = self.pathspec.Copy()
    pathspec.path_options = rdf_paths.PathSpec.Options.CASE_LITERAL
    pathspec.last.path = utils.JoinPath(pathspec.last.path, name)

    response = rdf_client.StatEntry()
    response.pathspec = pathspec
    response.st_mode = stat.S_IFDIR if self.IsDirectory() else stat.S_IFREG
    if mtime:
      response.st_mtime = mtime
    response.st_size = len(utils.SmartStr(value))
    if value_type is not None:
      # Unknown value types map to 0 via the registry_map lookup.
      response.registry_type = self.registry_map.get(value_type, 0)
      response.registry_data = rdf_protodict.DataBlob().SetValue(value)
    return response
Exemple #13
0
 def Store(self, data):
     """Records the decoded payload's string and replies with a greeting."""
     parsed = self.in_rdfvalue(data)
     self.storage.append(parsed.string)
     return [rdf_protodict.DataBlob(string="Hello World")]
Exemple #14
0
    def testNoValidStatusRaceIsResolved(self):
        """Regression test: half-written request 2 must not be consumed."""

        # This tests for the regression of a long standing race condition we saw
        # where notifications would trigger the reading of another request that
        # arrives later but wasn't completely written to the database yet.
        # Timestamp based notification handling should eliminate this bug.

        # We need a random flow object for this test.
        session_id = flow.StartFlow(client_id=self.client_id,
                                    flow_name="WorkerSendingTestFlow",
                                    token=self.token)
        worker_obj = worker_lib.GRRWorker(token=self.token)
        manager = queue_manager.QueueManager(token=self.token)

        manager.DeleteNotification(session_id)
        manager.Flush()

        # We have a first request that is complete (request_id 1, response_id 1).
        self.SendResponse(session_id, "Response 1")

        # However, we also have request #2 already coming in. The race is that
        # the queue manager might write the status notification to
        # session_id/state as "status:00000002" but not the status response
        # itself yet under session_id/state/request:00000002

        request_id = 2
        response_id = 1
        flow_manager = queue_manager.QueueManager(token=self.token)
        flow_manager.FreezeTimestamp()

        flow_manager.QueueResponse(
            rdf_flows.GrrMessage(
                source=self.client_id,
                session_id=session_id,
                payload=rdf_protodict.DataBlob(string="Response 2"),
                request_id=request_id,
                auth_state="AUTHENTICATED",
                response_id=response_id))

        status = rdf_flows.GrrMessage(
            source=self.client_id,
            session_id=session_id,
            payload=rdf_flows.GrrStatus(
                status=rdf_flows.GrrStatus.ReturnedStatus.OK),
            request_id=request_id,
            response_id=response_id + 1,
            auth_state="AUTHENTICATED",
            type=rdf_flows.GrrMessage.Type.STATUS)

        # Now we write half the status information.
        data_store.DB.StoreRequestsAndResponses(new_responses=[(status, None)])

        # We make the race even a bit harder by saying the new notification gets
        # written right before the old one gets deleted. If we are not careful here,
        # we delete the new notification as well and the flow becomes stuck.

        # pylint: disable=invalid-name
        def WriteNotification(self, arg_session_id, start=None, end=None):
            # Stub for DeleteNotification: queue a fresh notification for our
            # session just before delegating to the real deletion.
            if arg_session_id == session_id:
                flow_manager.QueueNotification(session_id=arg_session_id)
                flow_manager.Flush()

            self.DeleteNotification.old_target(self,
                                               arg_session_id,
                                               start=start,
                                               end=end)

        # pylint: enable=invalid-name

        with utils.Stubber(queue_manager.QueueManager, "DeleteNotification",
                           WriteNotification):
            # This should process request 1 but not touch request 2.
            worker_obj.RunOnce()
            worker_obj.thread_pool.Join()

        flow_obj = aff4.FACTORY.Open(session_id, token=self.token)
        self.assertFalse(flow_obj.context.backtrace)
        self.assertNotEqual(flow_obj.context.state,
                            rdf_flow_runner.FlowContext.State.ERROR)

        request_data = data_store.DB.ReadResponsesForRequestId(session_id, 2)
        request_data.sort(key=lambda msg: msg.response_id)
        self.assertEqual(len(request_data), 2)

        # Make sure the status and the original request are still there.
        self.assertEqual(request_data[0].args_rdf_name, "DataBlob")
        self.assertEqual(request_data[1].args_rdf_name, "GrrStatus")

        # But there is nothing for request 1.
        request_data = data_store.DB.ReadResponsesForRequestId(session_id, 1)
        self.assertEqual(request_data, [])

        # The notification for request 2 should have survived.
        with queue_manager.QueueManager(token=self.token) as manager:
            notifications = manager.GetNotifications(queues.FLOWS)
            self.assertEqual(len(notifications), 1)
            notification = notifications[0]
            self.assertEqual(notification.session_id, session_id)
            self.assertEqual(notification.timestamp,
                             flow_manager.frozen_timestamp)

        self.assertEqual(RESULTS, ["Response 1"])

        # The last missing piece of request 2 is the actual status message.
        flow_manager.QueueResponse(status)
        flow_manager.Flush()

        # Now make sure request 2 runs as expected.
        worker_obj.RunOnce()
        worker_obj.thread_pool.Join()

        self.assertEqual(RESULTS, ["Response 1", "Response 2"])
Exemple #15
0
 def Start(self):
     """Kicks off a single Test client action call."""
     index = 1
     self.CallClient(
         client_test_lib.Test,
         rdf_protodict.DataBlob(string="test%s" % index),
         data=str(index),
         next_state="Incoming")
Exemple #16
0
 def Run(self, unused_args):
     """Replies with this machine's hostname."""
     hostname = socket.gethostname()
     self.SendReply(rdf_protodict.DataBlob(string=hostname))
Exemple #17
0
 def Start(self):
     """Replies with an "All Users" registry value and records success."""
     blob = rdf_protodict.DataBlob().SetValue("All Users")
     self.SendReply(rdf_client.StatEntry(registry_data=blob))
     self.state.success = True
Exemple #18
0
 def ReturnBlob(self, unused_args):
     """Returns a single integer-valued DataBlob."""
     blob = rdf_protodict.DataBlob(integer=100)
     return [blob]
Exemple #19
0
def _CompressedDataBlob(chunk):
    """Returns chunk.data zlib-compressed and wrapped in a DataBlob."""
    compressed = zlib.compress(chunk.data)
    return rdf_protodict.DataBlob(
        data=compressed,
        compression=rdf_protodict.DataBlob.CompressionType.ZCOMPRESSION)
Exemple #20
0
 def Start(cls, args):
   """Yields the local machine's hostname wrapped in a DataBlob."""
   del args  # Unused.
   hostname = socket.gethostname()
   yield rdf_protodict.DataBlob(string=hostname)