Example #1
    def testNewHuntWizard(self):
        # Open up and click on View Hunts.
        self.Open("/")
        self.WaitUntil(self.IsElementPresent, "client_query")
        self.WaitUntil(self.IsElementPresent, "css=a[grrtarget=hunts]")
        self.Click("css=a[grrtarget=hunts]")
        self.WaitUntil(self.IsElementPresent, "css=button[name=NewHunt]")

        # Open up "New Hunt" wizard
        self.Click("css=button[name=NewHunt]")
        self.WaitUntil(self.IsElementPresent,
                       "css=grr-wizard-form:contains('What to run?')")

        # Click on Filesystem item in flows list
        self.WaitUntil(self.IsElementPresent,
                       "css=#_Filesystem > i.jstree-icon")
        self.Click("css=#_Filesystem > i.jstree-icon")

        # Click on the FileFinder item in Filesystem flows list
        self.Click("link=File Finder")

        # Wait for flow configuration form to be rendered (just wait for first
        # input field).
        self.WaitUntil(self.IsElementPresent,
                       "css=grr-new-hunt-wizard-form label:contains('Paths')")

        # Change "path" and "pathtype" values
        self.Type(
            "css=grr-new-hunt-wizard-form "
            "grr-form-proto-repeated-field:has(label:contains('Paths')) "
            "input", "/tmp")
        self.Select(
            "css=grr-new-hunt-wizard-form "
            "grr-form-proto-single-field:has(label:contains('Pathtype')) "
            "select", "TSK")

        # Click on "Next" button
        self.Click("css=grr-new-hunt-wizard-form button.Next")
        self.WaitUntil(self.IsElementPresent,
                       "css=grr-wizard-form:contains('Hunt parameters')")

        # Click on "Next" button
        self.Click("css=grr-new-hunt-wizard-form button.Next")
        self.WaitUntil(
            self.IsElementPresent,
            "css=grr-wizard-form:contains('How to process results')")

        # Click on "Back" button and check that all the values in the form
        # remain intact.
        self.Click("css=grr-new-hunt-wizard-form button.Back")
        self.WaitUntil(self.IsElementPresent,
                       "css=grr-wizard-form:contains('Hunt parameters')")

        self.Click("css=grr-new-hunt-wizard-form button.Back")
        self.WaitUntil(self.IsElementPresent,
                       "css=grr-new-hunt-wizard-form label:contains('Paths')")

        self.assertEqual(
            "/tmp",
            self.GetValue(
                "css=grr-new-hunt-wizard-form "
                "grr-form-proto-repeated-field:has(label:contains('Paths')) input"
            ))

        self.assertEqual(
            "TSK",
            self.GetSelectedLabel(
                "css=grr-new-hunt-wizard-form "
                "grr-form-proto-single-field:has(label:contains('Pathtype')) select"
            ))

        # Click on "Next" button
        self.Click("css=grr-new-hunt-wizard-form button.Next")
        self.WaitUntil(self.IsElementPresent,
                       "css=grr-wizard-form:contains('Hunt parameters')")

        # Click on "Next" button
        self.Click("css=grr-new-hunt-wizard-form button.Next")
        self.WaitUntil(
            self.IsElementPresent,
            "css=grr-wizard-form:contains('How to process results')")

        # Configure the hunt to use a dummy output plugin.
        self.Click("css=grr-new-hunt-wizard-form button[name=Add]")
        self.Select("css=grr-new-hunt-wizard-form select", "DummyOutputPlugin")
        self.Type(
            "css=grr-new-hunt-wizard-form "
            "grr-form-proto-single-field:has(label:contains('Filename Regex')) "
            "input", "some regex")

        # Click on "Next" button
        self.Click("css=grr-new-hunt-wizard-form button.Next")
        self.WaitUntil(self.IsElementPresent,
                       "css=grr-wizard-form:contains('Where to run?')")

        # Empty set of rules should be valid.
        self.WaitUntil(self.IsElementPresent,
                       "css=button.Next:not([disabled])")

        # A note explains what an empty set of rules means.
        self.WaitUntil(self.IsElementPresent,
                       "css=grr-wizard-form:contains('No rules specified!')")

        # Select the alternative match mode, which matches a client if
        # any of the rules evaluates to true.
        self.Select(
            "css=grr-configure-rules-page "
            "label:contains('Match mode') ~ * select", "Match any")

        # The note should still be shown after changing the match mode.
        self.WaitUntil(self.IsElementPresent,
                       "css=grr-wizard-form:contains('No rules specified!')")

        # Create 3 foreman rules. Note that the "Add" button prepends rules to
        # the list, so we always use the :nth(0) selector to target the newest
        # rule.
        self.Click("css=grr-configure-rules-page button[name=Add]")
        self.Select("css=grr-configure-rules-page div.well:nth(0) select",
                    "Regex")
        rule = foreman_rules.ForemanRegexClientRule
        label = rule.ForemanStringField.SYSTEM.description
        self.Select(
            "css=grr-configure-rules-page div.well:nth(0) "
            "label:contains('Field') ~ * select", label)
        self.Type(
            "css=grr-configure-rules-page div.well:nth(0) "
            "label:contains('Attribute regex') ~ * input", "Linux")

        self.Click("css=grr-configure-rules-page button[name=Add]")
        self.Select("css=grr-configure-rules-page div.well:nth(0) select",
                    "Integer")

        rule = foreman_rules.ForemanIntegerClientRule
        label = rule.ForemanIntegerField.CLIENT_CLOCK.description
        self.Select(
            "css=grr-configure-rules-page div.well:nth(0) "
            "label:contains('Field') ~ * select", label)
        self.Select(
            "css=grr-configure-rules-page div.well:nth(0) "
            "label:contains('Operator') ~ * select", "GREATER_THAN")
        self.Type(
            "css=grr-configure-rules-page div.well:nth(0) "
            "label:contains('Value') ~ * input", "1336650631137737")

        self.Click("css=grr-configure-rules-page button[name=Add]")
        self.Click("css=grr-configure-rules-page div.well:nth(0) "
                   "label:contains('Os darwin') ~ * input[type=checkbox]")

        # Click on "Back" button
        self.Click("css=grr-new-hunt-wizard-form button.Back")
        self.WaitUntil(
            self.IsElementPresent,
            "css=grr-wizard-form:contains('How to process results')")

        # Click on "Next" button again and check that all the values that
        # we've just entered remain intact.
        self.Click("css=grr-new-hunt-wizard-form button.Next")
        self.WaitUntil(self.IsElementPresent,
                       "css=grr-wizard-form:contains('Where to run?')")

        # Click on "Next" button
        self.Click("css=grr-new-hunt-wizard-form button.Next")
        self.WaitUntil(self.IsElementPresent,
                       "css=grr-wizard-form:contains('Review')")

        # Check that the arguments summary is present.
        self.WaitUntil(self.IsElementPresent,
                       "css=grr-wizard-form:contains('Paths')")
        self.WaitUntil(self.IsElementPresent,
                       "css=grr-wizard-form:contains('/tmp')")

        # Check that output plugins are shown.
        self.assertTrue(
            self.IsElementPresent(
                "css=grr-wizard-form:contains('DummyOutputPlugin')"))
        self.assertTrue(
            self.IsElementPresent(
                "css=grr-wizard-form:contains('some regex')"))

        # Check that there's no deprecated rules summary.
        self.assertFalse(
            self.IsElementPresent(
                "css=grr-wizard-form:contains('Regex rules')"))
        self.assertFalse(
            self.IsElementPresent(
                "css=grr-wizard-form:contains('Integer rules')"))

        # Check that rules summary is present.
        self.assertTrue(
            self.IsElementPresent(
                "css=grr-wizard-form:contains('Client rule set')"))

        # Click on "Run" button
        self.Click("css=grr-new-hunt-wizard-form button.Next")

        self.WaitUntil(self.IsElementPresent,
                       "css=grr-wizard-form:contains('Created Hunt')")

        # Close the window and check that the hunt was created.
        self.Click("css=button.Next")

        # Select newly created hunt.
        self.Click("css=grr-hunts-list td:contains('GenericHunt')")

        # Check that correct details are displayed in hunt details tab.
        self.WaitUntil(self.IsElementPresent,
                       "css=grr-hunt-inspector:contains('GenericHunt')")
        self.WaitUntil(self.IsElementPresent,
                       "css=grr-hunt-inspector:contains('Flow Arguments')")

        self.assertTrue(
            self.IsElementPresent("css=grr-hunt-inspector:contains('Paths')"))
        self.assertTrue(
            self.IsElementPresent("css=grr-hunt-inspector:contains('/tmp')"))

        self.assertTrue(
            self.IsElementPresent(
                "css=grr-hunt-inspector:contains('DummyOutputPlugin')"))
        self.assertTrue(
            self.IsElementPresent(
                "css=grr-hunt-inspector:contains('some regex')"))

        # Check that there's no deprecated rules summary.
        self.assertFalse(
            self.IsElementPresent(
                "css=grr-hunt-inspector:contains('Regex rules')"))
        self.assertFalse(
            self.IsElementPresent(
                "css=grr-hunt-inspector:contains('Integer rules')"))

        # Check that rules summary is present.
        self.assertTrue(
            self.IsElementPresent(
                "css=grr-hunt-inspector:contains('Client Rule Set')"))

        # Check that the hunt object was actually created
        if data_store.RelationalDBReadEnabled():
            hunts_list = sorted(data_store.REL_DB.ReadHuntObjects(offset=0,
                                                                  count=10),
                                key=lambda x: x.create_time)
            self.assertLen(hunts_list, 1)

            # Check that the hunt was created with a correct flow
            hunt = hunts_list[0]

            self.assertEqual(hunt.args.standard.flow_name,
                             file_finder.FileFinder.__name__)
            self.assertEqual(hunt.args.standard.flow_args.paths[0], "/tmp")
            self.assertEqual(hunt.args.standard.flow_args.pathtype,
                             rdf_paths.PathSpec.PathType.TSK)
            # self.assertEqual(hunt.args.flow_args.ignore_errors, True)
            self.assertEqual(hunt.output_plugins[0].plugin_name,
                             "DummyOutputPlugin")

            # Check that hunt was not started
            self.assertEqual(hunt.hunt_state, hunt.HuntState.PAUSED)

            lib_hunt.StartHunt(hunt.hunt_id)

            hunt_rules = self.FindForemanRules(
                rdfvalue.RDFURN("hunts").Add(hunt.hunt_id), token=self.token)
        else:
            hunts_root = aff4.FACTORY.Open("aff4:/hunts", token=self.token)
            hunts_list = list(hunts_root.OpenChildren())
            self.assertLen(hunts_list, 1)

            # Check that the hunt was created with a correct flow
            hunt = hunts_list[0]
            self.assertEqual(hunt.args.flow_runner_args.flow_name,
                             file_finder.FileFinder.__name__)
            self.assertEqual(hunt.args.flow_args.paths[0], "/tmp")
            self.assertEqual(hunt.args.flow_args.pathtype,
                             rdf_paths.PathSpec.PathType.TSK)
            # self.assertEqual(hunt.args.flow_args.ignore_errors, True)
            self.assertEqual(hunt.runner_args.output_plugins[0].plugin_name,
                             "DummyOutputPlugin")

            # Check that hunt was not started
            self.assertEqual(hunt.Get(hunt.Schema.STATE), "PAUSED")

            with aff4.FACTORY.Open(hunt.urn, mode="rw",
                                   token=self.token) as hunt:
                hunt.Run()

            hunt_rules = self.FindForemanRules(hunt.urn, token=self.token)

        # Check that the hunt was created with correct rules
        self.assertLen(hunt_rules, 1)
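        # The rule lifetime should be roughly the default two weeks; allow a
        # second of slack for time elapsed during hunt creation.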
        lifetime = hunt_rules[0].GetLifetime()
        lifetime -= rdfvalue.Duration("2w")
        self.assertLessEqual(lifetime, rdfvalue.Duration("1s"))

        r = hunt_rules[0].client_rule_set

        self.assertEqual(
            r.match_mode,
            foreman_rules.ForemanClientRuleSet.MatchMode.MATCH_ANY)
        self.assertLen(r.rules, 3)

        self.assertEqual(r.rules[0].rule_type,
                         foreman_rules.ForemanClientRule.Type.OS)
        self.assertEqual(r.rules[0].os.os_windows, False)
        self.assertEqual(r.rules[0].os.os_linux, False)
        self.assertEqual(r.rules[0].os.os_darwin, True)

        self.assertEqual(r.rules[1].rule_type,
                         foreman_rules.ForemanClientRule.Type.INTEGER)
        self.assertEqual(r.rules[1].integer.field, "CLIENT_CLOCK")
        self.assertEqual(
            r.rules[1].integer.operator,
            foreman_rules.ForemanIntegerClientRule.Operator.GREATER_THAN)
        self.assertEqual(r.rules[1].integer.value, 1336650631137737)

        self.assertEqual(r.rules[2].rule_type,
                         foreman_rules.ForemanClientRule.Type.REGEX)
        self.assertEqual(r.rules[2].regex.field, "SYSTEM")
        self.assertEqual(r.rules[2].regex.attribute_regex, "Linux")
Example #2
    def WriteBuffer(self, responses):
        """Write the hash received to the blob image."""

        index = responses.request_data["index"]
        if index not in self.state.pending_files:
            return

        # Failed to read the file - report the failure and stop tracking it.
        if not responses.success:
            self._FileFetchFailed(index, responses.request.request.name)
            return

        response = responses.First()
        file_tracker = self.state.pending_files.get(index)
        if file_tracker:
            blob_dict = file_tracker.setdefault("blobs", {})
            blob_index = responses.request_data["blob_index"]
            blob_dict[blob_index] = (response.data, response.length)

            # All blobs for this file have arrived once the blob dict matches
            # the hash list in length.
            if len(blob_dict) == len(file_tracker["hash_list"]):
                # Write the file to the data store.
                stat_entry = file_tracker["stat_entry"]
                urn = stat_entry.pathspec.AFF4Path(self.client_urn)

                if data_store.AFF4Enabled():
                    with aff4.FACTORY.Create(urn,
                                             aff4_grr.VFSBlobImage,
                                             mode="w",
                                             token=self.token) as fd:

                        fd.SetChunksize(self.CHUNK_SIZE)
                        fd.Set(fd.Schema.STAT(stat_entry))
                        fd.Set(fd.Schema.PATHSPEC(stat_entry.pathspec))
                        fd.Set(
                            fd.Schema.CONTENT_LAST(
                                rdfvalue.RDFDatetime().Now()))

                        for index in sorted(blob_dict):
                            digest, length = blob_dict[index]
                            fd.AddBlob(rdf_objects.BlobID.FromBytes(digest),
                                       length)

                if data_store.RelationalDBWriteEnabled():
                    path_info = rdf_objects.PathInfo.FromStatEntry(stat_entry)

                    # Adding files to the filestore requires reading data from
                    # the relational DB, so this code is guarded by a
                    # filestore-read-enabled check.
                    if data_store.RelationalDBReadEnabled("filestore"):
                        blob_refs = []
                        offset = 0
                        for index in sorted(blob_dict):
                            digest, size = blob_dict[index]
                            blob_refs.append(
                                rdf_objects.BlobReference(
                                    offset=offset,
                                    size=size,
                                    blob_id=rdf_objects.BlobID.FromBytes(
                                        digest)))
                            offset += size

                        hash_obj = file_tracker["hash_obj"]

                        client_path = db.ClientPath.FromPathInfo(
                            self.client_id, path_info)
                        hash_id = file_store.AddFileWithUnknownHash(
                            client_path,
                            blob_refs,
                            use_external_stores=self.state.use_external_stores)
                        # If the hash that we've calculated matches what we got from the
                        # client, then simply store the full hash entry.
                        # Otherwise store just the hash that we've calculated.
                        if hash_id.AsBytes() == hash_obj.sha256:
                            path_info.hash_entry = hash_obj
                        else:
                            path_info.hash_entry.sha256 = hash_id.AsBytes()

                    data_store.REL_DB.WritePathInfos(self.client_id,
                                                     [path_info])

                if (not data_store.RelationalDBReadEnabled("filestore")
                        and self.state.use_external_stores):
                    # Publish the new file event to cause the file to be added to the
                    # filestore.
                    events.Events.PublishEvent(
                        "LegacyFileStore.AddFileToStore",
                        urn,
                        token=self.token)

                # Save some space.
                del file_tracker["blobs"]
                del file_tracker["hash_list"]

                # File done, remove from the store and close it.
                self._ReceiveFetchedFile(file_tracker)

                self.state.files_fetched += 1

                if not self.state.files_fetched % 100:
                    self.Log("Fetched %d of %d files.",
                             self.state.files_fetched,
                             self.state.files_to_fetch)
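
The running-offset bookkeeping above is the core of the relational write path: an index-ordered blob map is flattened into contiguous blob references. A minimal, self-contained sketch of that accumulation, using BlobRef as a hypothetical stand-in for rdf_objects.BlobReference:

    from collections import namedtuple

    # Hypothetical stand-in for rdf_objects.BlobReference.
    BlobRef = namedtuple("BlobRef", ["offset", "size", "blob_id"])

    def BuildBlobRefs(blob_dict):
        """Turns {blob_index: (digest, length)} into ordered blob references."""
        blob_refs = []
        offset = 0
        for index in sorted(blob_dict):  # Walk blob indices in file order.
            digest, size = blob_dict[index]
            blob_refs.append(BlobRef(offset=offset, size=size, blob_id=digest))
            offset += size  # The next blob starts where this one ends.
        return blob_refs

    # BuildBlobRefs({0: (b"d0", 5), 1: (b"d1", 3)}) yields references at
    # offsets 0 and 5, i.e. a file laid out as blob 0 followed by blob 1.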
Example #3
    def Handle(self, args, token=None):
        if args.count:
            end = args.offset + args.count
            # Read <count> clients ahead in case some of them fail to open / verify.
            batch_size = end + args.count
        else:
            end = sys.maxsize
            batch_size = end

        keywords = compatibility.ShlexSplit(args.query)
        api_clients = []

        if data_store.RelationalDBReadEnabled():
            index = client_index.ClientIndex()

            # TODO(amoser): We could move the label verification into the
            # database making this method more efficient. Label restrictions
            # should be on small subsets though so this might not be worth
            # it.
            all_client_ids = set()
            for label in self.labels_whitelist:
                label_filter = ["label:" + label] + keywords
                all_client_ids.update(index.LookupClients(label_filter))

            # From here on, `index` is reused as the paging counter.
            index = 0
            for cid_batch in collection.Batch(sorted(all_client_ids),
                                              batch_size):
                client_infos = data_store.REL_DB.MultiReadClientFullInfo(
                    cid_batch)

                for _, client_info in sorted(iteritems(client_infos)):
                    if not self._VerifyLabels(client_info.labels):
                        continue
                    if args.offset <= index < end:
                        api_clients.append(
                            ApiClient().InitFromClientInfo(client_info))
                    index += 1
                    if index >= end:
                        UpdateClientsFromFleetspeak(api_clients)
                        return ApiSearchClientsResult(items=api_clients)

        else:
            index = client_index.CreateClientIndex(token=token)
            all_urns = set()
            for label in self.labels_whitelist:
                label_filter = ["label:" + label] + keywords
                all_urns.update(index.LookupClients(label_filter))

            all_objs = aff4.FACTORY.MultiOpen(all_urns,
                                              aff4_type=aff4_grr.VFSGRRClient,
                                              token=token)

            index = 0
            for client_obj in sorted(all_objs):
                if not self._CheckClientLabels(client_obj):
                    continue
                if args.offset <= index < end:
                    api_clients.append(
                        ApiClient().InitFromAff4Object(client_obj))

                index += 1
                if index >= end:
                    break

        UpdateClientsFromFleetspeak(api_clients)
        return ApiSearchClientsResult(items=api_clients)
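
Both branches above share a filter-then-paginate shape: only clients passing the label check advance the paging index, so offset and count apply to verified results rather than raw candidates. A distilled sketch (Paginate is a hypothetical helper, not part of the API):

    import sys

    def Paginate(candidates, predicate, offset, count):
        """Hypothetical helper mirroring the windowing loop above."""
        end = offset + count if count else sys.maxsize
        results = []
        index = 0
        for item in candidates:
            if not predicate(item):
                continue  # Filtered-out items do not consume offset/count.
            if offset <= index < end:
                results.append(item)
            index += 1
            if index >= end:
                break
        return results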
Example #4
 def testRelationalDBReadsDisabled(self):
     self.assertFalse(data_store.RelationalDBReadEnabled())
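
Whether relational reads are enabled is config-driven: Example #18 below flips the per-category option "Database.useForReads.stats" via test_lib.ConfigOverrider. Assuming a global "Database.useForReads" option exists alongside the per-category ones, the mirror-image test could look like:

 def testRelationalDBReadsEnabled(self):
     # Sketch only: the global "Database.useForReads" option is assumed from
     # the per-category overrides used in Example #18.
     with test_lib.ConfigOverrider({"Database.useForReads": True}):
         self.assertTrue(data_store.RelationalDBReadEnabled())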
Example #5
    def ReadBuffer(self, responses):
        """Read the buffer and write to the file."""
        # Did it work?
        if responses.success:
            response = responses.First()
            if not response:
                # responses.First() returned nothing, so there is no offset
                # to report here.
                raise IOError("Missing hash response.")

            if response.offset <= self.state.max_chunk_number * self.CHUNK_SIZE:
                # Response.data is the hash of the block (32 bytes) and
                # response.length is the length of the block.
                self.state.blobs.append((response.data, response.length))
                self.Log("Received blob hash %s", response.data.encode("hex"))

                # Add one more chunk to the window.
                self.FetchWindow(1)

            if response.offset + response.length >= self.state.file_size:
                # File is complete.
                stat_entry = self.state.stat_entry
                urn = self.state.stat_entry.AFF4Path(self.client_urn)

                # TODO(user): when all the code can read files from REL_DB,
                # protect this with:
                # if not data_store.RelationalDBReadEnabled(category="filestore"):
                if data_store.AFF4Enabled():
                    with aff4.FACTORY.Create(urn,
                                             aff4_grr.VFSBlobImage,
                                             token=self.token) as fd:
                        fd.SetChunksize(self.CHUNK_SIZE)
                        fd.Set(fd.Schema.STAT(stat_entry))

                        for data, length in self.state.blobs:
                            fd.AddBlob(rdf_objects.BlobID.FromBytes(data),
                                       length)
                            fd.Set(fd.Schema.CONTENT_LAST,
                                   rdfvalue.RDFDatetime.Now())

                if data_store.RelationalDBWriteEnabled():
                    path_info = rdf_objects.PathInfo.FromStatEntry(stat_entry)

                    # Adding files to the filestore requires reading data from
                    # the relational DB, so this code is guarded by a
                    # filestore-read-enabled check.
                    if data_store.RelationalDBReadEnabled("filestore"):
                        blob_refs = []
                        offset = 0
                        for data, size in self.state.blobs:
                            blob_refs.append(
                                rdf_objects.BlobReference(
                                    offset=offset,
                                    size=size,
                                    blob_id=rdf_objects.BlobID.FromBytes(
                                        data)))
                            offset += size

                        client_path = db.ClientPath.FromPathInfo(
                            self.client_id, path_info)
                        hash_id = file_store.AddFileWithUnknownHash(
                            client_path, blob_refs)

                        path_info.hash_entry.sha256 = hash_id.AsBytes()

                    data_store.REL_DB.WritePathInfos(self.client_id,
                                                     [path_info])

                # Save some space.
                del self.state["blobs"]
                self.state.success = True
Example #6
    def NewFunction(self, *args, **kw):
        if data_store.RelationalDBReadEnabled():
            self.skipTest("Test is not RELDB-friendly. Skipping...")

        return f(self, *args, **kw)
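
The method above closes over an outer f, so it is the inner function of a test decorator. A plausible reconstruction of the full wrapper (the decorator name is an assumption):

    import functools

    def LegacyDataStoreOnly(f):  # Name assumed; not confirmed by the source.
        """Skips the decorated test whenever relational DB reads are enabled."""

        @functools.wraps(f)
        def NewFunction(self, *args, **kw):
            if data_store.RelationalDBReadEnabled():
                self.skipTest("Test is not RELDB-friendly. Skipping...")

            return f(self, *args, **kw)

        return NewFunction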
Example #7
    def testForceRunCronJob(self):
        cronjobs.GetCronManager().EnableJob(job_id=u"OSBreakDown")

        with test_lib.FakeTime(
                # 2274264646 corresponds to Sat, 25 Jan 2042 12:10:46 GMT.
                rdfvalue.RDFDatetime.FromSecondsSinceEpoch(2274264646),
                increment=1e-6):
            self.Open("/")
            self.Click("css=a[grrtarget=crons]")
            self.Click("css=td:contains('OSBreakDown')")

            # Click on Force Run button and check that dialog appears.
            self.Click("css=button[name=ForceRunCronJob]:not([disabled])")
            self.WaitUntil(
                self.IsTextPresent,
                "Are you sure you want to FORCE-RUN this cron job?")

            # Click on "Proceed" and wait for authorization dialog to appear.
            self.Click("css=button[name=Proceed]")
            self.WaitUntil(self.IsTextPresent, "Create a new approval")

            self.Click("css=grr-request-approval-dialog button[name=Cancel]")
            # Wait for dialog to disappear.
            self.WaitUntilNot(self.IsVisible, "css=.modal-open")

            self.RequestAndGrantCronJobApproval(u"OSBreakDown")

            # Click on Force Run button and check that dialog appears.
            self.Click("css=button[name=ForceRunCronJob]:not([disabled])")
            self.WaitUntil(
                self.IsTextPresent,
                "Are you sure you want to FORCE-RUN this cron job?")

            # Click on "Proceed" and wait for success label to appear.
            # Also check that "Proceed" button gets disabled.
            self.Click("css=button[name=Proceed]")

            self.WaitUntil(self.IsTextPresent,
                           "Cron job flow was FORCE-STARTED successfully!")
            self.assertFalse(self.IsElementPresent("css=button[name=Proceed]"))

            # Click on "Close" and check that dialog disappears.
            self.Click("css=button[name=Close]")
            self.WaitUntilNot(self.IsVisible, "css=.modal-open")

            # Relational cron jobs will only be run the next time a worker checks in.
            if data_store.RelationalDBReadEnabled():
                manager = cronjobs.GetCronManager()
                manager.RunOnce(token=self.token)
                manager._GetThreadPool().Join()

            # TODO(amoser): The lower pane does not refresh automatically so we need
            # to workaround. Remove when we have implemented this auto refresh.
            self.Open("/")
            self.Click("css=a[grrtarget=crons]")
            self.Click("css=td:contains('OSBreakDown')")

            # View should be refreshed automatically. The last run date should appear.
            self.WaitUntil(
                self.IsElementPresent, "css=grr-cron-jobs-list "
                "tr:contains('OSBreakDown') td:contains('2042')")
Example #8
    def Run(self):
        client_id_1 = self.SetupClient(0).Basename()
        client_id_2 = self.SetupClient(1).Basename()

        if data_store.RelationalDBReadEnabled("hunts"):
            with test_lib.FakeTime(42):
                hunt_id = self.CreateHunt(description="the hunt")

            with test_lib.FakeTime(52):
                flow_id = flow_test_lib.StartFlow(
                    flows_processes.ListProcesses,
                    client_id=client_id_1,
                    parent_hunt_id=hunt_id)
                flow_obj = data_store.REL_DB.ReadFlowObject(
                    client_id_1, flow_id)
                flow_obj.flow_state = flow_obj.FlowState.ERROR
                flow_obj.error_message = "Error foo."
                data_store.REL_DB.UpdateFlow(client_id_1,
                                             flow_id,
                                             flow_obj=flow_obj)

            with test_lib.FakeTime(55):
                flow_id = flow_test_lib.StartFlow(
                    flows_processes.ListProcesses,
                    client_id=client_id_2,
                    parent_hunt_id=hunt_id)
                flow_obj = data_store.REL_DB.ReadFlowObject(
                    client_id_2, flow_id)
                flow_obj.flow_state = flow_obj.FlowState.ERROR
                flow_obj.error_message = "Error bar."
                flow_obj.backtrace = "<some backtrace>"
                data_store.REL_DB.UpdateFlow(client_id_2,
                                             flow_id,
                                             flow_obj=flow_obj)

        else:
            with test_lib.FakeTime(42):
                with self.CreateHunt(description="the hunt") as hunt_obj:
                    hunt_id = hunt_obj.urn.Basename()

                    with test_lib.FakeTime(52):
                        hunt_obj.LogClientError(
                            rdf_client.ClientURN(client_id_1), "Error foo.")

                    with test_lib.FakeTime(55):
                        hunt_obj.LogClientError(
                            rdf_client.ClientURN(client_id_2), "Error bar.",
                            "<some backtrace>")

        self.Check("ListHuntErrors",
                   args=hunt_plugin.ApiListHuntErrorsArgs(hunt_id=hunt_id),
                   replace={hunt_id: "H:123456"})
        self.Check("ListHuntErrors",
                   args=hunt_plugin.ApiListHuntErrorsArgs(hunt_id=hunt_id,
                                                          count=1),
                   replace={hunt_id: "H:123456"})
        self.Check("ListHuntErrors",
                   args=hunt_plugin.ApiListHuntErrorsArgs(hunt_id=hunt_id,
                                                          offset=1,
                                                          count=1),
                   replace={hunt_id: "H:123456"})
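
The replace mapping passed to self.Check substitutes the nondeterministic hunt id with the stable token "H:123456" before the response is compared against golden regression data. A sketch of that normalization step (hypothetical helper, not the real Check internals):

    def NormalizeResponse(text, replace):
        # Swap volatile identifiers (e.g. freshly created hunt ids) for stable
        # placeholders so golden-file comparisons stay deterministic.
        for old, new in replace.items():
            text = text.replace(old, new)
        return text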
Example #9
    def Run(self):
        client_id = self.SetupClient(0).Basename()

        if data_store.RelationalDBReadEnabled("hunts"):
            hunt_id = self.CreateHunt()
            flow_id = flow_test_lib.StartFlow(flows_processes.ListProcesses,
                                              client_id=client_id,
                                              parent_hunt_id=hunt_id)

            with test_lib.FakeTime(
                    rdfvalue.RDFDatetime.FromSecondsSinceEpoch(2)):
                data_store.REL_DB.WriteFlowResults([
                    rdf_flow_objects.FlowResult(
                        client_id=client_id,
                        flow_id=flow_id,
                        hunt_id=hunt_id,
                        payload=rdfvalue.RDFString("blah1"))
                ])

            with test_lib.FakeTime(
                    rdfvalue.RDFDatetime.FromSecondsSinceEpoch(43)):
                data_store.REL_DB.WriteFlowResults([
                    rdf_flow_objects.FlowResult(
                        client_id=client_id,
                        flow_id=flow_id,
                        hunt_id=hunt_id,
                        payload=rdfvalue.RDFString("blah2-foo"))
                ])
        else:
            hunt_urn = rdfvalue.RDFURN("aff4:/hunts/H:123456")
            hunt_id = hunt_urn.Basename()

            results = implementation.GRRHunt.ResultCollectionForHID(hunt_urn)
            with data_store.DB.GetMutationPool() as pool:
                result = rdf_flows.GrrMessage(
                    source=client_id,
                    payload=rdfvalue.RDFString("blah1"),
                    age=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(1))
                results.Add(result,
                            timestamp=result.age + rdfvalue.Duration("1s"),
                            mutation_pool=pool)

                result = rdf_flows.GrrMessage(
                    source=client_id,
                    payload=rdfvalue.RDFString("blah2-foo"),
                    age=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(42))
                results.Add(result,
                            timestamp=result.age + rdfvalue.Duration("1s"),
                            mutation_pool=pool)

        replace = {hunt_id: "H:123456"}
        self.Check("ListHuntResults",
                   args=hunt_plugin.ApiListHuntResultsArgs(hunt_id=hunt_id),
                   replace=replace)
        self.Check("ListHuntResults",
                   args=hunt_plugin.ApiListHuntResultsArgs(hunt_id=hunt_id,
                                                           count=1),
                   replace=replace)
        self.Check("ListHuntResults",
                   args=hunt_plugin.ApiListHuntResultsArgs(hunt_id=hunt_id,
                                                           offset=1,
                                                           count=1),
                   replace=replace)
        self.Check("ListHuntResults",
                   args=hunt_plugin.ApiListHuntResultsArgs(hunt_id=hunt_id,
                                                           filter="foo"),
                   replace=replace)
Example #10
def GetCompatClass():
    """Returns the (Aff4)CollectionArchiveGenerator class."""
    if data_store.RelationalDBReadEnabled("filestore"):
        return CollectionArchiveGenerator
    else:
        return Aff4CollectionArchiveGenerator
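
RelationalDBReadEnabled takes an optional category, which lets the migration proceed subsystem by subsystem; these examples use "filestore", "hunts", "foreman" and "artifacts". Callers resolve the class once and then use it uniformly; a hedged usage sketch (the real constructor arguments are not shown here):

    def MakeArchiveGenerator(**kwargs):
        # Dispatch once, then use the returned class uniformly; kwargs stand
        # in for whatever arguments the real constructors take.
        generator_cls = GetCompatClass()
        return generator_cls(**kwargs)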
Example #11
    def Run(self):
        if data_store.RelationalDBReadEnabled("hunts"):
            with test_lib.FakeTime(42):
                hunt_id = self.CreateHunt()

            client_id = self.SetupClient(0).Basename()
            flow_id = flow_test_lib.StartFlow(flows_processes.ListProcesses,
                                              client_id=client_id,
                                              parent_hunt_id=hunt_id)

            with test_lib.FakeTime(52):
                data_store.REL_DB.WriteFlowLogEntries([
                    rdf_flow_objects.FlowLogEntry(
                        client_id=client_id,
                        flow_id=flow_id,
                        hunt_id=hunt_id,
                        message="Sample message: foo")
                ])

            with test_lib.FakeTime(55):
                data_store.REL_DB.WriteFlowLogEntries([
                    rdf_flow_objects.FlowLogEntry(
                        client_id=client_id,
                        flow_id=flow_id,
                        hunt_id=hunt_id,
                        message="Sample message: bar")
                ])
        else:
            with test_lib.FakeTime(42):
                client_id = self.SetupClient(0)
                flow_id = "H:123456"
                with self.CreateHunt(description="the hunt") as hunt_obj:
                    hunt_id = hunt_obj.urn.Basename()
                    logs_collection_urn = hunt_obj.logs_collection_urn

                log_entry = rdf_flows.FlowLog(
                    client_id=client_id,
                    urn=client_id.Add(flow_id),
                    flow_name=hunt_obj.__class__.__name__,
                    log_message="Sample message: foo")
                with test_lib.FakeTime(52):
                    with data_store.DB.GetMutationPool() as pool:
                        grr_collections.LogCollection.StaticAdd(
                            logs_collection_urn, log_entry, mutation_pool=pool)

                log_entry = rdf_flows.FlowLog(
                    client_id=client_id,
                    urn=client_id.Add(flow_id),
                    flow_name=hunt_obj.__class__.__name__,
                    log_message="Sample message: bar")
                with test_lib.FakeTime(55):
                    with data_store.DB.GetMutationPool() as pool:
                        grr_collections.LogCollection.StaticAdd(
                            logs_collection_urn, log_entry, mutation_pool=pool)

        self.Check("ListHuntLogs",
                   args=hunt_plugin.ApiListHuntLogsArgs(hunt_id=hunt_id),
                   replace={hunt_id: "H:123456"})
        self.Check("ListHuntLogs",
                   args=hunt_plugin.ApiListHuntLogsArgs(hunt_id=hunt_id,
                                                        count=1),
                   replace={hunt_id: "H:123456"})
        self.Check("ListHuntLogs",
                   args=hunt_plugin.ApiListHuntLogsArgs(hunt_id=hunt_id,
                                                        offset=1,
                                                        count=1),
                   replace={hunt_id: "H:123456"})
Example #12
    def testMultiGetFileMultiFiles(self):
        """Test MultiGetFile downloading many files at once."""
        client_mock = action_mocks.MultiGetFileClientMock()

        pathspecs = []
        # Make 30 files to download.
        for i in range(30):
            path = os.path.join(self.temp_dir, "test_%s.txt" % i)
            with open(path, "wb") as fd:
                fd.write("Hello")

            pathspecs.append(
                rdf_paths.PathSpec(pathtype=rdf_paths.PathSpec.PathType.OS,
                                   path=path))

        args = transfer.MultiGetFileArgs(pathspecs=pathspecs,
                                         maximum_pending_files=10)
        session_id = flow_test_lib.TestFlowHelper(
            transfer.MultiGetFile.__name__,
            client_mock,
            token=self.token,
            client_id=self.client_id,
            args=args)

        if data_store.RelationalDBReadEnabled(category="filestore"):
            # Now open each file and make sure the data is there.
            for pathspec in pathspecs:
                cp = db.ClientPath.FromPathSpec(self.client_id.Basename(),
                                                pathspec)
                fd_rel_db = file_store.OpenFile(cp)
                self.assertEqual("Hello", fd_rel_db.read())

                # Check that SHA256 hash of the file matches the contents
                # hash and that MD5 and SHA1 are set.
                history = data_store.REL_DB.ReadPathInfoHistory(
                    cp.client_id, cp.path_type, cp.components)
                self.assertEqual(history[-1].hash_entry.sha256,
                                 fd_rel_db.hash_id.AsBytes())
                self.assertIsNotNone(history[-1].hash_entry.sha1)
                self.assertIsNotNone(history[-1].hash_entry.md5)
        else:
            # Check up on the internal flow state.
            flow_state = self._GetFlowState(self.client_id, session_id)
            # All the pathspecs should be in this list.
            self.assertLen(flow_state.indexed_pathspecs, 30)

            # At any one time, there should not be more than 10 files or hashes
            # pending.
            self.assertLessEqual(len(flow_state.pending_files), 10)
            self.assertLessEqual(len(flow_state.pending_hashes), 10)

            # When we finish there should be no pathspecs stored in the flow state.
            for flow_pathspec in flow_state.indexed_pathspecs:
                self.assertIsNone(flow_pathspec)
            for flow_request_data in flow_state.request_data_list:
                self.assertIsNone(flow_request_data)

            for pathspec in pathspecs:
                urn = pathspec.AFF4Path(self.client_id)
                fd = aff4.FACTORY.Open(urn, token=self.token)
                self.assertEqual("Hello", fd.read())
Example #13
    def testMultiGetFileOfSpecialFiles(self):
        """Test that special /proc/ files are handled correctly.

    /proc/ files have the property that they are non seekable from their end
    (i.e. seeking them relative to the end is not supported). They also return
    an st_size of 0. For example:

    $ stat /proc/self/maps
    File: '/proc/self/maps'
    Size: 0   Blocks: 0   IO Block: 1024 regular empty file

    $ head /proc/self/maps
    00400000-00409000 r-xp 00000000 fc:01 9180740 /usr/bin/head
    00608000-00609000 r--p 00008000 fc:01 9180740 /usr/bin/head
    ...

    When we try to use the MultiGetFile flow, it deduplicates the files and
    since it thinks the file has a zero size, the flow will not download the
    file, and instead copy the zero size file into it.
    """
        client_mock = action_mocks.MultiGetFileClientMock()

        # Create a zero sized file.
        zero_sized_filename = os.path.join(self.temp_dir, "zero_size")
        with open(zero_sized_filename, "wb") as fd:
            pass

        pathspec = rdf_paths.PathSpec(pathtype=rdf_paths.PathSpec.PathType.OS,
                                      path=zero_sized_filename)

        flow_test_lib.TestFlowHelper(transfer.MultiGetFile.__name__,
                                     client_mock,
                                     token=self.token,
                                     file_size="1MiB",
                                     client_id=self.client_id,
                                     pathspecs=[pathspec])

        # Now if we try to fetch a real /proc/ filename this will fail because the
        # filestore already contains the zero length file
        # aff4:/files/nsrl/da39a3ee5e6b4b0d3255bfef95601890afd80709.
        pathspec = rdf_paths.PathSpec(pathtype=rdf_paths.PathSpec.PathType.OS,
                                      path="/proc/self/environ")

        flow_test_lib.TestFlowHelper(transfer.MultiGetFile.__name__,
                                     client_mock,
                                     token=self.token,
                                     file_size=1024 * 1024,
                                     client_id=self.client_id,
                                     pathspecs=[pathspec])

        data = open(pathspec.last.path, "rb").read()

        if data_store.RelationalDBReadEnabled(category="filestore"):
            cp = db.ClientPath.FromPathSpec(self.client_id.Basename(),
                                            pathspec)
            fd_rel_db = file_store.OpenFile(cp)
            self.assertEqual(fd_rel_db.size, len(data))
            self.assertMultiLineEqual(fd_rel_db.read(), data)

            # Check that SHA256 hash of the file matches the contents
            # hash and that MD5 and SHA1 are set.
            history = data_store.REL_DB.ReadPathInfoHistory(
                cp.client_id, cp.path_type, cp.components)
            self.assertEqual(history[-1].hash_entry.sha256,
                             fd_rel_db.hash_id.AsBytes())
            self.assertIsNotNone(history[-1].hash_entry.sha1)
            self.assertIsNotNone(history[-1].hash_entry.md5)
        else:
            # Test the AFF4 file that was created - it should be empty since by
            # default we judge the file size based on its stat.st_size.
            urn = pathspec.AFF4Path(self.client_id)
            fd = aff4.FACTORY.Open(urn, token=self.token)
            self.assertEqual(fd.size, len(data))
            self.assertMultiLineEqual(fd.read(len(data)), data)
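
The docstring's premise - procfs entries report a zero st_size although reads return data - can be checked directly on Linux:

    import os

    # On Linux, /proc/self/maps reports st_size == 0 ...
    assert os.stat("/proc/self/maps").st_size == 0

    # ... yet reading it yields real content, which is why size-based
    # deduplication misjudges these files.
    with open("/proc/self/maps", "rb") as fd:
        assert fd.read()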
Example #14
    def testLabelsHuntRuleMatchesCorrectClients(self):
        client_ids = self.SetupClients(10)

        self.AddClientLabel(client_ids[1], u"owner1", u"foo")
        self.AddClientLabel(client_ids[1], u"owner2", u"bar")
        self.AddClientLabel(client_ids[7], u"GRR", u"bar")

        self.Open("/#main=ManageHunts")
        self.Click("css=button[name=NewHunt]")

        # Select "List Processes" flow.
        self.Click("css=#_Processes > i.jstree-icon")
        self.Click("link=ListProcesses")

        # Click 'Next' to go to the output plugins page, hunt parameters page
        # and then to hunt rules page.
        self.Click("css=grr-new-hunt-wizard-form button.Next")
        self.WaitUntil(self.IsElementPresent,
                       "css=grr-wizard-form:contains('Hunt parameters')")
        self.Click("css=grr-new-hunt-wizard-form button.Next")
        self.WaitUntil(
            self.IsElementPresent,
            "css=grr-wizard-form:contains('How to process results')")
        self.Click("css=grr-new-hunt-wizard-form button.Next")
        self.WaitUntil(self.IsElementPresent,
                       "css=grr-wizard-form:contains('Where to run?')")

        # Select 'Clients With Label' rule.
        self.Click("css=grr-configure-rules-page button[name=Add]")
        self.Select("css=grr-new-hunt-wizard-form div.well select", "Label")
        self.Select(
            "css=grr-new-hunt-wizard-form div.well .form-group "
            ".form-group:has(label:contains('Label')):nth-last-of-type(1) "
            "select", "foo")
        self.Click("css=grr-new-hunt-wizard-form div.well .form-group "
                   ".form-group:has(label:contains('Add label')) button")
        self.Select(
            "css=grr-new-hunt-wizard-form div.well .form-group "
            ".form-group:has(label:contains('Label')):nth-last-of-type(1) "
            "select", "bar")
        self.Select(
            "css=grr-new-hunt-wizard-form div.well .form-group "
            ".form-group:has(label:contains('Match mode')) select",
            "Match any")

        # Click 'Next' to go to hunt overview page.  Then click 'Next' to go to
        # submit the hunt and wait until it's created.
        self.Click("css=grr-new-hunt-wizard-form button.Next")
        self.Click("css=grr-new-hunt-wizard-form button.Next")
        self.WaitUntil(self.IsElementPresent,
                       "css=grr-wizard-form:contains('Created Hunt')")

        if data_store.RelationalDBReadEnabled():
            hunts_list = sorted(data_store.REL_DB.ReadHuntObjects(offset=0,
                                                                  count=10),
                                key=lambda x: x.create_time)
            hunt = hunts_list[0]
            lib_hunt.StartHunt(hunt.hunt_id)
        else:
            hunts_root = aff4.FACTORY.Open("aff4:/hunts", token=self.token)
            hunts_list = list(hunts_root.OpenChildren())
            hunt = hunts_list[0]

            with aff4.FACTORY.Open(hunt.urn, mode="rw",
                                   token=self.token) as hunt:
                hunt.Run()

        foreman_obj = foreman.GetForeman(token=self.token)
        for client_id in client_ids:
            tasks_assigned = foreman_obj.AssignTasksToClient(
                client_id.Basename())
            if client_id in [client_ids[1], client_ids[7]]:
                self.assertTrue(tasks_assigned)
            else:
                self.assertFalse(tasks_assigned)
Example #15
def GetForeman(token=None):
  if data_store.RelationalDBReadEnabled(category="foreman"):
    return Foreman()
  else:
    return aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=token)
Example #16
    def ProcessMessages(self, msgs=None, token=None):
        """Processes this event."""
        nanny_msg = ""

        for crash_details in msgs:
            client_urn = crash_details.client_id
            client_id = client_urn.Basename()

            # The session id of the flow that crashed.
            session_id = crash_details.session_id

            flow_obj = aff4.FACTORY.Open(session_id, token=token)

            # Log.
            logging.info("Client crash reported, client %s.", client_urn)

            # Export.
            stats.STATS.IncrementCounter("grr_client_crashes")

            # Write crash data to AFF4.
            if data_store.RelationalDBReadEnabled():
                client = data_store.REL_DB.ReadClientSnapshot(client_id)
                client_info = client.startup_info.client_info
            else:
                client = aff4.FACTORY.Open(client_urn, token=token)
                client_info = client.Get(client.Schema.CLIENT_INFO)

            crash_details.client_info = client_info
            crash_details.crash_type = "Client Crash"

            WriteAllCrashDetails(client_id,
                                 crash_details,
                                 flow_session_id=session_id,
                                 token=token)

            # Also send email.
            to_send = []

            try:
                hunt_session_id = ExtractHuntId(session_id)
                if hunt_session_id and hunt_session_id != session_id:
                    hunt_obj = aff4.FACTORY.Open(
                        hunt_session_id,
                        aff4_type=implementation.GRRHunt,
                        token=token)
                    email = hunt_obj.runner_args.crash_alert_email
                    if email:
                        to_send.append(email)
            except aff4.InstantiationError:
                logging.error("Failed to open hunt %s.", hunt_session_id)

            email = config.CONFIG["Monitoring.alert_email"]
            if email:
                to_send.append(email)

            for email_address in to_send:
                if crash_details.nanny_status:
                    nanny_msg = "Nanny status: %s" % crash_details.nanny_status

                client = aff4.FACTORY.Open(client_urn, token=token)
                hostname = client.Get(client.Schema.HOSTNAME)
                url = "/clients/%s" % client_id

                body = self.__class__.mail_template.render(
                    client_id=client_id,
                    admin_ui=config.CONFIG["AdminUI.url"],
                    hostname=utils.SmartUnicode(hostname),
                    context=utils.SmartUnicode(flow_obj.context),
                    state=utils.SmartUnicode(flow_obj.state),
                    args=utils.SmartUnicode(flow_obj.args),
                    runner_args=utils.SmartUnicode(flow_obj.runner_args),
                    urn=url,
                    nanny_msg=utils.SmartUnicode(nanny_msg),
                    signature=config.CONFIG["Email.signature"])
                email_alerts.EMAIL_ALERTER.SendEmail(
                    email_address,
                    "GRR server",
                    "Client %s reported a crash." % client_id,
                    utils.SmartStr(body),
                    is_html=True)

            if nanny_msg:
                msg = "Client crashed, " + nanny_msg
            else:
                msg = "Client crashed."

            # Now terminate the flow.
            flow.GRRFlow.TerminateAFF4Flow(session_id, reason=msg, token=token)
Example #17
    def Platform(self, responses):
        """Stores information about the platform."""
        if responses.success:
            response = responses.First()

            if data_store.AFF4Enabled():
                # AFF4 client.
                with self._OpenClient(mode="rw") as client:
                    # For backwards compatibility.

                    # These need to be in separate attributes because they get searched on
                    # in the GUI.
                    client.Set(client.Schema.HOSTNAME(response.fqdn))
                    client.Set(client.Schema.SYSTEM(response.system))
                    client.Set(client.Schema.OS_RELEASE(response.release))
                    client.Set(client.Schema.OS_VERSION(response.version))
                    client.Set(client.Schema.KERNEL(response.kernel))
                    client.Set(client.Schema.FQDN(response.fqdn))

                    # response.machine is the machine value of platform.uname()
                    # On Windows this is the value of:
                    # HKEY_LOCAL_MACHINE\System\CurrentControlSet\Control\Session
                    # Manager\Environment\PROCESSOR_ARCHITECTURE
                    # "AMD64", "IA64" or "x86"
                    client.Set(client.Schema.ARCH(response.machine))
                    client.Set(
                        client.Schema.UNAME("%s-%s-%s" %
                                            (response.system, response.release,
                                             response.version)))

                    # Update the client index
                    client_index.CreateClientIndex(
                        token=self.token).AddClient(client)

                if response.system == "Windows":
                    with aff4.FACTORY.Create(self.client_urn.Add("registry"),
                                             standard.VFSDirectory,
                                             token=self.token) as fd:
                        fd.Set(
                            fd.Schema.PATHSPEC,
                            fd.Schema.PATHSPEC(
                                path="/",
                                pathtype=rdf_paths.PathSpec.PathType.REGISTRY))

            # Update the rdf_objects.ClientSnapshot held in the flow state.
            client = self.state.client
            client.os_release = response.release
            client.os_version = response.version
            client.kernel = response.kernel
            client.arch = response.machine
            # Store these for later, there might be more accurate data
            # coming in from the artifact collector.
            self.state.fqdn = response.fqdn
            self.state.os = response.system

            if data_store.RelationalDBWriteEnabled():
                try:
                    # Update the client index
                    client_index.ClientIndex().AddClient(client)
                except db.UnknownClientError:
                    pass

            # No support for OS X cloud machines as yet.
            if response.system in ["Linux", "Windows"]:
                self.CallClient(server_stubs.GetCloudVMMetadata,
                                rdf_cloud.BuildCloudMetadataRequests(),
                                next_state="CloudMetadata")

            known_system_type = True
        else:
            # We failed to get the Platform info, maybe there is a stored
            # system we can use to get at least some data.
            if data_store.RelationalDBReadEnabled():
                client = data_store.REL_DB.ReadClientSnapshot(self.client_id)
                known_system_type = client and client.knowledge_base.os
            else:
                client = self._OpenClient()
                known_system_type = client.Get(client.Schema.SYSTEM)

            self.Log("Could not retrieve Platform info.")

        if known_system_type:
            # We will accept a partial KBInit rather than raise, so pass
            # require_complete=False.
            self.CallFlow(artifact.KnowledgeBaseInitializationFlow.__name__,
                          require_complete=False,
                          lightweight=self.args.lightweight,
                          next_state="ProcessKnowledgeBase")
        else:
            self.Log(
                "Unknown system type, skipping KnowledgeBaseInitializationFlow"
            )
Example #18
 def testPurgeServerStats(self):
     if not data_store.RelationalDBReadEnabled():
         self.skipTest("Test is only for the relational DB. Skipping...")
     fake_stats_collector = prometheus_stats_collector.PrometheusStatsCollector(
         [
             stats_utils.CreateCounterMetadata("fake_counter"),
         ])
     timestamp0 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(1)
     timestamp1 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(2)
     timestamp2 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(3600)
     timestamp3 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(4800)
     config_overrides = {
         "Database.useForReads.stats": True,
         "StatsStore.stats_ttl_hours": 1
     }
     zero_duration = rdfvalue.Duration(0)
     # Backslash continuation is explicitly allowed by Google's style guide for
     # nested context manager expressions spanning 3 or more lines.
     # pylint: disable=g-backslash-continuation
     with test_lib.ConfigOverrider(config_overrides), \
          stats_test_utils.FakeStatsContext(fake_stats_collector), \
          mock.patch.object(system, "_STATS_DELETION_BATCH_SIZE", 1), \
          mock.patch.object(system, "_stats_checkpoint_period", zero_duration):
         with test_lib.FakeTime(timestamp0):
             stats_store._WriteStats(process_id="fake_process_id")
         with test_lib.FakeTime(timestamp1):
                stats_collector_instance.Get().IncrementCounter("fake_counter")
                stats_store._WriteStats(process_id="fake_process_id")
                expected_results = {
                    "fake_process_id": {
                        "fake_counter": [(0, timestamp0), (1, timestamp1)]
                    }
                }
                self.assertDictEqual(
                    stats_store.ReadStats("f", "fake_counter"),
                    expected_results)
            with test_lib.FakeTime(timestamp2):
                stats_store._WriteStats(process_id="fake_process_id")
                expected_results = {
                    "fake_process_id": {
                        "fake_counter": [(0, timestamp0), (1, timestamp1),
                                         (1, timestamp2)]
                    }
                }
                self.assertDictEqual(
                    stats_store.ReadStats("f", "fake_counter"),
                    expected_results)
            with test_lib.FakeTime(timestamp3):
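                # Build and run the purge cron job synchronously rather than
                # waiting for the cron scheduler to pick it up.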
                cron_name = compatibility.GetName(
                    system.PurgeServerStatsCronJob)
                cronjobs.ScheduleSystemCronJobs(names=[cron_name])
                job_data = data_store.REL_DB.ReadCronJobs([cron_name])[0]
                cron_run = rdf_cronjobs.CronJobRun(cron_job_id=cron_name)
                cron_run.GenerateRunId()
                cron_run.started_at = rdfvalue.RDFDatetime.Now()
                cron = system.PurgeServerStatsCronJob(cron_run, job_data)
                cron.Run()
                # timestamp0 and timestamp1 are older than 1h, so they should
                # get deleted.
                expected_results = {
                    "fake_process_id": {
                        "fake_counter": [(1, timestamp2)]
                    }
                }
                self.assertDictEqual(
                    stats_store.ReadStats("f", "fake_counter"),
                    expected_results)
                self.assertEqual(
                    "Deleted 2 stats entries.\nDeleted 1 stats entries.",
                    cron.run_state.log_message)
Example No. 19
    def _LoadArtifactsFromDatastore(self):
        """Load artifacts from the data store."""
        loaded_artifacts = []

        # TODO(hanuszczak): Why do we have to remove anything? If some artifact
        # tries to shadow a system artifact, shouldn't we just ignore it and
        # perhaps issue a warning instead? The datastore being loaded should be
        # read-only during upload.

        # A collection of artifacts that shadow system artifacts and need
        # to be deleted from the data store.
        to_delete = []

        for artifact_coll_urn in self._sources.GetDatastores():
            artifact_coll = ArtifactCollection(artifact_coll_urn)

            if data_store.RelationalDBReadEnabled(category="artifacts"):
                artifact_list = data_store.REL_DB.ReadAllArtifacts()
            else:
                artifact_list = list(artifact_coll)

            for artifact_value in artifact_list:
                try:
                    self.RegisterArtifact(artifact_value,
                                          source="datastore:%s" %
                                          artifact_coll_urn,
                                          overwrite_if_exists=True)
                    loaded_artifacts.append(artifact_value)
                    logging.debug("Loaded artifact %s from %s",
                                  artifact_value.name, artifact_coll_urn)
                except rdf_artifacts.ArtifactDefinitionError as e:
                    # TODO(hanuszczak): String matching on the exception
                    # message is rarely a good idea. This should be refactored
                    # into a dedicated exception class and handled separately.
                    if "system artifact" in str(e):
                        to_delete.append(artifact_value.name)
                    else:
                        raise

        if to_delete:
            DeleteArtifactsFromDatastore(to_delete, reload_artifacts=False)
            self._dirty = True

            # TODO(hanuszczak): This is connected to the previous TODO comment.
            # Why do we delete the shadowing artifacts and then abort the whole
            # upload procedure by raising an exception?
            detail = "system artifacts were shadowed and had to be deleted"
            raise rdf_artifacts.ArtifactDefinitionError(to_delete, detail)

        # Once all artifacts are loaded we can validate.
        revalidate = True
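        # A removed artifact may be a dependency of other artifacts, so re-run
        # validation until a full pass succeeds.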
        while revalidate:
            revalidate = False
            for artifact_obj in loaded_artifacts[:]:
                try:
                    Validate(artifact_obj)
                except rdf_artifacts.ArtifactDefinitionError as e:
                    logging.error("Artifact %s did not validate: %s",
                                  artifact_obj.name, e)
                    artifact_obj.error_message = utils.SmartStr(e)
                    loaded_artifacts.remove(artifact_obj)
                    revalidate = True
Example No. 20
    def Flush(self):
        """Writes the changes in this object to the datastore."""

        if data_store.RelationalDBReadEnabled(category="message_handlers"):
            message_handler_requests = []
            leftover_responses = []

            for r, timestamp in self.response_queue:
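                # A request_id of 0 marks a message addressed to a well-known
                # flow; session_id_map (a module-level mapping) translates its
                # session id into a relational message-handler name.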
                if r.request_id == 0 and r.session_id in session_id_map:
                    message_handler_requests.append(
                        rdf_objects.MessageHandlerRequest(
                            client_id=r.source and r.source.Basename(),
                            handler_name=session_id_map[r.session_id],
                            request_id=r.response_id,
                            request=r.payload))
                else:
                    leftover_responses.append((r, timestamp))

            if message_handler_requests:
                data_store.REL_DB.WriteMessageHandlerRequests(
                    message_handler_requests)
            self.response_queue = leftover_responses

        self.data_store.StoreRequestsAndResponses(
            new_requests=self.request_queue,
            new_responses=self.response_queue,
            requests_to_delete=self.requests_to_delete)

        # Notifications must be written after the requests, so we flush the
        # mutation pool after writing all requests and only notify afterwards.
        mutation_pool = self.data_store.GetMutationPool()
        with mutation_pool:

            if data_store.RelationalDBReadEnabled(category="client_messages"):
                if self.client_messages_to_delete:
                    data_store.REL_DB.DeleteClientMessages(
                        list(itervalues(self.client_messages_to_delete)))
            else:
                messages_by_queue = utils.GroupBy(
                    list(itervalues(self.client_messages_to_delete)),
                    lambda request: request.queue)
                for queue, messages in iteritems(messages_by_queue):
                    self.Delete(queue, messages, mutation_pool=mutation_pool)

            if self.new_client_messages:
                for timestamp, messages in iteritems(
                        utils.GroupBy(self.new_client_messages,
                                      lambda x: x[1])):

                    self.Schedule([x[0] for x in messages],
                                  timestamp=timestamp,
                                  mutation_pool=mutation_pool)

        if self.notifications:
            for notification in itervalues(self.notifications):
                self.NotifyQueue(notification, mutation_pool=mutation_pool)

            mutation_pool.Flush()

        self.request_queue = []
        self.response_queue = []
        self.requests_to_delete = []

        self.client_messages_to_delete = {}
        self.notifications = {}
        self.new_client_messages = []
Example No. 21
def GetCronManager():
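    # Prefer the relational cron manager when relational reads are enabled;
    # otherwise fall back to the legacy AFF4-based manager.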
    if data_store.RelationalDBReadEnabled(category="cronjobs"):
        return cronjobs.CronManager()
    return CronManager()
Example No. 22
    def testGetClientStats(self):
        client_id = self.SetupClient(0)

        class ClientMock(action_mocks.ActionMock):
            def GetClientStats(self, _):
                """Fake get client stats method."""
                response = rdf_client_stats.ClientStats()
                for i in range(12):
                    timestamp = int(i * 10 * 1e6)
                    sample = rdf_client_stats.CpuSample(
                        timestamp=timestamp,
                        user_cpu_time=10 + i,
                        system_cpu_time=20 + i,
                        cpu_percent=10 + i)
                    response.cpu_samples.Append(sample)

                    sample = rdf_client_stats.IOSample(
                        timestamp=timestamp,
                        read_bytes=10 + i,
                        write_bytes=10 + i)
                    response.io_samples.Append(sample)

                return [response]

        flow_test_lib.TestFlowHelper(administrative.GetClientStats.__name__,
                                     ClientMock(),
                                     token=self.token,
                                     client_id=client_id)

        if data_store.RelationalDBReadEnabled("client_stats"):
            samples = data_store.REL_DB.ReadClientStats(
                client_id=client_id.Basename(),
                min_timestamp=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(0),
                max_timestamp=rdfvalue.RDFDatetime.Now())
            self.assertNotEmpty(samples)
            sample = samples[0]
        else:
            urn = client_id.Add("stats")
            stats_fd = aff4.FACTORY.Create(urn,
                                           aff4_stats.ClientStats,
                                           token=self.token,
                                           mode="rw")
            sample = stats_fd.Get(stats_fd.Schema.STATS)

        # Samples are taken at the following timestamps and should be split into 2
        # bins as follows (sample_interval is 60000000):

        # 00000000, 10000000, 20000000, 30000000, 40000000, 50000000  -> Bin 1
        # 60000000, 70000000, 80000000, 90000000, 100000000, 110000000  -> Bin 2
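        # Counter-like fields (user_cpu_time, system_cpu_time, read_bytes)
        # keep the last sample value in each bin, while cpu_percent is
        # averaged over the bin, as the assertions below indicate.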

        self.assertLen(sample.cpu_samples, 2)
        self.assertLen(sample.io_samples, 2)

        self.assertAlmostEqual(sample.io_samples[0].read_bytes, 15.0)
        self.assertAlmostEqual(sample.io_samples[1].read_bytes, 21.0)

        self.assertAlmostEqual(sample.cpu_samples[0].cpu_percent,
                               sum(range(10, 16)) / 6.0)
        self.assertAlmostEqual(sample.cpu_samples[1].cpu_percent,
                               sum(range(16, 22)) / 6.0)

        self.assertAlmostEqual(sample.cpu_samples[0].user_cpu_time, 15.0)
        self.assertAlmostEqual(sample.cpu_samples[1].system_cpu_time, 31.0)
Example No. 23
    def testRelationalDBReadsEnabled(self):
        if not getattr(self, "aff4_only_test", False):
            self.assertTrue(data_store.RelationalDBReadEnabled())
Example No. 24
    def setUp(self):
        super(ApiLabelsRestrictedCallRouterTest, self).setUp()

        self.client_urn = self.SetupClient(0)
        if data_store.RelationalDBReadEnabled():
            data_store.REL_DB.AddClientLabels(self.client_urn.Basename(),
                                              "GRR", ["foo"])
        else:
            with aff4.FACTORY.Open(self.client_urn,
                                   mode="rw",
                                   token=self.token) as fd:
                fd.AddLabel("foo", owner="GRR")
        self.client_id = self.client_urn.Basename()

        self.hunt_id = "H:123456"

        c = api_router.ApiLabelsRestrictedCallRouter

        self.checks = {}
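        # Each CheckMethod() call below records the expected access-control
        # outcome for one router method; the completeness check at the end of
        # setUp() ensures every annotated API method is covered.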

        # Artifacts methods.
        self.CheckMethod(c.ListArtifacts)
        self.CheckMethod(c.UploadArtifact)
        self.CheckMethod(c.DeleteArtifacts)

        # Clients methods.
        self.CheckMethod(c.SearchClients)
        self.CheckMethod(c.GetClient, client_id=self.client_id)
        self.CheckMethod(c.GetClientVersions, client_id=self.client_id)
        self.CheckMethod(c.GetClientVersionTimes, client_id=self.client_id)
        self.CheckMethod(c.InterrogateClient, client_id=self.client_id)
        self.CheckMethod(c.GetInterrogateOperationState)
        self.CheckMethod(c.GetLastClientIPAddress, client_id=self.client_id)

        # Virtual file system methods.
        self.CheckMethod(c.ListFiles, client_id=self.client_id)
        self.CheckMethod(c.GetFileDetails, client_id=self.client_id)
        self.CheckMethod(c.GetFileText, client_id=self.client_id)
        self.CheckMethod(c.GetFileBlob, client_id=self.client_id)
        self.CheckMethod(c.GetFileVersionTimes, client_id=self.client_id)
        self.CheckMethod(c.GetFileDownloadCommand, client_id=self.client_id)
        self.CheckMethod(c.CreateVfsRefreshOperation, client_id=self.client_id)
        self.CheckMethod(c.GetVfsRefreshOperationState)
        self.CheckMethod(c.GetVfsTimeline, client_id=self.client_id)
        self.CheckMethod(c.GetVfsTimelineAsCsv, client_id=self.client_id)

        # Clients labels methods.
        self.CheckMethod(c.ListClientsLabels)
        self.CheckMethod(c.AddClientsLabels, client_ids=[self.client_id])
        self.CheckMethod(c.RemoveClientsLabels, client_ids=[self.client_id])

        # Clients flows methods.
        self.CheckMethod(c.ListFlows, client_id=self.client_id)
        self.CheckMethod(c.GetFlow, client_id=self.client_id)
        self.CheckMethod(
            c.CreateFlow,
            client_id=self.client_id,
            flow=api_flow.ApiFlow(name=processes.ListProcesses.__name__))
        self.CheckMethod(c.CancelFlow, client_id=self.client_id)
        self.CheckMethod(c.ListFlowResults, client_id=self.client_id)
        self.CheckMethod(c.GetFlowResultsExportCommand,
                         client_id=self.client_id)
        self.CheckMethod(c.GetFlowFilesArchive, client_id=self.client_id)
        self.CheckMethod(c.ListFlowOutputPlugins, client_id=self.client_id)
        self.CheckMethod(c.ListFlowOutputPluginLogs, client_id=self.client_id)
        self.CheckMethod(c.ListFlowOutputPluginErrors,
                         client_id=self.client_id)
        self.CheckMethod(c.ListFlowLogs, client_id=self.client_id)

        # Cron jobs methods.
        self.CheckMethod(c.ListCronJobs)
        self.CheckMethod(c.CreateCronJob)
        self.CheckMethod(c.DeleteCronJob)

        # Hunts methods.
        self.CheckMethod(c.ListHunts)
        self.CheckMethod(c.GetHunt, hunt_id=self.hunt_id)
        self.CheckMethod(c.ListHuntErrors, hunt_id=self.hunt_id)
        self.CheckMethod(c.ListHuntLogs, hunt_id=self.hunt_id)
        self.CheckMethod(c.ListHuntResults, hunt_id=self.hunt_id)
        self.CheckMethod(c.GetHuntResultsExportCommand, hunt_id=self.hunt_id)
        self.CheckMethod(c.ListHuntOutputPlugins, hunt_id=self.hunt_id)
        self.CheckMethod(c.ListHuntOutputPluginLogs, hunt_id=self.hunt_id)
        self.CheckMethod(c.ListHuntOutputPluginErrors, hunt_id=self.hunt_id)
        self.CheckMethod(c.ListHuntCrashes, hunt_id=self.hunt_id)
        self.CheckMethod(c.GetHuntClientCompletionStats, hunt_id=self.hunt_id)
        self.CheckMethod(c.GetHuntStats, hunt_id=self.hunt_id)
        self.CheckMethod(c.ListHuntClients, hunt_id=self.hunt_id)
        self.CheckMethod(c.GetHuntContext, hunt_id=self.hunt_id)
        self.CheckMethod(c.CreateHunt)
        self.CheckMethod(c.GetHuntFilesArchive, hunt_id=self.hunt_id)
        self.CheckMethod(c.GetHuntFile, hunt_id=self.hunt_id)

        # Approvals methods.
        self.CheckMethod(c.CreateClientApproval, client_id=self.client_id)
        self.CheckMethod(c.GetClientApproval, client_id=self.client_id)
        self.CheckMethod(c.ListClientApprovals, client_id=self.client_id)
        self.CheckMethod(c.ListHuntApprovals)
        self.CheckMethod(c.ListCronJobApprovals)

        # User settings methods.
        self.CheckMethod(c.GetPendingUserNotificationsCount)
        self.CheckMethod(c.ListPendingUserNotifications)
        self.CheckMethod(c.DeletePendingUserNotification)
        self.CheckMethod(c.ListAndResetUserNotifications)
        self.CheckMethod(c.GetGrrUser)
        self.CheckMethod(c.UpdateGrrUser)

        # Config methods.
        self.CheckMethod(c.GetConfig)
        self.CheckMethod(c.GetConfigOption)

        # Reflection methods.
        self.CheckMethod(c.ListKbFields)
        self.CheckMethod(c.ListFlowDescriptors)
        self.CheckMethod(c.ListAff4AttributeDescriptors)
        self.CheckMethod(c.GetRDFValueDescriptor)
        self.CheckMethod(c.ListRDFValuesDescriptors)
        self.CheckMethod(c.ListOutputPluginDescriptors)
        self.CheckMethod(c.ListKnownEncodings)
        self.CheckMethod(c.ListApiMethods)

        non_checked_methods = (set(iterkeys(c.GetAnnotatedMethods())) -
                               set(iterkeys(self.checks)))
        if non_checked_methods:
            raise RuntimeError(
                "Not all methods are covered with CheckMethod() "
                "checks: " + ", ".join(non_checked_methods))
Example No. 25
    def _CheckHashesWithFileStore(self):
        """Check all queued up hashes for existence in file store.

        Hashes which do not exist in the file store will be downloaded. This
        function flushes the entire queue (self.state.pending_hashes) in order
        to minimize the round trips to the file store.

        If a file was found in the file store it is not scheduled for
        collection and its PathInfo is written to the datastore pointing to the
        file store's hash. Otherwise, we request the client to hash every block
        in the file, and add it to the file tracking queue
        (self.state.pending_files).
        """
        if not data_store.RelationalDBReadEnabled(category="filestore"):
            return self._LegacyCheckHashesWithFileStore()

        if not self.state.pending_hashes:
            return

        # This map represents all the hashes in the pending urns.
        file_hashes = {}

        # Store a mapping of hash to tracker. Keys are SHA256HashID objects,
        # values are lists of tracker dicts.
        hash_to_tracker = {}
        for index, tracker in iteritems(self.state.pending_hashes):

            # We might not have gotten this hash yet.
            if tracker.get("hash_obj") is None:
                continue

            hash_obj = tracker["hash_obj"]
            digest = hash_obj.sha256
            file_hashes[index] = hash_obj
            hash_to_tracker.setdefault(rdf_objects.SHA256HashID(digest),
                                       []).append(tracker)

        # First we get all the files which are present in the file store.
        files_in_filestore = set()

        statuses = file_store.CheckHashes([
            rdf_objects.SHA256HashID.FromBytes(ho.sha256.AsBytes())
            for ho in itervalues(file_hashes)
        ])
        for hash_id, status in iteritems(statuses):
            self.HeartBeat()

            if not status:
                continue

            # Since CheckHashes only returns one digest per unique hash, we
            # need to find any other files pending download with the same hash.
            for tracker in hash_to_tracker[hash_id]:
                self.state.files_skipped += 1
                file_hashes.pop(tracker["index"])
                files_in_filestore.add(hash_id)
                # Remove this tracker from the pending_hashes store since we no longer
                # need to process it.
                self.state.pending_hashes.pop(tracker["index"])

        # Now that the check is done, reset our counter
        self.state.files_hashed_since_check = 0
        # Now record the files that already exist in the file store under the
        # client's VFS paths.
        for hash_id in files_in_filestore:

            for file_tracker in hash_to_tracker.get(hash_id, []):
                stat_entry = file_tracker["stat_entry"]
                path_info = rdf_objects.PathInfo.FromStatEntry(stat_entry)
                path_info.hash_entry = file_tracker["hash_obj"]
                data_store.REL_DB.WritePathInfos(self.client_id, [path_info])

                # Report this hit to the flow's caller.
                self._ReceiveFetchedFile(file_tracker)

        # Now we iterate over all the files which are not in the store and arrange
        # for them to be copied.
        for index in file_hashes:

            # Move the tracker from the pending hashes store to the pending files
            # store - it will now be downloaded.
            file_tracker = self.state.pending_hashes.pop(index)
            self.state.pending_files[index] = file_tracker

            # If we already know how big the file is we use that; otherwise we
            # fall back to the size reported by stat.
            if file_tracker["bytes_read"] > 0:
                file_tracker["size_to_download"] = file_tracker["bytes_read"]
            else:
                file_tracker["size_to_download"] = file_tracker[
                    "stat_entry"].st_size

            # We do not have the file here yet - we need to retrieve it.
            expected_number_of_hashes = (
                file_tracker["size_to_download"] // self.CHUNK_SIZE + 1)

            # We just hash ALL the chunks in the file now. NOTE: This maximizes client
            # VFS cache hit rate and is far more efficient than launching multiple
            # GetFile flows.
            self.state.files_to_fetch += 1

            for i in range(expected_number_of_hashes):
                if i == expected_number_of_hashes - 1:
                    # The last chunk is short.
                    length = file_tracker["size_to_download"] % self.CHUNK_SIZE
                else:
                    length = self.CHUNK_SIZE

                self.CallClient(server_stubs.HashBuffer,
                                pathspec=file_tracker["stat_entry"].pathspec,
                                offset=i * self.CHUNK_SIZE,
                                length=length,
                                next_state="CheckHash",
                                request_data=dict(index=index))

        if self.state.files_hashed % 100 == 0:
            self.Log("Hashed %d files, skipped %s already stored.",
                     self.state.files_hashed, self.state.files_skipped)
Example No. 26
  def AssignTasksToClient(self, client_id):
    """Examines our rules and starts up flows based on the client.

    Args:
      client_id: Client id of the client for tasks to be assigned.

    Returns:
      Number of assigned tasks.
    """
    rules = self.Get(self.Schema.RULES)
    if not rules:
      return 0

    if data_store.RelationalDBReadEnabled():
      last_foreman_run = self._GetLastForemanRunTimeRelational(client_id)
    else:
      last_foreman_run = self._GetLastForemanRunTime(client_id)

    latest_rule = max(rule.created for rule in rules)

    if latest_rule <= last_foreman_run:
      return 0

    # Update the latest checked rule on the client.
    if data_store.RelationalDBWriteEnabled():
      try:
        self._SetLastForemanRunTimeRelational(client_id, latest_rule)
      except db.UnknownClientError:
        pass

    # If the relational db is used for reads, we don't have to update the
    # aff4 object.
    if not data_store.RelationalDBReadEnabled():
      self._SetLastForemanRunTime(client_id, latest_rule)

    relevant_rules = []
    expired_rules = False

    now = time.time() * 1e6
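    # now is expressed in microseconds since the epoch to match the rule
    # timestamps (rule.created and rule.expires).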

    for rule in rules:
      if rule.expires < now:
        expired_rules = True
        continue
      if rule.created <= int(last_foreman_run):
        continue

      relevant_rules.append(rule)

    if data_store.RelationalDBReadEnabled():
      client_data = data_store.REL_DB.ReadClientFullInfo(client_id)
      if client_data is None:
        return 0
    else:
      client_data = aff4.FACTORY.Open(client_id, mode="rw", token=self.token)

    actions_count = 0
    for rule in relevant_rules:
      if self._EvaluateRules(rule, client_data):
        actions_count += self._RunActions(rule, client_id)

    if expired_rules:
      self.ExpireRules()

    return actions_count
Example No. 27
    def Handle(self, args, token=None):
        start_time = args.start
        end_time = args.end

        if not end_time:
            end_time = rdfvalue.RDFDatetime.Now()

        if not start_time:
            start_time = end_time - rdfvalue.Duration("30m")

        if data_store.RelationalDBReadEnabled("client_stats"):
            stat_values = data_store.REL_DB.ReadClientStats(
                client_id=str(args.client_id),
                min_timestamp=start_time,
                max_timestamp=end_time)
        else:
            fd = aff4.FACTORY.Create(args.client_id.ToClientURN().Add("stats"),
                                     aff4_type=aff4_stats.ClientStats,
                                     mode="r",
                                     token=token,
                                     age=(start_time, end_time))

            stat_values = list(fd.GetValuesForAttribute(fd.Schema.STATS))
        points = []
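        # Per-sample metrics (CPU, I/O) carry their own timestamps, while the
        # aggregate network and memory values use the age of the enclosing
        # stat_value.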
        for stat_value in reversed(stat_values):
            if args.metric == args.Metric.CPU_PERCENT:
                points.extend((s.cpu_percent, s.timestamp)
                              for s in stat_value.cpu_samples)
            elif args.metric == args.Metric.CPU_SYSTEM:
                points.extend((s.system_cpu_time, s.timestamp)
                              for s in stat_value.cpu_samples)
            elif args.metric == args.Metric.CPU_USER:
                points.extend((s.user_cpu_time, s.timestamp)
                              for s in stat_value.cpu_samples)
            elif args.metric == args.Metric.IO_READ_BYTES:
                points.extend(
                    (s.read_bytes, s.timestamp) for s in stat_value.io_samples)
            elif args.metric == args.Metric.IO_WRITE_BYTES:
                points.extend((s.write_bytes, s.timestamp)
                              for s in stat_value.io_samples)
            elif args.metric == args.Metric.IO_READ_OPS:
                points.extend(
                    (s.read_count, s.timestamp) for s in stat_value.io_samples)
            elif args.metric == args.Metric.IO_WRITE_OPS:
                points.extend((s.write_count, s.timestamp)
                              for s in stat_value.io_samples)
            elif args.metric == args.Metric.NETWORK_BYTES_RECEIVED:
                points.append((stat_value.bytes_received, stat_value.age))
            elif args.metric == args.Metric.NETWORK_BYTES_SENT:
                points.append((stat_value.bytes_sent, stat_value.age))
            elif args.metric == args.Metric.MEMORY_PERCENT:
                points.append((stat_value.memory_percent, stat_value.age))
            elif args.metric == args.Metric.MEMORY_RSS_SIZE:
                points.append((stat_value.RSS_size, stat_value.age))
            elif args.metric == args.Metric.MEMORY_VMS_SIZE:
                points.append((stat_value.VMS_size, stat_value.age))
            else:
                raise ValueError("Unknown metric.")

        # Points collected from "cpu_samples" and "io_samples" may not be correctly
        # sorted in some cases (as overlaps between different stat_values are
        # possible).
        points.sort(key=lambda x: x[1])

        ts = timeseries.Timeseries()
        ts.MultiAppend(points)

        if args.metric not in self.GAUGE_METRICS:
            ts.MakeIncreasing()

        if len(stat_values) > self.MAX_SAMPLES:
            sampling_interval = rdfvalue.Duration.FromSeconds(
                ((end_time - start_time).seconds // self.MAX_SAMPLES) or 1)
            if args.metric in self.GAUGE_METRICS:
                mode = timeseries.NORMALIZE_MODE_GAUGE
            else:
                mode = timeseries.NORMALIZE_MODE_COUNTER

            ts.Normalize(sampling_interval, start_time, end_time, mode=mode)

        result = ApiGetClientLoadStatsResult()
        for value, timestamp in ts.data:
            dp = api_stats.ApiStatsStoreMetricDataPoint(timestamp=timestamp,
                                                        value=float(value))
            result.data_points.append(dp)

        return result
Example No. 28
def Notify(username, notification_type, message, object_reference):
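    # The relational notification is only written when relational reads are
    # enabled; the legacy (AFF4) notification is always written while the
    # migration is in progress.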
    if data_store.RelationalDBReadEnabled():
        _Notify(username, notification_type, message, object_reference)

    _NotifyLegacy(username, notification_type, message, object_reference)
Example No. 29
    def Handle(self, args, token=None):
        if data_store.RelationalDBReadEnabled():
            return self.HandleRelationalDB(args, token=token)
        else:
            return self.HandleLegacy(args, token=token)
Example No. 30
    def testLiteralExpressionIsProcessedCorrectly(self):
        """Literals are raw bytes. Testing that raw bytes are processed right."""

        # Open up and click on View Hunts.
        self.Open("/")
        self.Click("css=a[grrtarget=hunts]")

        # Open up "New Hunt" wizard
        self.Click("css=button[name=NewHunt]")
        self.WaitUntil(self.IsElementPresent,
                       "css=grr-wizard-form:contains('What to run?')")

        # Click on Filesystem item in flows list
        self.WaitUntil(self.IsElementPresent,
                       "css=#_Filesystem > i.jstree-icon")
        self.Click("css=#_Filesystem > i.jstree-icon")

        # Click on the FileFinder item in Filesystem flows list
        self.Click("link=File Finder")

        self.Click("css=label:contains('Conditions') ~ * button")
        self.Select("css=label:contains('Condition type') ~ * select",
                    "Contents literal match")
        self.Type("css=label:contains('Literal') ~ * input",
                  "foo\\x0d\\xc8bar")

        # Click on "Next" button
        self.Click("css=grr-new-hunt-wizard-form button.Next")
        self.WaitUntil(self.IsElementPresent,
                       "css=grr-wizard-form:contains('Hunt parameters')")
        # Click on "Next" button
        self.Click("css=grr-new-hunt-wizard-form button.Next")
        self.WaitUntil(
            self.IsElementPresent,
            "css=grr-wizard-form:contains('How to process results')")
        # Click on "Next" button
        self.Click("css=grr-new-hunt-wizard-form button.Next")
        self.WaitUntil(self.IsElementPresent,
                       "css=grr-wizard-form:contains('Where to run?')")
        # Click on "Next" button
        self.Click("css=grr-new-hunt-wizard-form button.Next")
        self.WaitUntil(self.IsElementPresent,
                       "css=grr-wizard-form:contains('Review')")

        # Check that the arguments summary is present.
        self.WaitUntil(
            self.IsElementPresent, "css=grr-wizard-form:contains('%s')" %
            file_finder.FileFinder.__name__)
        self.WaitUntil(self.IsTextPresent, b"foo\\x0d\\xc8bar")

        # Click on "Run" button
        self.Click("css=grr-new-hunt-wizard-form button.Next")
        self.WaitUntil(self.IsElementPresent,
                       "css=grr-wizard-form:contains('Created Hunt')")
        # Close the window and check that the hunt was created.
        self.Click("css=button.Next")

        # Check that the hunt object was actually created
        if data_store.RelationalDBReadEnabled():
            hunts_list = sorted(data_store.REL_DB.ReadHuntObjects(offset=0,
                                                                  count=10),
                                key=lambda x: x.create_time)
            self.assertLen(hunts_list, 1)

            # Check that the hunt was created with a correct literal value.
            hunt = hunts_list[0]
            self.assertEqual(hunt.args.standard.flow_name,
                             file_finder.FileFinder.__name__)
            self.assertEqual(
                hunt.args.standard.flow_args.conditions[0].
                contents_literal_match.literal, b"foo\x0d\xc8bar")
        else:
            hunts_root = aff4.FACTORY.Open("aff4:/hunts", token=self.token)
            hunts_list = list(hunts_root.OpenChildren())
            self.assertLen(hunts_list, 1)

            # Check that the hunt was created with a correct literal value.
            hunt = hunts_list[0]
            self.assertEqual(hunt.args.flow_runner_args.flow_name,
                             file_finder.FileFinder.__name__)
            self.assertEqual(
                hunt.args.flow_args.conditions[0].contents_literal_match.
                literal, b"foo\x0d\xc8bar")