Example #1
    def VerifyMessageSignature(self, response_comms, signed_message_list,
                               cipher, cipher_verified, api_version,
                               remote_public_key):
        """Verifies the message list signature.

    In the server we check that the timestamp is later than the ping timestamp
    stored with the client. This ensures that client responses can not be
    replayed.

    Args:
      response_comms: The raw response_comms rdfvalue.
      signed_message_list: The SignedMessageList rdfvalue from the server.
      cipher: The cipher object that should be used to verify the message.
      cipher_verified: If True, the cipher's signature is not verified again.
      api_version: The api version we should use.
      remote_public_key: The public key of the source.
    Returns:
      An rdf_flows.GrrMessage.AuthorizationState.
    """
        if (not cipher_verified
                and not cipher.VerifyCipherSignature(remote_public_key)):
            stats.STATS.IncrementCounter("grr_unauthenticated_messages")
            return rdf_flows.GrrMessage.AuthorizationState.UNAUTHENTICATED

        try:
            client_id = cipher.cipher_metadata.source
            try:
                client = self.client_cache.Get(client_id)
            except KeyError:
                client = aff4.FACTORY.Create(
                    client_id,
                    aff4.AFF4Object.classes["VFSGRRClient"],
                    mode="rw",
                    token=self.token)
                self.client_cache.Put(client_id, client)
                stats.STATS.SetGaugeValue(
                    "grr_frontendserver_client_cache_size",
                    len(self.client_cache))

            ip = response_comms.orig_request.source_ip
            client.Set(client.Schema.CLIENT_IP(ip))

            # For the very first packet from a client we do not yet have its clock.
            remote_time = client.Get(client.Schema.CLOCK) or 0
            client_time = signed_message_list.timestamp or 0

            # This used to be a strict check here so absolutely no out of
            # order messages would be accepted ever. Turns out that some
            # proxies can send your request with some delay even if the
            # client has already timed out (and sent another request in
            # the meantime, making the first one out of order). In that
            # case we would just kill the whole flow as a
            # precaution. Given the behavior of those proxies, this seems
            # now excessive and we have changed the replay protection to
            # only trigger on messages that are more than one hour old.

            if client_time < long(remote_time - rdfvalue.Duration("1h")):
                logging.warning("Message desynchronized for %s: %s >= %s",
                                client_id, long(remote_time), int(client_time))
                # This is likely an old message
                return rdf_flows.GrrMessage.AuthorizationState.DESYNCHRONIZED

            stats.STATS.IncrementCounter("grr_authenticated_messages")

            # Update the client and server timestamps only if the client
            # time moves forward.
            if client_time > long(remote_time):
                client.Set(client.Schema.CLOCK,
                           rdfvalue.RDFDatetime(client_time))
                client.Set(client.Schema.PING, rdfvalue.RDFDatetime.Now())
                for label in client.Get(client.Schema.LABELS, []):
                    stats.STATS.IncrementCounter("client_pings_by_label",
                                                 fields=[label.name])
            else:
                logging.warning("Out of order message for %s: %s >= %s",
                                client_id, long(remote_time), int(client_time))

            client.Flush(sync=False)

        except communicator.UnknownClientCert:
            pass

        return rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED
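
The one-hour replay window described in the comment above reduces to plain rdfvalue arithmetic: subtracting a Duration from an RDFDatetime yields another RDFDatetime, and both coerce to integer microseconds. A minimal sketch of just that check, with hypothetical timestamps and an assumed import path (it varies between GRR releases):

from grr.lib import rdfvalue  # assumed import path; it varies between GRR releases

REPLAY_WINDOW = rdfvalue.Duration("1h")

stored_clock = rdfvalue.RDFDatetime.Now()                 # CLOCK last stored for the client
stale_message = stored_clock - rdfvalue.Duration("2h")    # hypothetical replayed timestamp
delayed_message = stored_clock - rdfvalue.Duration("5m")  # hypothetical proxy-delayed timestamp

# A message is only rejected when it is more than one hour older than the
# stored clock, so small proxy-induced reorderings are still accepted.
assert int(stale_message) < int(stored_clock - REPLAY_WINDOW)        # DESYNCHRONIZED
assert not int(delayed_message) < int(stored_clock - REPLAY_WINDOW)  # accepted
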
Example #2
from grr.server.aff4_objects import aff4_grr
from grr.server.aff4_objects import standard as aff4_standard
from grr.server.aff4_objects import users
from grr.server.flows.general import processes
from grr.server.flows.general import transfer
from grr.server.hunts import implementation
from grr.server.hunts import standard
from grr.server.hunts import standard_test
from grr.test_lib import acl_test_lib
from grr.test_lib import action_mocks
from grr.test_lib import hunt_test_lib
from grr.test_lib import test_lib

# An increasing sequence of times.
TIME_0 = test_lib.FIXED_TIME
TIME_1 = TIME_0 + rdfvalue.Duration("1d")
TIME_2 = TIME_1 + rdfvalue.Duration("1d")


def DateString(t):
    return t.Format("%Y-%m-%d")


def DateTimeString(t):
    return t.Format("%Y-%m-%d %H:%M:%S")


def CreateFileVersions(token):
    """Add new versions for a file."""
    # This file already exists in the fixture at TIME_0, we write a
    # later version.
Example #3
from grr.server.grr_response_server import aff4
from grr.server.grr_response_server import artifact
from grr.server.grr_response_server import client_index
from grr.server.grr_response_server import data_store
from grr.server.grr_response_server import email_alerts
from grr.server.grr_response_server.aff4_objects import aff4_grr
from grr.server.grr_response_server.aff4_objects import filestore
from grr.server.grr_response_server.aff4_objects import users
from grr.server.grr_response_server.flows.general import audit

from grr.server.grr_response_server.hunts import results as hunts_results
from grr.server.grr_response_server.rdfvalues import objects

from grr.test_lib import testing_startup

FIXED_TIME = rdfvalue.RDFDatetime.Now() - rdfvalue.Duration("8d")
TEST_CLIENT_ID = rdf_client.ClientURN("C.1000000000000000")


class GRRBaseTest(unittest.TestCase):
    """This is the base class for all GRR tests."""

    use_relational_reads = False

    def __init__(self, methodName=None):  # pylint: disable=g-bad-name
        """Hack around unittest's stupid constructor.

    We sometimes need to instantiate the test suite without running any tests -
    e.g. to start initialization or setUp() functions. The unittest constructor
    requires a valid method name to be provided.
Example #4
class HuntApprovals30(HuntApprovals):
  """Last month's hunt approvals."""
  category = "/Server/Approvals/Hunts/ 30 days"
  title = "Hunt approval requests and grants for the last 30 days"
  time_offset = rdfvalue.Duration("30d")
Example #5
class HuntActions30(HuntActions):
  """Last month's hunt actions."""
  category = "/Server/Hunts/ 30 days"
  title = "Hunt management actions for the last 30 days"
  time_offset = rdfvalue.Duration("30d")
Example #6
    def testStopping(self):
        """Tests if we can stop a hunt."""

        foreman = aff4.FACTORY.Open("aff4:/foreman",
                                    mode="rw",
                                    token=self.token)
        rules = foreman.Get(foreman.Schema.RULES)

        # Make sure there are no rules yet.
        self.assertEqual(len(rules), 0)
        now = rdfvalue.RDFDatetime.Now()
        expires = rdfvalue.Duration("1h").Expiry()
        # Add some rules.
        rules = [
            rdf_foreman.ForemanRule(created=now,
                                    expires=expires,
                                    description="Test rule1"),
            rdf_foreman.ForemanRule(created=now,
                                    expires=expires,
                                    description="Test rule2")
        ]
        self.AddForemanRules(rules)

        client_rule_set = rdf_foreman.ForemanClientRuleSet(rules=[
            rdf_foreman.ForemanClientRule(
                rule_type=rdf_foreman.ForemanClientRule.Type.REGEX,
                regex=rdf_foreman.ForemanRegexClientRule(
                    attribute_name="GRR client", attribute_regex="HUNT")),
            rdf_foreman.ForemanClientRule(
                rule_type=rdf_foreman.ForemanClientRule.Type.INTEGER,
                integer=rdf_foreman.ForemanIntegerClientRule(
                    attribute_name="Clock",
                    operator=rdf_foreman.ForemanIntegerClientRule.Operator.
                    GREATER_THAN,
                    value=1336650631137737))
        ])

        hunt = implementation.GRRHunt.StartHunt(
            hunt_name=standard.SampleHunt.__name__,
            client_rule_set=client_rule_set,
            client_rate=0,
            token=self.token)

        with hunt:
            runner = hunt.GetRunner()
            runner.Start()

            # Add some more rules.
            rules = [
                rdf_foreman.ForemanRule(created=now,
                                        expires=expires,
                                        description="Test rule3"),
                rdf_foreman.ForemanRule(created=now,
                                        expires=expires,
                                        description="Test rule4")
            ]
            self.AddForemanRules(rules)

            foreman = aff4.FACTORY.Open("aff4:/foreman",
                                        mode="rw",
                                        token=self.token)
            rules = foreman.Get(foreman.Schema.RULES)
            self.assertEqual(len(rules), 5)

            # It should be running.
            self.assertTrue(runner.IsHuntStarted())

            # Now we stop the hunt.
            hunt.Stop()

        foreman = aff4.FACTORY.Open("aff4:/foreman",
                                    mode="rw",
                                    token=self.token)
        rules = foreman.Get(foreman.Schema.RULES)
        # The rule for this hunt should be deleted but the rest should be there.
        self.assertEqual(len(rules), 4)

        # And the hunt should report no outstanding requests any more.
        with hunt:
            self.assertFalse(hunt.GetRunner().IsHuntStarted())
Example #7
    def testVersionDropDownChangesFileContentAndDownloads(self):
        """Test the fileview interface."""

        self.Open("/#/clients/%s" % self.client_id)

        # Go to Browse VFS.
        self.Click("css=a[grrtarget='client.vfs']")

        self.Click("css=#_fs i.jstree-icon")
        self.Click("css=#_fs-os i.jstree-icon")
        self.Click("css=#_fs-os-c i.jstree-icon")

        # Test file versioning.
        self.WaitUntil(self.IsElementPresent, "css=#_fs-os-c-Downloads")
        self.Click("link=Downloads")

        # Verify that we have the latest version in the table by default.
        self.assertTrue(
            gui_test_lib.DateString(gui_test_lib.TIME_2) in self.GetText(
                "css=tr:contains(\"a.txt\")"))

        # Click on the row.
        self.Click("css=tr:contains(\"a.txt\")")
        self.WaitUntilContains("a.txt", self.GetText,
                               "css=div#main_bottomPane h1")
        self.WaitUntilContains("HEAD", self.GetText,
                               "css=.version-dropdown > option[selected]")
        self.WaitUntilContains(gui_test_lib.DateString(gui_test_lib.TIME_2),
                               self.GetText,
                               "css=.version-dropdown > option:nth(1)")

        # Check the data in this file.
        self.Click("css=li[heading=TextView]")
        self.WaitUntilContains("Goodbye World", self.GetText,
                               "css=div.monospace pre")

        downloaded_files = []

        def FakeDownloadHandle(unused_self, args, token=None):
            _ = token  # Avoid unused variable linter warnings.
            aff4_path = args.client_id.ToClientURN().Add(args.file_path)
            age = args.timestamp or aff4.NEWEST_TIME
            downloaded_files.append((aff4_path, age))

            return api_call_handler_base.ApiBinaryStream(
                filename=aff4_path.Basename(), content_generator=xrange(42))

        with utils.Stubber(api_vfs.ApiGetFileBlobHandler, "Handle",
                           FakeDownloadHandle):
            # Try to download the file.
            self.Click("css=li[heading=Download]")

            self.WaitUntilContains(
                gui_test_lib.DateTimeString(gui_test_lib.TIME_2), self.GetText,
                "css=grr-file-download-view")
            self.Click("css=button:contains(\"Download\")")

            # Select the previous version.
            self.Click(
                "css=select.version-dropdown > option:contains(\"%s\")" %
                gui_test_lib.DateString(gui_test_lib.TIME_1))

            # Now we should have a different time.
            self.WaitUntilContains(
                gui_test_lib.DateTimeString(gui_test_lib.TIME_1), self.GetText,
                "css=grr-file-download-view")
            self.Click("css=button:contains(\"Download\")")

            self.WaitUntil(self.IsElementPresent, "css=li[heading=TextView]")

            # the FakeDownloadHandle method was actually called four times, since
            # a file download first sends a HEAD request to check user access.
            self.WaitUntil(lambda: len(downloaded_files) == 4)

        # Both files should be the same...
        self.assertEqual(downloaded_files[0][0],
                         u"aff4:/%s/fs/os/c/Downloads/a.txt" % self.client_id)
        self.assertEqual(downloaded_files[2][0],
                         u"aff4:/%s/fs/os/c/Downloads/a.txt" % self.client_id)
        # But from different times. The downloaded file timestamp is only accurate
        # to the nearest second. Also, the HEAD version of the file is downloaded
        # with age=NEWEST_TIME.
        self.assertEqual(downloaded_files[0][1], aff4.NEWEST_TIME)
        self.assertAlmostEqual(downloaded_files[2][1],
                               gui_test_lib.TIME_1,
                               delta=rdfvalue.Duration("1s"))

        self.Click("css=li[heading=TextView]")

        # Make sure the file content has changed. This version has "Hello World" in
        # it.
        self.WaitUntilContains("Hello World", self.GetText,
                               "css=div.monospace pre")
Example #8
 class NoRandom(cronjobs.SystemCronFlow):
     frequency = rdfvalue.Duration("1d")
     lifetime = rdfvalue.Duration("12h")
     start_time_randomization = False
Example #9
    def Handle(self, args, token):
        stats_store = aff4.FACTORY.Create(
            stats_store_lib.StatsStore.DATA_STORE_ROOT,
            aff4_type=stats_store_lib.StatsStore,
            mode="rw",
            token=token)

        process_ids = stats_store.ListUsedProcessIds()
        filtered_ids = [
            pid for pid in process_ids
            if pid.startswith(args.component.name.lower())
        ]

        start_time = args.start
        end_time = args.end

        if not end_time:
            end_time = rdfvalue.RDFDatetime.Now()

        if not start_time:
            start_time = end_time - rdfvalue.Duration("1h")

        # Run for a little extra time at the start. This improves the quality of the
        # first data points of counter metrics which don't appear in every interval.
        base_start_time = start_time
        # pylint: disable=g-no-augmented-assignment
        start_time = start_time - rdfvalue.Duration("10m")
        # pylint: enable=g-no-augmented-assignment

        if end_time <= start_time:
            raise ValueError("End time can't be less than start time.")

        result = ApiStatsStoreMetric(start=base_start_time,
                                     end=end_time,
                                     metric_name=args.metric_name)

        data = stats_store.MultiReadStats(process_ids=filtered_ids,
                                          metric_name=utils.SmartStr(
                                              args.metric_name),
                                          timestamp=(start_time, end_time))

        if not data:
            return result

        pid = data.keys()[0]
        metadata = stats_store.ReadMetadata(process_id=pid)
        metric_metadata = metadata.AsDict()[args.metric_name]

        query = stats_store_lib.StatsStoreDataQuery(data)
        query.In(args.component.name.lower() + ".*").In(args.metric_name)
        if metric_metadata.fields_defs:
            query.InAll()

        requested_duration = end_time - start_time
        if requested_duration >= rdfvalue.Duration("1d"):
            sampling_duration = rdfvalue.Duration("5m")
        elif requested_duration >= rdfvalue.Duration("6h"):
            sampling_duration = rdfvalue.Duration("1m")
        else:
            sampling_duration = rdfvalue.Duration("30s")

        if metric_metadata.metric_type == metric_metadata.MetricType.COUNTER:
            query.TakeValue().MakeIncreasing().Normalize(
                sampling_duration,
                start_time,
                end_time,
                mode=timeseries.NORMALIZE_MODE_COUNTER)
        elif metric_metadata.metric_type == metric_metadata.MetricType.EVENT:
            if args.distribution_handling_mode == "DH_SUM":
                query.TakeDistributionSum()
            elif args.distribution_handling_mode == "DH_COUNT":
                query.TakeDistributionCount()
            else:
                raise ValueError(
                    "Unexpected request.distribution_handling_mode "
                    "value: %s." % args.distribution_handling_mode)
            query.MakeIncreasing()
            query.Normalize(sampling_duration,
                            start_time,
                            end_time,
                            mode=timeseries.NORMALIZE_MODE_COUNTER)

        elif metric_metadata.metric_type == metric_metadata.MetricType.GAUGE:
            query.TakeValue().Normalize(sampling_duration, start_time,
                                        end_time)
        else:
            raise RuntimeError("Unsupported metric type.")

        if args.aggregation_mode == "AGG_SUM":
            query.AggregateViaSum()
        elif args.aggregation_mode == "AGG_MEAN":
            query.AggregateViaMean()
        elif args.aggregation_mode == "AGG_NONE":
            pass
        else:
            raise ValueError("Unexpected request.aggregation value: %s." %
                             args.aggregation)

        if (args.rate and metric_metadata.metric_type !=
                metric_metadata.MetricType.GAUGE):
            query.Rate()

        query.InTimeRange(base_start_time, end_time)

        for value, timestamp in query.ts.data:
            if value is not None:
                result.data_points.append(
                    ApiStatsStoreMetricDataPoint(timestamp=timestamp,
                                                 value=value))

        return result
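
The sampling-duration ladder in the handler above is the only place where Durations are compared against each other; pulled out on its own it is just a cascade of Duration comparisons. A hedged sketch (the helper name is hypothetical, rdfvalue as above):

def PickSamplingDuration(requested_duration):
    # Coarser sampling for longer requested ranges, mirroring the ladder in
    # Handle() above. end_time - start_time yields a Duration, so the argument
    # compares directly against other Durations.
    if requested_duration >= rdfvalue.Duration("1d"):
        return rdfvalue.Duration("5m")
    elif requested_duration >= rdfvalue.Duration("6h"):
        return rdfvalue.Duration("1m")
    return rdfvalue.Duration("30s")

assert PickSamplingDuration(rdfvalue.Duration("2d")) == rdfvalue.Duration("5m")
assert PickSamplingDuration(rdfvalue.Duration("30m")) == rdfvalue.Duration("30s")
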
Example #10
  def testVersionDropDownChangesFileContentAndDownloads(self):
    """Test the fileview interface."""

    # Set up multiple version for an attribute on the client for tests.
    with self.ACLChecksDisabled():
      for fake_time, hostname in [(TIME_0, "HostnameV1"),
                                  (TIME_1, "HostnameV2"),
                                  (TIME_2, "HostnameV3")]:
        with test_lib.FakeTime(fake_time):
          client = aff4.FACTORY.Open(
              u"C.0000000000000001", mode="rw", token=self.token)
          client.Set(client.Schema.HOSTNAME(hostname))
          client.Close()

    self.Open("/")

    self.Type("client_query", "C.0000000000000001")
    self.Click("client_query_submit")

    self.WaitUntilEqual(u"C.0000000000000001", self.GetText,
                        "css=span[type=subject]")

    # Choose client 1.
    self.Click("css=td:contains('0001')")

    # Go to Browse VFS.
    self.Click("css=a[grrtarget='client.vfs']")

    self.Click("css=#_fs i.jstree-icon")
    self.Click("css=#_fs-os i.jstree-icon")
    self.Click("css=#_fs-os-c i.jstree-icon")

    # Test file versioning.
    self.WaitUntil(self.IsElementPresent, "css=#_fs-os-c-Downloads")
    self.Click("link=Downloads")

    # Verify that we have the latest version in the table by default.
    self.assertTrue(
        DateString(TIME_2) in self.GetText("css=tr:contains(\"a.txt\")"))

    # Click on the row.
    self.Click("css=tr:contains(\"a.txt\")")
    self.WaitUntilContains("a.txt", self.GetText, "css=div#main_bottomPane h1")
    self.WaitUntilContains("HEAD", self.GetText,
                           "css=.version-dropdown > option[selected]")
    self.WaitUntilContains(
        DateString(TIME_2), self.GetText,
        "css=.version-dropdown > option:nth(1)")

    # Check the data in this file.
    self.Click("css=li[heading=TextView]")
    self.WaitUntilContains("Goodbye World", self.GetText,
                           "css=div.monospace pre")

    downloaded_files = []

    def FakeDownloadHandle(unused_self, args, token=None):
      _ = token  # Avoid unused variable linter warnings.
      aff4_path = args.client_id.Add(args.file_path)
      age = args.timestamp or aff4.NEWEST_TIME
      downloaded_files.append((aff4_path, age))

      return api_call_handler_base.ApiBinaryStream(
          filename=aff4_path.Basename(), content_generator=xrange(42))

    with utils.Stubber(api_vfs.ApiGetFileBlobHandler, "Handle",
                       FakeDownloadHandle):
      # Try to download the file.
      self.Click("css=li[heading=Download]")

      self.WaitUntilContains(
          DateTimeString(TIME_2), self.GetText, "css=grr-file-download-view")
      self.Click("css=button:contains(\"Download\")")

      # Select the previous version.
      self.Click("css=select.version-dropdown > option:contains(\"%s\")" %
                 DateString(TIME_1))

      # Now we should have a different time.
      self.WaitUntilContains(
          DateTimeString(TIME_1), self.GetText, "css=grr-file-download-view")
      self.Click("css=button:contains(\"Download\")")

      self.WaitUntil(self.IsElementPresent, "css=li[heading=TextView]")

      # the FakeDownloadHandle method was actually called four times, since
      # a file download first sends a HEAD request to check user access.
      self.WaitUntil(lambda: len(downloaded_files) == 4)

    # Both files should be the same...
    self.assertEqual(downloaded_files[0][0],
                     u"aff4:/C.0000000000000001/fs/os/c/Downloads/a.txt")
    self.assertEqual(downloaded_files[2][0],
                     u"aff4:/C.0000000000000001/fs/os/c/Downloads/a.txt")
    # But from different times. The downloaded file timestamp is only accurate
    # to the nearest second. Also, the HEAD version of the file is downloaded
    # with age=NEWEST_TIME.
    self.assertEqual(downloaded_files[0][1], aff4.NEWEST_TIME)
    self.assertAlmostEqual(
        downloaded_files[2][1], TIME_1, delta=rdfvalue.Duration("1s"))

    self.Click("css=li[heading=TextView]")

    # Make sure the file content has changed. This version has "Hello World" in
    # it.
    self.WaitUntilContains("Hello World", self.GetText, "css=div.monospace pre")
Example #11
 class TestSystemCron(cronjobs.SystemCronFlow):
     frequency = rdfvalue.Duration("10m")
     lifetime = rdfvalue.Duration("12h")
Example #12
    def Start(self):
        """Start state of the flow."""
        # If max_running_time is not specified, set it to 60% of this job's
        # lifetime.
        if not self.state.args.max_running_time:
            self.state.args.max_running_time = rdfvalue.Duration(
                "%ds" % int(ProcessHuntResultsCronFlow.lifetime.seconds * 0.6))

        self.start_time = rdfvalue.RDFDatetime().Now()

        exceptions_by_hunt = {}
        freeze_timestamp = rdfvalue.RDFDatetime().Now()
        for results_urn in aff4.ResultsOutputCollection.QueryNotifications(
                timestamp=freeze_timestamp, token=self.token):

            aff4.ResultsOutputCollection.DeleteNotifications(
                [results_urn], end=freeze_timestamp, token=self.token)

            # Feed the results to output plugins
            try:
                results = aff4.FACTORY.Open(
                    results_urn,
                    aff4_type="ResultsOutputCollection",
                    token=self.token)
            except aff4.InstantiationError:  # Collection does not exist.
                continue

            exceptions_by_plugin = self.ProcessHuntResults(
                results, freeze_timestamp)
            if exceptions_by_plugin:
                hunt_urn = results.Get(results.Schema.RESULTS_SOURCE)
                exceptions_by_hunt[hunt_urn] = exceptions_by_plugin

            lease_time = config_lib.CONFIG["Worker.compaction_lease_time"]
            try:
                with aff4.FACTORY.OpenWithLock(
                        results_urn,
                        blocking=False,
                        aff4_type="ResultsOutputCollection",
                        lease_time=lease_time,
                        token=self.token) as results:
                    num_compacted = results.Compact(callback=self.HeartBeat,
                                                    timestamp=freeze_timestamp)
                    stats.STATS.IncrementCounter("hunt_results_compacted",
                                                 delta=num_compacted)
                    logging.debug("Compacted %d results in %s.", num_compacted,
                                  results_urn)
            except aff4.LockError:
                logging.error(
                    "Trying to compact a collection that's already "
                    "locked: %s", results_urn)
                stats.STATS.IncrementCounter(
                    "hunt_results_compaction_locking_errors")

            if self.CheckIfRunningTooLong():
                self.Log("Running for too long, skipping rest of hunts.")
                break

        if exceptions_by_hunt:
            e = ResultsProcessingError()
            for hunt_urn, exceptions_by_plugin in exceptions_by_hunt.items():
                for plugin_name, exceptions in exceptions_by_plugin.items():
                    for exception in exceptions:
                        e.RegisterSubException(hunt_urn, plugin_name,
                                               exception)
            raise e
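
The default computed at the top of Start() above is simply 60% of the cron flow's declared lifetime, rebuilt as a Duration from a seconds string. A standalone sketch of that arithmetic, using the 40-minute lifetime that ProcessHuntResultsCronFlow declares in the next example (variable names are hypothetical):

lifetime = rdfvalue.Duration("40m")
max_running_time = rdfvalue.Duration("%ds" % int(lifetime.seconds * 0.6))

# 60% of 40 minutes is 24 minutes; Durations built from "1440s" and "24m"
# compare equal because they hold the same number of microseconds.
assert max_running_time == rdfvalue.Duration("24m")
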
Example #13
class ProcessHuntResultsCronFlow(cronjobs.SystemCronFlow):
    """Periodic cron flow that processes hunts results with output plugins."""
    frequency = rdfvalue.Duration("5m")
    lifetime = rdfvalue.Duration("40m")

    args_type = ProcessHuntResultsCronFlowArgs

    DEFAULT_BATCH_SIZE = 1000
    MAX_REVERSED_RESULTS = 500000

    def CheckIfRunningTooLong(self):
        if self.state.args.max_running_time:
            elapsed = (rdfvalue.RDFDatetime().Now().AsSecondsFromEpoch() -
                       self.start_time.AsSecondsFromEpoch())
            if elapsed > self.state.args.max_running_time:
                return True

        return False

    def StatusCollectionUrn(self, hunt_urn):
        return hunt_urn.Add("OutputPluginsStatus")

    def ErrorsCollectionUrn(self, hunt_urn):
        return hunt_urn.Add("OutputPluginsErrors")

    def ApplyPluginsToBatch(self, hunt_urn, plugins, batch, batch_index):
        exceptions_by_plugin = {}
        for plugin_def, plugin in plugins:
            logging.debug("Processing hunt %s with %s, batch %d", hunt_urn,
                          plugin_def.plugin_name, batch_index)

            try:
                plugin.ProcessResponses(batch)

                stats.STATS.IncrementCounter("hunt_results_ran_through_plugin",
                                             delta=len(batch),
                                             fields=[plugin_def.plugin_name])

                plugin_status = rdfvalue.OutputPluginBatchProcessingStatus(
                    plugin_descriptor=plugin_def,
                    status="SUCCESS",
                    batch_index=batch_index,
                    batch_size=len(batch))
            except Exception as e:  # pylint: disable=broad-except
                stats.STATS.IncrementCounter("hunt_output_plugin_errors",
                                             fields=[plugin_def.plugin_name])

                plugin_status = rdfvalue.OutputPluginBatchProcessingStatus(
                    plugin_descriptor=plugin_def,
                    status="ERROR",
                    summary=utils.SmartStr(e),
                    batch_index=batch_index,
                    batch_size=len(batch))

                logging.exception(
                    "Error processing hunt results: hunt %s, "
                    "plugin %s, batch %d", hunt_urn, plugin_def.plugin_name,
                    batch_index)
                self.Log("Error processing hunt results (hunt %s, "
                         "plugin %s, batch %d): %s" %
                         (hunt_urn, plugin_def.plugin_name, batch_index, e))
                exceptions_by_plugin[plugin_def] = e

            collections.PackedVersionedCollection.AddToCollection(
                self.StatusCollectionUrn(hunt_urn), [plugin_status],
                sync=False,
                token=self.token)
            if plugin_status.status == plugin_status.Status.ERROR:
                collections.PackedVersionedCollection.AddToCollection(
                    self.ErrorsCollectionUrn(hunt_urn), [plugin_status],
                    sync=False,
                    token=self.token)

        return exceptions_by_plugin

    def FlushPlugins(self, hunt_urn, plugins):
        flush_exceptions = {}
        for plugin_def, plugin in plugins:
            try:
                plugin.Flush()
            except Exception as e:  # pylint: disable=broad-except
                logging.exception(
                    "Error flushing hunt results: hunt %s, "
                    "plugin %s", hunt_urn, str(plugin))
                self.Log("Error processing hunt results (hunt %s, "
                         "plugin %s): %s" % (hunt_urn, str(plugin), e))
                flush_exceptions[plugin_def] = e

        return flush_exceptions

    def ProcessHuntResults(self, results, freeze_timestamp):
        plugins_exceptions = {}

        hunt_urn = results.Get(results.Schema.RESULTS_SOURCE)
        metadata_urn = hunt_urn.Add("ResultsMetadata")

        batch_size = self.state.args.batch_size or self.DEFAULT_BATCH_SIZE
        batches = utils.Grouper(
            results.GenerateUncompactedItems(
                max_reversed_results=self.MAX_REVERSED_RESULTS,
                timestamp=freeze_timestamp), batch_size)

        with aff4.FACTORY.Open(metadata_urn, mode="rw",
                               token=self.token) as metadata_obj:

            output_plugins = metadata_obj.Get(
                metadata_obj.Schema.OUTPUT_PLUGINS)
            num_processed = int(
                metadata_obj.Get(metadata_obj.Schema.NUM_PROCESSED_RESULTS))

            used_plugins = []
            for batch_index, batch in enumerate(batches):
                batch = list(batch)
                num_processed += len(batch)

                if not used_plugins:
                    for _, (plugin_def,
                            state) in output_plugins.data.iteritems():
                        # TODO(user): Remove as soon as migration to new-style
                        # output plugins is completed.
                        if not hasattr(plugin_def, "GetPluginForState"):
                            logging.error("Invalid plugin_def: %s", plugin_def)
                            continue

                        used_plugins.append(
                            (plugin_def, plugin_def.GetPluginForState(state)))

                batch_exceptions = self.ApplyPluginsToBatch(
                    hunt_urn, used_plugins, batch, batch_index)
                if batch_exceptions:
                    for key, value in batch_exceptions.items():
                        plugins_exceptions.setdefault(key, []).append(value)

                self.HeartBeat()

                # If this flow is working for more than max_running_time - stop
                # processing.
                if self.CheckIfRunningTooLong():
                    self.Log(
                        "Running for too long, skipping rest of batches for %s",
                        hunt_urn)
                    break

            if not used_plugins:
                logging.debug(
                    "Got notification, but no results were processed for %s.",
                    hunt_urn)

            flush_exceptions = self.FlushPlugins(hunt_urn, used_plugins)
            plugins_exceptions.update(flush_exceptions)

            metadata_obj.Set(
                metadata_obj.Schema.OUTPUT_PLUGINS(output_plugins))
            metadata_obj.Set(
                metadata_obj.Schema.NUM_PROCESSED_RESULTS(num_processed))

            return plugins_exceptions

    @flow.StateHandler()
    def Start(self):
        """Start state of the flow."""
        # If max_running_time is not specified, set it to 60% of this job's
        # lifetime.
        if not self.state.args.max_running_time:
            self.state.args.max_running_time = rdfvalue.Duration(
                "%ds" % int(ProcessHuntResultsCronFlow.lifetime.seconds * 0.6))

        self.start_time = rdfvalue.RDFDatetime().Now()

        exceptions_by_hunt = {}
        freeze_timestamp = rdfvalue.RDFDatetime().Now()
        for results_urn in aff4.ResultsOutputCollection.QueryNotifications(
                timestamp=freeze_timestamp, token=self.token):

            aff4.ResultsOutputCollection.DeleteNotifications(
                [results_urn], end=freeze_timestamp, token=self.token)

            # Feed the results to output plugins
            try:
                results = aff4.FACTORY.Open(
                    results_urn,
                    aff4_type="ResultsOutputCollection",
                    token=self.token)
            except aff4.InstantiationError:  # Collection does not exist.
                continue

            exceptions_by_plugin = self.ProcessHuntResults(
                results, freeze_timestamp)
            if exceptions_by_plugin:
                hunt_urn = results.Get(results.Schema.RESULTS_SOURCE)
                exceptions_by_hunt[hunt_urn] = exceptions_by_plugin

            lease_time = config_lib.CONFIG["Worker.compaction_lease_time"]
            try:
                with aff4.FACTORY.OpenWithLock(
                        results_urn,
                        blocking=False,
                        aff4_type="ResultsOutputCollection",
                        lease_time=lease_time,
                        token=self.token) as results:
                    num_compacted = results.Compact(callback=self.HeartBeat,
                                                    timestamp=freeze_timestamp)
                    stats.STATS.IncrementCounter("hunt_results_compacted",
                                                 delta=num_compacted)
                    logging.debug("Compacted %d results in %s.", num_compacted,
                                  results_urn)
            except aff4.LockError:
                logging.error(
                    "Trying to compact a collection that's already "
                    "locked: %s", results_urn)
                stats.STATS.IncrementCounter(
                    "hunt_results_compaction_locking_errors")

            if self.CheckIfRunningTooLong():
                self.Log("Running for too long, skipping rest of hunts.")
                break

        if exceptions_by_hunt:
            e = ResultsProcessingError()
            for hunt_urn, exceptions_by_plugin in exceptions_by_hunt.items():
                for plugin_name, exceptions in exceptions_by_plugin.items():
                    for exception in exceptions:
                        e.RegisterSubException(hunt_urn, plugin_name,
                                               exception)
            raise e
Example #14
class IndexedSequentialCollection(SequentialCollection):
    """An indexed sequential collection of RDFValues.

  Adds an index to SequentialCollection, making it efficient to find the number
  of records present, and to find a particular record number.

  IMPLEMENTATION NOTE: The index is created lazily, and only for records older
    than INDEX_WRITE_DELAY.
  """

    # How many records between index entries. Subclasses may change this.  The
    # full index must fit comfortably in RAM, default is meant to be reasonable
    # for collections of up to ~1b small records. (Assumes we can have ~1m index
    # points in ram, and that reading 1k records is reasonably fast.)

    INDEX_SPACING = 1024

    # An attribute name of the form "index:sc_<i>" at timestamp <t> indicates that
    # the item with record number i was stored at timestamp t. The timestamp
    # suffix is stored as the value.

    INDEX_ATTRIBUTE_PREFIX = "index:sc_"

    # The time to wait before creating an index for a record - hacky defense
    # against the correct index changing due to a late write.

    INDEX_WRITE_DELAY = rdfvalue.Duration("3m")

    def __init__(self, *args, **kwargs):
        super(IndexedSequentialCollection, self).__init__(*args, **kwargs)
        self._index = None

    def _ReadIndex(self):
        if self._index:
            return
        self._index = {0: (0, 0)}
        self._max_indexed = 0
        for (index, ts,
             suffix) in data_store.DB.CollectionReadIndex(self.collection_id):
            self._index[index] = (ts, suffix)
            self._max_indexed = max(index, self._max_indexed)

    def _MaybeWriteIndex(self, i, ts, mutation_pool):
        """Write index marker i."""
        if i > self._max_indexed and i % self.INDEX_SPACING == 0:
            # We only write the index if the timestamp is more than
            # INDEX_WRITE_DELAY in the past: hacky defense against a late write
            # changing the count.
            if ts[0] < (rdfvalue.RDFDatetime.Now() -
                        self.INDEX_WRITE_DELAY).AsMicrosecondsSinceEpoch():
                mutation_pool.CollectionAddIndex(self.collection_id, i, ts[0],
                                                 ts[1])
                self._index[i] = ts
                self._max_indexed = max(i, self._max_indexed)

    def _IndexedScan(self, i, max_records=None):
        """Scan records starting with index i."""
        self._ReadIndex()

        # The record number that we will read next.
        idx = 0
        # The timestamp that we will start reading from.
        start_ts = 0
        if i >= self._max_indexed:
            start_ts = max((0, 0), (self._index[self._max_indexed][0],
                                    self._index[self._max_indexed][1] - 1))
            idx = self._max_indexed
        else:
            try:
                possible_idx = i - i % self.INDEX_SPACING
                start_ts = (max(0, self._index[possible_idx][0]),
                            self._index[possible_idx][1] - 1)
                idx = possible_idx
            except KeyError:
                pass

        if max_records is not None:
            max_records += i - idx

        with data_store.DB.GetMutationPool() as mutation_pool:
            for (ts, value) in self.Scan(after_timestamp=start_ts,
                                         max_records=max_records,
                                         include_suffix=True):
                self._MaybeWriteIndex(idx, ts, mutation_pool)
                if idx >= i:
                    yield (idx, ts, value)
                idx += 1

    def GenerateItems(self, offset=0):
        for (_, _, value) in self._IndexedScan(offset):
            yield value

    def __getitem__(self, index):
        if index >= 0:
            for (_, _, value) in self._IndexedScan(index, max_records=1):
                return value
            raise IndexError("collection index out of range")
        else:
            raise RuntimeError("Index must be >= 0")

    def CalculateLength(self):
        self._ReadIndex()
        highest_index = None
        for (i, _, _) in self._IndexedScan(self._max_indexed):
            highest_index = i
        if highest_index is None:
            return 0
        return highest_index + 1

    def __len__(self):
        return self.CalculateLength()

    def UpdateIndex(self):
        self._ReadIndex()
        for _ in self._IndexedScan(self._max_indexed):
            pass

    @classmethod
    def StaticAdd(cls,
                  collection_urn,
                  rdf_value,
                  timestamp=None,
                  suffix=None,
                  mutation_pool=None):
        r = super(IndexedSequentialCollection,
                  cls).StaticAdd(collection_urn,
                                 rdf_value,
                                 timestamp=timestamp,
                                 suffix=suffix,
                                 mutation_pool=mutation_pool)
        if random.randint(0, cls.INDEX_SPACING) == 0:
            BACKGROUND_INDEX_UPDATER.AddIndexToUpdate(cls, collection_urn)
        return r
Example #15
class VerifyHuntOutputPluginsCronFlow(cronjobs.SystemCronFlow):
    """Runs Verify() method of output plugins of active hunts."""

    frequency = rdfvalue.Duration("4h")
    lifetime = rdfvalue.Duration("4h")

    args_type = VerifyHuntOutputPluginsCronFlowArgs

    NON_VERIFIABLE = "NON_VERIFIABLE"

    def _GroupHuntsAndPluginsByVerifiers(self, hunts):
        """Opens hunts results metadata in bulk and groups the by verifier type.

    We've traded simplicity for performance here. Initial implementations of
    VerifyHuntOutputPluginsCronFlow checked the hunts one-by-one, but that
    turned out to be too slow and inefficient when many hunts had to be
    checked. To make the checks more effective, the MultiVerifyHuntOutput()
    method was introduced in the verifiers API.

    It's this cron flow's responsibility to group the plugin objects by
    verifier type, so that we can feed them to MultiVerifyHuntOutput.

    Args:
      hunts: A list of GRRHunt objects.

    Returns:
      A dictionary where keys are verifier classes and values are lists of
      tuples (plugin id, plugin descriptor, plugin object, hunt object).
      Special constant NON_VERIFIABLE is used as a key for plugins that
      have no corresponding verifier.
    """
        hunts_by_urns = {}
        for hunt in hunts:
            hunts_by_urns[hunt.urn] = hunt

        results_metadata_urns = [hunt.results_metadata_urn for hunt in hunts]
        results_metadata_objects = aff4.FACTORY.MultiOpen(
            results_metadata_urns,
            aff4_type=implementation.HuntResultsMetadata,
            token=self.token)

        results = {}
        for mdata in results_metadata_objects:
            hunt_urn = rdfvalue.RDFURN(mdata.urn.Dirname())
            hunt = hunts_by_urns[hunt_urn]

            for plugin_id, (plugin_descriptor, plugin_state) in mdata.Get(
                    mdata.Schema.OUTPUT_PLUGINS, {}).items():

                plugin_obj = plugin_descriptor.GetPluginForState(plugin_state)
                plugin_verifiers_classes = (
                    plugin_descriptor.GetPluginVerifiersClasses())

                if not plugin_verifiers_classes:
                    results.setdefault(self.NON_VERIFIABLE, []).append(
                        (plugin_id, plugin_descriptor, plugin_obj, hunt))
                else:
                    for cls in plugin_verifiers_classes:
                        results.setdefault(cls, []).append(
                            (plugin_id, plugin_descriptor, plugin_obj, hunt))

        return results

    def _FillResult(self, result, plugin_id, plugin_descriptor):
        result.timestamp = rdfvalue.RDFDatetime.Now()
        result.plugin_id = plugin_id
        result.plugin_descriptor = plugin_descriptor
        return result

    def _VerifyHunts(self, hunts_plugins_by_verifier):
        results_by_hunt = {}

        errors = []
        for verifier_cls, hunts_plugins in hunts_plugins_by_verifier.items():

            if verifier_cls == self.NON_VERIFIABLE:
                for plugin_id, plugin_descriptor, plugin_obj, hunt in hunts_plugins:
                    result = output_plugin.OutputPluginVerificationResult(
                        status=output_plugin.OutputPluginVerificationResult.
                        Status.N_A,
                        status_message=("Plugin %s is not verifiable." %
                                        plugin_obj.__class__.__name__))
                    self._FillResult(result, plugin_id, plugin_descriptor)

                    results_by_hunt.setdefault(hunt.urn, []).append(result)
                    stats.STATS.IncrementCounter(
                        "hunt_output_plugin_verifications",
                        fields=[utils.SmartStr(result.status)])
                continue

            verifier = verifier_cls()

            plugins_hunts_pairs = []
            for plugin_id, plugin_descriptor, plugin_obj, hunt in hunts_plugins:
                plugins_hunts_pairs.append((plugin_obj, hunt))

            try:
                for hunt_urn, result in verifier.MultiVerifyHuntOutput(
                        plugins_hunts_pairs):
                    self._FillResult(result, plugin_id, plugin_descriptor)

                    results_by_hunt.setdefault(hunt.urn, []).append(result)
                    stats.STATS.IncrementCounter(
                        "hunt_output_plugin_verifications",
                        fields=[utils.SmartStr(result.status)])

            except output_plugin.MultiVerifyHuntOutputError as e:
                logging.exception(e)

                errors.extend(e.errors)
                stats.STATS.IncrementCounter(
                    "hunt_output_plugin_verification_errors",
                    delta=len(e.errors))

        for hunt_urn, results in results_by_hunt.items():
            yield hunt_urn, results

        if errors:
            raise MultiHuntVerificationSummaryError(errors)

    def _WriteVerificationResults(self, hunt_urn, results):
        with aff4.FACTORY.Create(hunt_urn.Add("ResultsMetadata"),
                                 aff4_type=implementation.HuntResultsMetadata,
                                 mode="w",
                                 token=self.token) as results_metadata:
            results_metadata.Set(
                results_metadata.Schema.OUTPUT_PLUGINS_VERIFICATION_RESULTS,
                output_plugin.OutputPluginVerificationResultsList(
                    results=results))

    @flow.StateHandler()
    def Start(self):
        hunts_root = aff4.FACTORY.Open("aff4:/hunts", token=self.token)

        if not self.args.check_range:
            self.args.check_range = rdfvalue.Duration(
                "%ds" % int(self.__class__.frequency.seconds * 2))

        range_end = rdfvalue.RDFDatetime.Now()
        range_start = rdfvalue.RDFDatetime.Now() - self.args.check_range

        children_urns = list(
            hunts_root.ListChildren(age=(range_start, range_end)))
        children_urns.sort(key=operator.attrgetter("age"), reverse=True)

        self.Log("Will verify %d hunts." % len(children_urns))

        hunts_to_process = []
        for hunt in hunts_root.OpenChildren(children_urns):
            # Skip non-GenericHunts.
            if not isinstance(hunt, GenericHunt):
                self.Log("Skipping: %s." % utils.SmartStr(hunt.urn))
                continue

            hunts_to_process.append(hunt)

        hunts_by_verifier = self._GroupHuntsAndPluginsByVerifiers(
            hunts_to_process)
        for hunt_urn, results in self._VerifyHunts(hunts_by_verifier):
            self._WriteVerificationResults(hunt_urn, results)
Example #16
 def GenerateSample(self, number=5):
   return rdfvalue.Duration("%ds" % number)
Example #17
class ProcessHuntResultCollectionsCronFlow(cronjobs.SystemCronFlow):
  """Periodic cron flow that processes hunt results.

  The ProcessHuntResultCollectionsCronFlow reads hunt results stored in
  HuntResultCollections and runs output plugins on them.
  """

  frequency = rdfvalue.Duration("5m")
  lifetime = rdfvalue.Duration("40m")
  allow_overruns = True

  args_type = ProcessHuntResultCollectionsCronFlowArgs

  DEFAULT_BATCH_SIZE = 5000

  def CheckIfRunningTooLong(self):
    if self.args.max_running_time:
      elapsed = (rdfvalue.RDFDatetime.Now().AsSecondsFromEpoch() -
                 self.start_time.AsSecondsFromEpoch())
      if elapsed > self.args.max_running_time:
        return True
    return False

  def LoadPlugins(self, metadata_obj):
    output_plugins = metadata_obj.Get(metadata_obj.Schema.OUTPUT_PLUGINS)
    if not output_plugins:
      return output_plugins, []

    output_plugins = output_plugins.ToDict()
    used_plugins = []
    unused_plugins = []

    for plugin_def, state in output_plugins.itervalues():
      if not hasattr(plugin_def, "GetPluginForState"):
        logging.error("Invalid plugin_def: %s", plugin_def)
        continue
      used_plugins.append((plugin_def, plugin_def.GetPluginForState(state)))
    return output_plugins, used_plugins

  def RunPlugins(self, hunt_urn, plugins, results, exceptions_by_plugin):
    for plugin_def, plugin in plugins:
      try:
        plugin.ProcessResponses(results)
        plugin.Flush()

        plugin_status = output_plugin.OutputPluginBatchProcessingStatus(
            plugin_descriptor=plugin_def,
            status="SUCCESS",
            batch_size=len(results))
        stats.STATS.IncrementCounter(
            "hunt_results_ran_through_plugin",
            delta=len(results),
            fields=[plugin_def.plugin_name])

      except Exception as e:  # pylint: disable=broad-except
        logging.exception("Error processing hunt results: hunt %s, "
                          "plugin %s", hunt_urn, utils.SmartStr(plugin))
        self.Log("Error processing hunt results (hunt %s, "
                 "plugin %s): %s" % (hunt_urn, utils.SmartStr(plugin), e))
        stats.STATS.IncrementCounter(
            "hunt_output_plugin_errors", fields=[plugin_def.plugin_name])

        plugin_status = output_plugin.OutputPluginBatchProcessingStatus(
            plugin_descriptor=plugin_def,
            status="ERROR",
            summary=utils.SmartStr(e),
            batch_size=len(results))
        exceptions_by_plugin.setdefault(plugin_def, []).append(e)

      aff4.FACTORY.Open(
          hunt_urn.Add("OutputPluginsStatus"),
          hunts_implementation.PluginStatusCollection,
          mode="w",
          token=self.token).Add(plugin_status)
      if plugin_status.status == plugin_status.Status.ERROR:
        aff4.FACTORY.Open(
            hunt_urn.Add("OutputPluginsErrors"),
            hunts_implementation.PluginStatusCollection,
            mode="w",
            token=self.token).Add(plugin_status)

  def ProcessOneHunt(self, exceptions_by_hunt):
    """Reads results for one hunt and process them."""
    hunt_results_urn, results = (
        hunts_results.HuntResultQueue.ClaimNotificationsForCollection(
            start_time=self.args.start_processing_time,
            token=self.token,
            lease_time=self.lifetime))
    logging.debug("Found %d results for hunt %s", len(results),
                  hunt_results_urn)
    if not results:
      return 0

    hunt_urn = rdfvalue.RDFURN(hunt_results_urn.Dirname())
    batch_size = self.args.batch_size or self.DEFAULT_BATCH_SIZE
    metadata_urn = hunt_urn.Add("ResultsMetadata")
    exceptions_by_plugin = {}
    num_processed_for_hunt = 0
    with aff4.FACTORY.OpenWithLock(
        hunt_results_urn,
        aff4_type=hunts_results.HuntResultCollection,
        lease_time=600,
        token=self.token) as collection_obj:
      with aff4.FACTORY.OpenWithLock(
          metadata_urn, lease_time=600, token=self.token) as metadata_obj:
        all_plugins, used_plugins = self.LoadPlugins(metadata_obj)
        num_processed = int(
            metadata_obj.Get(metadata_obj.Schema.NUM_PROCESSED_RESULTS))
        for batch in utils.Grouper(results, batch_size):
          results = list(
              collection_obj.MultiResolve([(ts, suffix)
                                           for (_, ts, suffix) in batch]))
          self.RunPlugins(hunt_urn, used_plugins, results, exceptions_by_plugin)

          hunts_results.HuntResultQueue.DeleteNotifications(
              [record_id for (record_id, _, _) in batch], token=self.token)
          num_processed += len(batch)
          num_processed_for_hunt += len(batch)
          self.HeartBeat()
          collection_obj.UpdateLease(600)
          metadata_obj.Set(
              metadata_obj.Schema.NUM_PROCESSED_RESULTS(num_processed))
          metadata_obj.UpdateLease(600)
          if self.CheckIfRunningTooLong():
            logging.warning("Run too long, stopping.")
            break

        metadata_obj.Set(metadata_obj.Schema.OUTPUT_PLUGINS(all_plugins))
        metadata_obj.Set(
            metadata_obj.Schema.NUM_PROCESSED_RESULTS(num_processed))

    if exceptions_by_plugin:
      for plugin, exceptions in exceptions_by_plugin.items():
        exceptions_by_hunt.setdefault(hunt_urn, {}).setdefault(
            plugin, []).extend(exceptions)

    logging.debug("Processed %d results.", num_processed_for_hunt)
    return len(results)

  @flow.StateHandler()
  def Start(self):
    self.start_time = rdfvalue.RDFDatetime.Now()

    exceptions_by_hunt = {}
    if not self.args.max_running_time:
      self.args.max_running_time = rdfvalue.Duration("%ds" % int(
          ProcessHuntResultCollectionsCronFlow.lifetime.seconds * 0.6))

    while not self.CheckIfRunningTooLong():
      count = self.ProcessOneHunt(exceptions_by_hunt)
      if not count:
        break

    if exceptions_by_hunt:
      e = ResultsProcessingError()
      for hunt_urn, exceptions_by_plugin in exceptions_by_hunt.items():
        for plugin, exceptions in exceptions_by_plugin.items():
          for exception in exceptions:
            e.RegisterSubException(hunt_urn, plugin, exception)
      raise e
Example #18
 def testStringRepresentationIsTransitive(self):
   t = rdfvalue.Duration("5m")
   self.assertEqual(t.seconds, 300)
   self.assertEqual(t, rdfvalue.Duration(300))
   self.assertEqual(str(t), "5m")
Example #19
    def Handle(self, args, token=None):
        start_time = args.start
        end_time = args.end

        if not end_time:
            end_time = rdfvalue.RDFDatetime.Now()

        if not start_time:
            start_time = end_time - rdfvalue.Duration("30m")

        fd = aff4.FACTORY.Create(args.client_id.ToClientURN().Add("stats"),
                                 aff4_type=aff4_stats.ClientStats,
                                 mode="r",
                                 token=token,
                                 age=(start_time, end_time))

        stat_values = list(fd.GetValuesForAttribute(fd.Schema.STATS))
        points = []
        for stat_value in reversed(stat_values):
            if args.metric == args.Metric.CPU_PERCENT:
                points.extend((s.cpu_percent, s.timestamp)
                              for s in stat_value.cpu_samples)
            elif args.metric == args.Metric.CPU_SYSTEM:
                points.extend((s.system_cpu_time, s.timestamp)
                              for s in stat_value.cpu_samples)
            elif args.metric == args.Metric.CPU_USER:
                points.extend((s.user_cpu_time, s.timestamp)
                              for s in stat_value.cpu_samples)
            elif args.metric == args.Metric.IO_READ_BYTES:
                points.extend(
                    (s.read_bytes, s.timestamp) for s in stat_value.io_samples)
            elif args.metric == args.Metric.IO_WRITE_BYTES:
                points.extend((s.write_bytes, s.timestamp)
                              for s in stat_value.io_samples)
            elif args.metric == args.Metric.IO_READ_OPS:
                points.extend(
                    (s.read_count, s.timestamp) for s in stat_value.io_samples)
            elif args.metric == args.Metric.IO_WRITE_OPS:
                points.extend((s.write_count, s.timestamp)
                              for s in stat_value.io_samples)
            elif args.metric == args.Metric.NETWORK_BYTES_RECEIVED:
                points.append((stat_value.bytes_received, stat_value.age))
            elif args.metric == args.Metric.NETWORK_BYTES_SENT:
                points.append((stat_value.bytes_sent, stat_value.age))
            elif args.metric == args.Metric.MEMORY_PERCENT:
                points.append((stat_value.memory_percent, stat_value.age))
            elif args.metric == args.Metric.MEMORY_RSS_SIZE:
                points.append((stat_value.RSS_size, stat_value.age))
            elif args.metric == args.Metric.MEMORY_VMS_SIZE:
                points.append((stat_value.VMS_size, stat_value.age))
            else:
                raise ValueError("Unknown metric.")

        # Points collected from "cpu_samples" and "io_samples" may not be correctly
        # sorted in some cases (as overlaps between different stat_values are
        # possible).
        points.sort(key=lambda x: x[1])

        ts = timeseries.Timeseries()
        ts.MultiAppend(points)

        if args.metric not in self.GAUGE_METRICS:
            ts.MakeIncreasing()

        if len(stat_values) > self.MAX_SAMPLES:
            sampling_interval = rdfvalue.Duration.FromSeconds(
                ((end_time - start_time).seconds / self.MAX_SAMPLES) or 1)
            if args.metric in self.GAUGE_METRICS:
                mode = timeseries.NORMALIZE_MODE_GAUGE
            else:
                mode = timeseries.NORMALIZE_MODE_COUNTER

            ts.Normalize(sampling_interval, start_time, end_time, mode=mode)

        result = ApiGetClientLoadStatsResult()
        for value, timestamp in ts.data:
            dp = api_stats.ApiStatsStoreMetricDataPoint(timestamp=timestamp,
                                                        value=value)
            result.data_points.append(dp)

        return result
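
When more stat entries are returned than MAX_SAMPLES, the handler downsamples by normalizing the timeseries over a fixed interval. A rough sketch of that interval arithmetic with hypothetical numbers (the value of MAX_SAMPLES is assumed here, and the Duration API is assumed to behave as in the snippet above):

# With the default 30-minute window and an assumed MAX_SAMPLES of 100, each
# normalized bucket covers 30 * 60 / 100 = 18 seconds of samples; the "or 1"
# in the handler guards against a zero-length interval for very short windows.
window = rdfvalue.Duration("30m")
max_samples = 100  # hypothetical stand-in for self.MAX_SAMPLES
bucket_seconds = (window.seconds / max_samples) or 1
sampling_interval = rdfvalue.Duration.FromSeconds(bucket_seconds)
assert sampling_interval.seconds == 18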
Example #20
  def testAddDuration(self):
    duration = rdfvalue.Duration("12h")
    date = rdfvalue.RDFDatetime(1e9)
    self.assertEqual(int(date + duration), 1e9 + 12 * 3600e6)
    duration = rdfvalue.Duration("-60s")
    self.assertEqual(int(date + duration), 1e9 - 60e6)
Example #21
class IndexedSequentialCollection(SequentialCollection):
    """An indexed sequential collection of RDFValues.

  Adds an index to SequentialCollection, making it efficient to find the number
  of records present, and to find a particular record number.

  IMPLEMENTATION NOTE: The index is created lazily, and for records older than
    INDEX_WRITE_DELAY.
  """

    # How many records between index entries. Subclasses may change this. The
    # full index must fit comfortably in RAM; the default is meant to be
    # reasonable for collections of up to ~1B small records (assuming ~1M index
    # points fit in RAM and that reading 1K records is reasonably fast).

    INDEX_SPACING = 1024

    # An attribute name of the form "index:sc_<i>" at timestamp <t> indicates that
    # the item with record number i was stored at timestamp t. The timestamp
    # suffix is stored as the value.

    INDEX_ATTRIBUTE_PREFIX = "index:sc_"

    # The time to wait before creating an index for a record - hacky defense
    # against the correct index changing due to a late write.

    INDEX_WRITE_DELAY = rdfvalue.Duration("5m")

    def __init__(self, urn, **kwargs):
        super(IndexedSequentialCollection, self).__init__(urn, **kwargs)
        self._index = None

    def _ReadIndex(self):
        self._index = {0: (0, 0)}
        self._max_indexed = 0
        for (attr, value,
             ts) in data_store.DB.ResolvePrefix(self.urn,
                                                self.INDEX_ATTRIBUTE_PREFIX,
                                                token=self.token):
            i = int(attr[len(self.INDEX_ATTRIBUTE_PREFIX):], 16)
            self._index[i] = (ts, int(value, 16))
            self._max_indexed = max(i, self._max_indexed)

    def _MaybeWriteIndex(self, i, ts):
        """Write index marker i."""
        if i > self._max_indexed and i % self.INDEX_SPACING == 0:
            # We only write the index if the timestamp is more than 5 minutes in the
            # past: hacky defense against a late write changing the count.
            if ts[0] < (rdfvalue.RDFDatetime().Now() -
                        self.INDEX_WRITE_DELAY).AsMicroSecondsFromEpoch():
                data_store.DB.Set(self.urn,
                                  self.INDEX_ATTRIBUTE_PREFIX + "%08x" % i,
                                  "%06x" % ts[1],
                                  ts[0],
                                  token=self.token,
                                  replace=True)
                self._index[i] = ts
                self._max_indexed = max(i, self._max_indexed)

    def _IndexedScan(self, i, max_records=None):
        """Scan records starting with index i."""
        if not self._index:
            self._ReadIndex()

        # The record number that we will read next.
        idx = 0
        # The timestamp that we will start reading from.
        start_ts = 0
        if i >= self._max_indexed:
            start_ts = max((0, 0), (self._index[self._max_indexed][0],
                                    self._index[self._max_indexed][1] - 1))
            idx = self._max_indexed
        else:
            try:
                possible_idx = i - i % self.INDEX_SPACING
                start_ts = (max(0, self._index[possible_idx][0]),
                            self._index[possible_idx][1] - 1)
                idx = possible_idx
            except KeyError:
                pass

        if max_records is not None:
            max_records += i - idx

        for (ts, value) in self.Scan(after_timestamp=start_ts,
                                     max_records=max_records,
                                     include_suffix=True):
            self._MaybeWriteIndex(idx, ts)
            if idx >= i:
                yield (idx, ts, value)
            idx += 1

    def GenerateItems(self, offset=0):
        for (idx, _, value) in self._IndexedScan(offset):
            yield (idx, value)

    def __getitem__(self, index):
        if index >= 0:
            for (_, _, value) in self._IndexedScan(index, max_records=1):
                return value
            return None
        else:
            raise RuntimeError("Index must be >= 0")

    def CalculateLength(self):
        if not self._index:
            self._ReadIndex()
        last_idx = self._max_indexed
        for (i, _, _) in self._IndexedScan(last_idx):
            last_idx = i
        return last_idx + 1

    def __len__(self):
        return self.CalculateLength()
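
The index entries written by _MaybeWriteIndex are ordinary attribute/value pairs: the record number is hex-encoded into the attribute name and the timestamp suffix is hex-encoded into the value, which is exactly what _ReadIndex reverses. A small standalone sketch of that encoding, using only standard Python (no GRR dependencies):

INDEX_ATTRIBUTE_PREFIX = "index:sc_"

# Encode: record number 2048 (a multiple of INDEX_SPACING) stored with
# timestamp suffix 7.
i, suffix = 2048, 7
attr = INDEX_ATTRIBUTE_PREFIX + "%08x" % i  # "index:sc_00000800"
value = "%06x" % suffix                     # "000007"

# Decode, the same way _ReadIndex does.
decoded_i = int(attr[len(INDEX_ATTRIBUTE_PREFIX):], 16)
decoded_suffix = int(value, 16)
assert (decoded_i, decoded_suffix) == (2048, 7)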
Example #22
  def testOfflineClientStatus(self):
    client_id = self.CreateClient(
        last_ping=rdfvalue.RDFDatetime().Now() - rdfvalue.Duration("1d"))
    self.Open("/#c=" + str(client_id))
    self.WaitUntil(self.IsElementPresent, "css=img[src$='offline.png']")
Example #23
class CronApprovals30(CronApprovals):
  """Last month's cron approvals."""
  category = "/Server/Approvals/Crons/ 30 days"
  title = "Cron approval requests and grants for the last 30 days"
  time_offset = rdfvalue.Duration("30d")
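
The report classes in this family differ only in their category, title, and time_offset. A hypothetical 7-day variant would follow the same pattern (the class below is illustrative and not part of the codebase):

class CronApprovals7(CronApprovals):
  """Last week's cron approvals (hypothetical illustration)."""
  category = "/Server/Approvals/Crons/ 7 days"
  title = "Cron approval requests and grants for the last 7 days"
  time_offset = rdfvalue.Duration("7d")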
Example #24
  def setUp(self):
    super(ApprovalTest, self).setUp()
    self.client_id = self.SetupClients(1)[0]
    self.approval_expiration = rdfvalue.Duration(
        "%ds" % config_lib.CONFIG["ACL.token_expiry"])
Example #25
    def SetupSampleMetrics(token=None):
        store = aff4.FACTORY.Create(None, "StatsStore", mode="w", token=token)

        stats.STATS.RegisterCounterMetric("grr_frontendserver_handle_num")
        stats.STATS.RegisterCounterMetric(
            "grr_frontendserver_handle_throttled_num")

        now = rdfvalue.RDFDatetime().Now()
        handle_data = [(3, now - rdfvalue.Duration("50m")),
                       (0, now - rdfvalue.Duration("45m")),
                       (1, now - rdfvalue.Duration("40m")),
                       (0, now - rdfvalue.Duration("35m")),
                       (0, now - rdfvalue.Duration("30m")),
                       (1, now - rdfvalue.Duration("25m")),
                       (0, now - rdfvalue.Duration("20m")),
                       (0, now - rdfvalue.Duration("15m")),
                       (0, now - rdfvalue.Duration("10m")),
                       (5, now - rdfvalue.Duration("5m")), (0, now)]
        handle_data = [(value, timestamp.AsMicroSecondsFromEpoch())
                       for value, timestamp in handle_data]
        for value, timestamp in handle_data:
            with test_lib.FakeTime(timestamp / 1e6):
                stats.STATS.IncrementCounter("grr_frontendserver_handle_num",
                                             value)
                store.WriteStats(process_id="frontend")

        throttle_data = [(0, now - rdfvalue.Duration("50m")),
                         (0, now - rdfvalue.Duration("45m")),
                         (0, now - rdfvalue.Duration("40m")),
                         (0, now - rdfvalue.Duration("35m")),
                         (0, now - rdfvalue.Duration("30m")),
                         (0, now - rdfvalue.Duration("25m")),
                         (0, now - rdfvalue.Duration("20m")),
                         (0, now - rdfvalue.Duration("15m")),
                         (0, now - rdfvalue.Duration("10m")),
                         (0, now - rdfvalue.Duration("5m")), (0, now)]
        throttle_data = [(value, timestamp.AsMicroSecondsFromEpoch())
                         for value, timestamp in throttle_data]

        for value, timestamp in throttle_data:
            with test_lib.FakeTime(timestamp / 1e6):
                stats.STATS.IncrementCounter(
                    "grr_frontendserver_handle_throttled_num", value)
                store.WriteStats(process_id="frontend")
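
Because these are cumulative counters, the value recorded at each FakeTime step is the running sum of the increments, not the increment itself. A small standalone illustration of what the handle-counter series above works out to (plain Python, assuming the counter semantics just described):

# Increments applied at 5-minute steps from "50m ago" up to "now".
increments = [3, 0, 1, 0, 0, 1, 0, 0, 0, 5, 0]
running = []
total = 0
for inc in increments:
  total += inc
  running.append(total)
assert running == [3, 3, 4, 4, 4, 5, 5, 5, 5, 10, 10]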
Example #26
  def testNewHuntWizard(self):
    self.CreateHuntFixtureWithTwoClients()

    # Open up and click on View Hunts.
    self.Open("/")
    self.WaitUntil(self.IsElementPresent, "client_query")
    self.WaitUntil(self.IsElementPresent, "css=a[grrtarget=hunts]")
    self.Click("css=a[grrtarget=hunts]")
    self.WaitUntil(self.IsElementPresent, "css=button[name=NewHunt]")

    # Open up "New Hunt" wizard
    self.Click("css=button[name=NewHunt]")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('What to run?')")

    # Click on Filesystem item in flows list
    self.WaitUntil(self.IsElementPresent, "css=#_Filesystem > i.jstree-icon")
    self.Click("css=#_Filesystem > i.jstree-icon")

    # Click on the FileFinder item in Filesystem flows list
    self.Click("link=File Finder")

    # Wait for flow configuration form to be rendered (just wait for first
    # input field).
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-new-hunt-wizard-form label:contains('Paths')")

    # Change "path" and "pathtype" values
    self.Type(
        "css=grr-new-hunt-wizard-form "
        "grr-form-proto-repeated-field:has(label:contains('Paths')) "
        "input", "/tmp")
    self.Select(
        "css=grr-new-hunt-wizard-form "
        "grr-form-proto-single-field:has(label:contains('Pathtype')) "
        "select", "TSK")

    # Click on "Next" button
    self.Click("css=grr-new-hunt-wizard-form button.Next")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('Hunt parameters')")

    # Click on "Next" button
    self.Click("css=grr-new-hunt-wizard-form button.Next")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('How to process results')")

    # Click on "Back" button and check that all the values in the form
    # remain intact.
    self.Click("css=grr-new-hunt-wizard-form button.Back")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('Hunt parameters')")

    self.Click("css=grr-new-hunt-wizard-form button.Back")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-new-hunt-wizard-form label:contains('Paths')")

    self.assertEqual(
        "/tmp",
        self.GetValue(
            "css=grr-new-hunt-wizard-form "
            "grr-form-proto-repeated-field:has(label:contains('Paths')) input"))

    self.assertEqual(
        "TSK",
        self.GetSelectedLabel(
            "css=grr-new-hunt-wizard-form "
            "grr-form-proto-single-field:has(label:contains('Pathtype')) select"
        ))

    # Click on "Next" button
    self.Click("css=grr-new-hunt-wizard-form button.Next")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('Hunt parameters')")

    # Click on "Next" button
    self.Click("css=grr-new-hunt-wizard-form button.Next")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('How to process results')")

    # Configure the hunt to use dummy output plugin.
    self.Click("css=grr-new-hunt-wizard-form button[name=Add]")
    self.Select("css=grr-new-hunt-wizard-form select", "DummyOutputPlugin")
    self.Type(
        "css=grr-new-hunt-wizard-form "
        "grr-form-proto-single-field:has(label:contains('Filename Regex')) "
        "input", "some regex")

    # Click on "Next" button
    self.Click("css=grr-new-hunt-wizard-form button.Next")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('Where to run?')")

    # Empty set of rules should be valid.
    self.WaitUntil(self.IsElementPresent, "css=button.Next:not([disabled])")

    # A note informs what an empty set of rules means.
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('No rules specified!')")

    # Select the alternative match mode that matches a client if any of the
    # rules evaluates to true.
    self.Select(
        "css=grr-configure-rules-page "
        "label:contains('Match mode') ~ * select", "Match any")

    # The note depends on the match mode.
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('No rules specified!')")

    # Create 3 foreman rules. Note that the "Add" button adds rules to the
    # beginning of the list, so we always use the :nth(0) selector.
    self.Click("css=grr-configure-rules-page button[name=Add]")
    self.Select("css=grr-configure-rules-page div.well:nth(0) select", "Regex")
    rule = foreman_rules.ForemanRegexClientRule
    label = rule.ForemanStringField.SYSTEM.description
    self.Select(
        "css=grr-configure-rules-page div.well:nth(0) "
        "label:contains('Field') ~ * select", label)
    self.Type(
        "css=grr-configure-rules-page div.well:nth(0) "
        "label:contains('Attribute regex') ~ * input", "Linux")

    self.Click("css=grr-configure-rules-page button[name=Add]")
    self.Select("css=grr-configure-rules-page div.well:nth(0) select",
                "Integer")

    rule = foreman_rules.ForemanIntegerClientRule
    label = rule.ForemanIntegerField.CLIENT_CLOCK.description
    self.Select(
        "css=grr-configure-rules-page div.well:nth(0) "
        "label:contains('Field') ~ * select", label)
    self.Select(
        "css=grr-configure-rules-page div.well:nth(0) "
        "label:contains('Operator') ~ * select", "GREATER_THAN")
    self.Type(
        "css=grr-configure-rules-page div.well:nth(0) "
        "label:contains('Value') ~ * input", "1336650631137737")

    self.Click("css=grr-configure-rules-page button[name=Add]")
    self.Click("css=grr-configure-rules-page div.well:nth(0) "
               "label:contains('Os darwin') ~ * input[type=checkbox]")

    # Click on "Back" button
    self.Click("css=grr-new-hunt-wizard-form button.Back")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('How to process results')")

    # Click on "Next" button again and check that all the values that
    # we've just entered remain intact.
    self.Click("css=grr-new-hunt-wizard-form button.Next")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('Where to run?')")

    # Click on "Next" button
    self.Click("css=grr-new-hunt-wizard-form button.Next")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('Review')")

    # Check that the arguments summary is present.
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('Paths')")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('/tmp')")

    # Check that output plugins are shown.
    self.assertTrue(
        self.IsElementPresent(
            "css=grr-wizard-form:contains('DummyOutputPlugin')"))
    self.assertTrue(
        self.IsElementPresent("css=grr-wizard-form:contains('some regex')"))

    # Check that there's no deprecated rules summary.
    self.assertFalse(
        self.IsElementPresent("css=grr-wizard-form:contains('Regex rules')"))
    self.assertFalse(
        self.IsElementPresent("css=grr-wizard-form:contains('Integer rules')"))

    # Check that rules summary is present.
    self.assertTrue(
        self.IsElementPresent(
            "css=grr-wizard-form:contains('Client rule set')"))

    # Click on "Run" button
    self.Click("css=grr-new-hunt-wizard-form button.Next")

    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('Created Hunt')")

    # Close the window and check that the hunt was created.
    self.Click("css=button.Next")

    # Select newly created hunt.
    self.Click("css=grr-hunts-list td:contains('GenericHunt')")

    # Check that correct details are displayed in hunt details tab.
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-hunt-inspector:contains('GenericHunt')")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-hunt-inspector:contains('Flow Arguments')")

    self.assertTrue(
        self.IsElementPresent("css=grr-hunt-inspector:contains('Paths')"))
    self.assertTrue(
        self.IsElementPresent("css=grr-hunt-inspector:contains('/tmp')"))

    self.assertTrue(
        self.IsElementPresent(
            "css=grr-hunt-inspector:contains('DummyOutputPlugin')"))
    self.assertTrue(
        self.IsElementPresent("css=grr-hunt-inspector:contains('some regex')"))

    # Check that there's no deprecated rules summary.
    self.assertFalse(
        self.IsElementPresent("css=grr-hunt-inspector:contains('Regex rules')"))
    self.assertFalse(
        self.IsElementPresent(
            "css=grr-hunt-inspector:contains('Integer rules')"))

    # Check that rules summary is present.
    self.assertTrue(
        self.IsElementPresent(
            "css=grr-hunt-inspector:contains('Client Rule Set')"))

    # Check that the hunt object was actually created
    hunts_root = aff4.FACTORY.Open("aff4:/hunts", token=self.token)
    hunts_list = list(hunts_root.OpenChildren())
    self.assertEqual(len(hunts_list), 1)

    # Check that the hunt was created with a correct flow
    hunt = hunts_list[0]
    self.assertEqual(hunt.args.flow_runner_args.flow_name,
                     file_finder.FileFinder.__name__)
    self.assertEqual(hunt.args.flow_args.paths[0], "/tmp")
    self.assertEqual(hunt.args.flow_args.pathtype,
                     rdf_paths.PathSpec.PathType.TSK)
    # self.assertEqual(hunt.args.flow_args.ignore_errors, True)
    self.assertEqual(hunt.runner_args.output_plugins[0].plugin_name,
                     "DummyOutputPlugin")

    # Check that hunt was not started
    self.assertEqual(hunt.Get(hunt.Schema.STATE), "PAUSED")

    # Now try to start the hunt.
    self.Click("css=button[name=RunHunt]")

    # Note that hunt ACL controls are already tested in acl_manager_test.py.

    # Run the hunt.
    with aff4.FACTORY.Open(hunt.urn, mode="rw", token=self.token) as hunt:
      hunt.Run()

    # Check that the hunt was created with correct rules
    hunt_rules = self.FindForemanRules(hunt, token=self.token)

    self.assertEqual(len(hunt_rules), 1)
    lifetime = hunt_rules[0].GetLifetime()
    lifetime -= rdfvalue.Duration("2w")
    self.assertLessEqual(lifetime, rdfvalue.Duration("1s"))

    r = hunt_rules[0].client_rule_set

    self.assertEqual(r.match_mode,
                     foreman_rules.ForemanClientRuleSet.MatchMode.MATCH_ANY)
    self.assertEqual(len(r.rules), 3)

    self.assertEqual(r.rules[0].rule_type,
                     foreman_rules.ForemanClientRule.Type.OS)
    self.assertEqual(r.rules[0].os.os_windows, False)
    self.assertEqual(r.rules[0].os.os_linux, False)
    self.assertEqual(r.rules[0].os.os_darwin, True)

    self.assertEqual(r.rules[1].rule_type,
                     foreman_rules.ForemanClientRule.Type.INTEGER)
    self.assertEqual(r.rules[1].integer.field, "CLIENT_CLOCK")
    self.assertEqual(
        r.rules[1].integer.operator,
        foreman_rules.ForemanIntegerClientRule.Operator.GREATER_THAN)
    self.assertEqual(r.rules[1].integer.value, 1336650631137737)

    self.assertEqual(r.rules[2].rule_type,
                     foreman_rules.ForemanClientRule.Type.REGEX)
    self.assertEqual(r.rules[2].regex.field, "SYSTEM")
    self.assertEqual(r.rules[2].regex.attribute_regex, "Linux")
Example #27
    def testRefreshFileStartsFlow(self):
        self.Open("/")

        self.Type("client_query", "C.0000000000000001")
        self.Click("client_query_submit")

        self.WaitUntilEqual(u"C.0000000000000001", self.GetText,
                            "css=span[type=subject]")

        # Choose client 1.
        self.Click("css=td:contains('0001')")

        # Go to Browse VFS.
        self.Click("css=a:contains('Browse Virtual Filesystem')")

        self.Click("css=#_fs i.jstree-icon")
        self.Click("css=#_fs-os i.jstree-icon")
        self.Click("css=#_fs-os-c i.jstree-icon")

        # Test file versioning.
        self.WaitUntil(self.IsElementPresent, "css=#_fs-os-c-Downloads")
        self.Click("link=Downloads")

        # Select a file and start a flow by requesting a newer version.
        self.Click("css=tr:contains(\"a.txt\")")
        self.Click("css=li[heading=Download]")
        self.Click("css=button:contains(\"Collect from the client\")")

        # Create a new file version (that would have been created by the flow
        # otherwise) and finish the flow.
        with self.ACLChecksDisabled():
            client_id = rdf_client.ClientURN("C.0000000000000001")

            fd = aff4.FACTORY.Open(client_id.Add("flows"), token=self.token)

            # Make sure that the flow has started (when button is clicked, the HTTP
            # API request is sent asynchronously).
            def MultiGetFileStarted():
                return "MultiGetFile" in list(x.__class__.__name__
                                              for x in fd.OpenChildren())

            self.WaitUntil(MultiGetFileStarted)

            flows = list(fd.ListChildren())

            client_mock = action_mocks.MultiGetFileClientMock()
            for flow_urn in flows:
                for _ in test_lib.TestFlowHelper(flow_urn,
                                                 client_mock,
                                                 client_id=client_id,
                                                 check_flow_errors=False,
                                                 token=self.token):
                    pass

            time_in_future = rdfvalue.RDFDatetime.Now() + rdfvalue.Duration(
                "1h")
            # We have to make sure that the new version will not be within a second
            # from the current one, otherwise the previous one and the new one will
            # be indistinguishable in the UI (as it has a 1s precision when
            # displaying versions).
            with test_lib.FakeTime(time_in_future):
                with aff4.FACTORY.Open(
                        "aff4:/C.0000000000000001/fs/os/c/Downloads/a.txt",
                        aff4_type=aff4_grr.VFSFile,
                        mode="rw",
                        token=self.token) as fd:
                    fd.Write("The newest version!")

        # Once the flow has finished, the file view should update and add the
        # newly created, latest version of the file to the list. The selected
        # option should still be "HEAD".
        self.WaitUntilContains("HEAD", self.GetText,
                               "css=.version-dropdown > option[selected]")
        self.WaitUntilContains(gui_test_lib.DateTimeString(time_in_future),
                               self.GetText,
                               "css=.version-dropdown > option:nth(1)")

        # The file table should also update and display the new timestamp.
        self.WaitUntil(
            self.IsElementPresent,
            "css=grr-file-table tbody > tr td:contains(\"%s\")" %
            (gui_test_lib.DateTimeString(time_in_future)))

        # Make sure the file content has changed.
        self.Click("css=li[heading=TextView]")
        self.WaitUntilContains("The newest version!", self.GetText,
                               "css=div.monospace pre")

        # Go to the flow management screen and check that there was a new flow.
        self.Click("css=a:contains('Manage launched flows')")
        self.Click("css=grr-flows-list tr:contains('MultiGetFile')")
        self.WaitUntilContains("MultiGetFile", self.GetText,
                               "css=#main_bottomPane")

        self.WaitUntilContains(
            "c/Downloads/a.txt", self.GetText,
            "css=#main_bottomPane table > tbody td.proto_key:contains(\"Path\") "
            "~ td.proto_value")
Example #28
class StatusRenderer(renderers.TemplateRenderer):
    """A renderer for the online status line."""

    MAX_TIME_SINCE_CRASH = rdfvalue.Duration("1w")

    layout_template = renderers.Template("""
Status: {{this.icon|safe}}
{{this.last_seen_msg|escape}}.
{% if this.ip_description %}
  <br>
  {{this.ip_icon|safe}} {{this.ip_description|escape}}
{% endif %}
{% if this.last_crash %}
  <br>
  <strong>Last crash:</strong><br>
  <img class='grr-icon' src='/static/images/skull-icon.png'> {{this.last_crash}}<br/>
{% endif %}
{% if this.disk_full %}
  <br>
  <img class='grr-icon' src='/static/images/hdd-bang-icon.png'>
  <strong>Disk free space low:</strong><br>
  {% for message in this.disk_full %}
    {{message|escape}}<br/>
  {% endfor %}
{% endif %}
<br>
""")

    def Layout(self, request, response):
        """Manage content pane depending on passed in query parameter."""

        client_id = request.REQ.get("client_id")
        if client_id:
            client_id = rdfvalue.ClientURN(client_id)
            client = aff4.FACTORY.Open(client_id, token=request.token)

            self.last_crash = None
            crash = client.Get(client.Schema.LAST_CRASH)
            if crash:
                time_since_crash = (
                    rdfvalue.RDFDatetime().Now() - crash.timestamp)
                if time_since_crash < self.MAX_TIME_SINCE_CRASH:
                    self.last_crash = FormatLastSeenTime(crash.timestamp)

            self.disk_full = GetLowDiskWarnings(client)

            ping = client.Get(client.Schema.PING)
            if ping:
                age = ping
            else:
                age = 0

            # Also check for proper access.
            aff4.FACTORY.Open(client.urn.Add("fs"), token=request.token)

            self.icon = OnlineStateIcon(age).RawHTML(request)
            self.last_seen_msg = FormatLastSeenTime(age)

            ip = client.Get(client.Schema.CLIENT_IP)
            (status, description) = utils.RetrieveIPInfo(ip)
            self.ip_icon = IPStatusIcon(status).RawHTML(request)
            self.ip_description = description

        return super(StatusRenderer, self).Layout(request, response)

    def RenderAjax(self, request, response):
        return self.Layout(request, response)
Example #29
    def testFlowDuplicateLimit(self):
        # Disable the request limit checking by setting it to 0.
        throttler = throttle.FlowThrottler(
            daily_req_limit=0, dup_interval=rdfvalue.Duration("1200s"))

        # Running the same flow immediately should fail
        with test_lib.FakeTime(self.BASE_TIME):
            throttler.EnforceLimits(self.client_id,
                                    self.token.username,
                                    flow_test_lib.DummyLogFlow.__name__,
                                    None,
                                    token=self.token)

            flow.GRRFlow.StartFlow(
                client_id=self.client_id,
                flow_name=flow_test_lib.DummyLogFlow.__name__,
                token=self.token)

            with self.assertRaises(throttle.ErrorFlowDuplicate):
                throttler.EnforceLimits(self.client_id,
                                        self.token.username,
                                        flow_test_lib.DummyLogFlow.__name__,
                                        None,
                                        token=self.token)

        # Doing the same outside the window should work
        with test_lib.FakeTime(self.BASE_TIME + 1200 + 1):
            throttler.EnforceLimits(self.client_id,
                                    self.token.username,
                                    flow_test_lib.DummyLogFlow.__name__,
                                    None,
                                    token=self.token)

            flow.GRRFlow.StartFlow(
                client_id=self.client_id,
                flow_name=flow_test_lib.DummyLogFlow.__name__,
                token=self.token)

            with self.assertRaises(throttle.ErrorFlowDuplicate):
                throttler.EnforceLimits(self.client_id,
                                        self.token.username,
                                        flow_test_lib.DummyLogFlow.__name__,
                                        None,
                                        token=self.token)

        # Now try a flow with more complicated args
        args = rdf_file_finder.FileFinderArgs(
            paths=["/tmp/1", "/tmp/2"],
            action=rdf_file_finder.FileFinderAction(action_type="STAT"))

        with test_lib.FakeTime(self.BASE_TIME):
            throttler.EnforceLimits(self.client_id,
                                    self.token.username,
                                    file_finder.FileFinder.__name__,
                                    args,
                                    token=self.token)

            flow.GRRFlow.StartFlow(
                client_id=self.client_id,
                flow_name=file_finder.FileFinder.__name__,
                token=self.token,
                paths=["/tmp/1", "/tmp/2"],
                action=rdf_file_finder.FileFinderAction(action_type="STAT"))

            with self.assertRaises(throttle.ErrorFlowDuplicate):
                throttler.EnforceLimits(self.client_id,
                                        self.token.username,
                                        file_finder.FileFinder.__name__,
                                        args,
                                        token=self.token)

            # Different args should succeed.
            args = rdf_file_finder.FileFinderArgs(
                paths=["/tmp/1", "/tmp/3"],
                action=rdf_file_finder.FileFinderAction(action_type="STAT"))

            throttler.EnforceLimits(self.client_id,
                                    self.token.username,
                                    file_finder.FileFinder.__name__,
                                    args,
                                    token=self.token)
Example #30
    def CheckAccess(self, token):
        """Enforce a dual approver policy for access."""
        namespace, _ = self.urn.Split(2)

        if namespace != "ACL":
            raise access_control.UnauthorizedAccess(
                "Approval object has invalid urn %s.",
                subject=self.urn,
                requested_access=token.requested_access)

        user, subject_urn = self.InferUserAndSubjectFromUrn()
        if user != token.username:
            raise access_control.UnauthorizedAccess(
                "Approval object is not for user %s." % token.username,
                subject=self.urn,
                requested_access=token.requested_access)

        now = rdfvalue.RDFDatetime().Now()

        # Is this an emergency access?
        break_glass = self.Get(self.Schema.BREAK_GLASS)
        if break_glass and now < break_glass:
            # This tags the token as an emergency token.
            token.is_emergency = True
            return True

        lifetime = rdfvalue.Duration(
            self.Get(self.Schema.LIFETIME)
            or config_lib.CONFIG["ACL.token_expiry"])

        # Check that there are enough approvers.
        approvers = set()
        for approver in self.GetValuesForAttribute(self.Schema.APPROVER):
            if approver.age + lifetime > now:
                approvers.add(utils.SmartStr(approver))

        if len(approvers) < config_lib.CONFIG["ACL.approvers_required"]:
            raise access_control.UnauthorizedAccess(
                ("Requires %s approvers for access." %
                 config_lib.CONFIG["ACL.approvers_required"]),
                subject=subject_urn,
                requested_access=token.requested_access)

        if self.checked_approvers_label:
            approvers_with_label = []

            # We need to check labels with high privilege since normal users
            # can't inspect other users' labels.
            for approver in approvers:
                try:
                    data_store.DB.security_manager.CheckUserLabels(
                        approver, [self.checked_approvers_label],
                        token=token.SetUID())
                    approvers_with_label.append(approver)
                except access_control.UnauthorizedAccess:
                    pass

            if len(approvers_with_label) < self.min_approvers_with_label:
                raise access_control.UnauthorizedAccess(
                    "At least %d approver(s) should have '%s' label." %
                    (self.min_approvers_with_label,
                     self.checked_approvers_label),
                    subject=subject_urn,
                    requested_access=token.requested_access)

        return True
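
The approver-counting loop only accepts grants that are still within the configured lifetime. A worked sketch of that freshness check with concrete offsets (assuming the RDFDatetime/Duration arithmetic shown in the other examples here):

# Against a 1-day lifetime, an approval granted 20 hours ago still counts,
# while one granted 2 days ago has expired.
now = rdfvalue.RDFDatetime().Now()
lifetime = rdfvalue.Duration("1d")

fresh_grant_age = now - rdfvalue.Duration("20h")
stale_grant_age = now - rdfvalue.Duration("2d")

assert fresh_grant_age + lifetime > now
assert not (stale_grant_age + lifetime > now)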