Example #1
  def testFloorExact(self):
    datetime = rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11 12:34:56")
    self.assertEqual(datetime.Floor(rdfvalue.Duration("1s")), datetime)
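Across these examples, Duration is constructed from a "<number><unit>" string; the unit suffixes "s", "m", "h", "d", and "w" (seconds, minutes, hours, days, weeks) all appear below. A minimal plain-Python sketch of the arithmetic this implies, with a hypothetical helper name (this is not the GRR API):

UNIT_US = {
    "s": 10**6,
    "m": 60 * 10**6,
    "h": 3600 * 10**6,
    "d": 86400 * 10**6,
    "w": 7 * 86400 * 10**6,
}

def parse_duration_us(text):
    """Returns the duration encoded by e.g. "10m" as microseconds."""
    number, unit = int(text[:-1]), text[-1]
    return number * UNIT_US[unit]

assert parse_duration_us("1s") == 10**6
assert parse_duration_us("10m") == 600 * 10**6
assert parse_duration_us("-60s") == -60 * 10**6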
Example #2
    def testDownsampled(self):
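        # Alias the parser: `timestamp(...)` below builds an RDFDatetime
        # from a human-readable string.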
        timestamp = rdfvalue.RDFDatetime.FromHumanReadable

        stats = rdf_client.ClientStats(
            cpu_samples=[
                rdf_client.CpuSample(timestamp=timestamp("2001-01-01 00:00"),
                                     user_cpu_time=2.5,
                                     system_cpu_time=3.2,
                                     cpu_percent=0.5),
                rdf_client.CpuSample(timestamp=timestamp("2001-01-01 00:05"),
                                     user_cpu_time=2.6,
                                     system_cpu_time=4.7,
                                     cpu_percent=0.6),
                rdf_client.CpuSample(timestamp=timestamp("2001-01-01 00:10"),
                                     user_cpu_time=10.0,
                                     system_cpu_time=14.2,
                                     cpu_percent=0.9),
                rdf_client.CpuSample(timestamp=timestamp("2001-01-01 00:12"),
                                     user_cpu_time=12.3,
                                     system_cpu_time=14.9,
                                     cpu_percent=0.1),
                rdf_client.CpuSample(timestamp=timestamp("2001-01-01 00:21"),
                                     user_cpu_time=16.1,
                                     system_cpu_time=22.3,
                                     cpu_percent=0.4)
            ],
            io_samples=[
                rdf_client.IOSample(timestamp=timestamp("2001-01-01 00:00"),
                                    read_count=0,
                                    write_count=0),
                rdf_client.IOSample(timestamp=timestamp("2001-01-01 00:02"),
                                    read_count=3,
                                    write_count=5),
                rdf_client.IOSample(timestamp=timestamp("2001-01-01 00:12"),
                                    read_count=6,
                                    write_count=8),
            ])

        expected = rdf_client.ClientStats(
            cpu_samples=[
                rdf_client.CpuSample(timestamp=timestamp("2001-01-01 00:05"),
                                     user_cpu_time=2.6,
                                     system_cpu_time=4.7,
                                     cpu_percent=0.55),
                rdf_client.CpuSample(timestamp=timestamp("2001-01-01 00:12"),
                                     user_cpu_time=12.3,
                                     system_cpu_time=14.9,
                                     cpu_percent=0.5),
                rdf_client.CpuSample(timestamp=timestamp("2001-01-01 00:21"),
                                     user_cpu_time=16.1,
                                     system_cpu_time=22.3,
                                     cpu_percent=0.4),
            ],
            io_samples=[
                rdf_client.IOSample(timestamp=timestamp("2001-01-01 00:02"),
                                    read_count=3,
                                    write_count=5),
                rdf_client.IOSample(timestamp=timestamp("2001-01-01 00:12"),
                                    read_count=6,
                                    write_count=8),
            ])

        actual = rdf_client.ClientStats.Downsampled(
            stats, interval=rdfvalue.Duration("10m"))

        self.assertEqual(actual, expected)
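The expected values pin down the aggregation per 10-minute bucket: cumulative quantities (user/system CPU time, IO read/write counts) keep the last sample of the bucket, cpu_percent is averaged, and the bucket carries the last sample's timestamp. A plain-Python sketch of that rule for the CPU samples (not the GRR implementation, just the behavior the test implies):

def downsample_cpu(samples, interval_s=600):
    # samples: (ts_seconds, user_cpu_time, system_cpu_time, cpu_percent),
    # sorted by timestamp.
    buckets = {}
    for sample in samples:
        buckets.setdefault(sample[0] // interval_s, []).append(sample)
    out = []
    for _, group in sorted(buckets.items()):
        ts, user, system, _ = group[-1]  # last sample wins for counters
        mean_pct = sum(s[3] for s in group) / len(group)
        out.append((ts, user, system, mean_pct))
    return out

samples = [(0, 2.5, 3.2, 0.5), (300, 2.6, 4.7, 0.6),
           (600, 10.0, 14.2, 0.9), (720, 12.3, 14.9, 0.1),
           (1260, 16.1, 22.3, 0.4)]
assert downsample_cpu(samples) == [(300, 2.6, 4.7, 0.55),
                                   (720, 12.3, 14.9, 0.5),
                                   (1260, 16.1, 22.3, 0.4)]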
Example #3
  def testNewHuntWizard(self):
    self.CreateHuntFixtureWithTwoClients()

    # Open up and click on View Hunts.
    self.Open("/")
    self.WaitUntil(self.IsElementPresent, "client_query")
    self.WaitUntil(self.IsElementPresent, "css=a[grrtarget=hunts]")
    self.Click("css=a[grrtarget=hunts]")
    self.WaitUntil(self.IsElementPresent, "css=button[name=NewHunt]")

    # Open up "New Hunt" wizard
    self.Click("css=button[name=NewHunt]")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('What to run?')")

    # Click on Filesystem item in flows list
    self.WaitUntil(self.IsElementPresent, "css=#_Filesystem > i.jstree-icon")
    self.Click("css=#_Filesystem > i.jstree-icon")

    # Click on the FileFinder item in Filesystem flows list
    self.Click("link=File Finder")

    # Wait for flow configuration form to be rendered (just wait for first
    # input field).
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-new-hunt-wizard-form label:contains('Paths')")

    # Change "path" and "pathtype" values
    self.Type(
        "css=grr-new-hunt-wizard-form "
        "grr-form-proto-repeated-field:has(label:contains('Paths')) "
        "input", "/tmp")
    self.Select(
        "css=grr-new-hunt-wizard-form "
        "grr-form-proto-single-field:has(label:contains('Pathtype')) "
        "select", "TSK")

    # Click on "Next" button
    self.Click("css=grr-new-hunt-wizard-form button.Next")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('Hunt parameters')")

    # Click on "Next" button
    self.Click("css=grr-new-hunt-wizard-form button.Next")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('How to process results')")

    # Click on "Back" button and check that all the values in the form
    # remain intact.
    self.Click("css=grr-new-hunt-wizard-form button.Back")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('Hunt parameters')")

    self.Click("css=grr-new-hunt-wizard-form button.Back")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-new-hunt-wizard-form label:contains('Paths')")

    self.assertEqual(
        "/tmp",
        self.GetValue(
            "css=grr-new-hunt-wizard-form "
            "grr-form-proto-repeated-field:has(label:contains('Paths')) input"))

    self.assertEqual(
        "TSK",
        self.GetSelectedLabel(
            "css=grr-new-hunt-wizard-form "
            "grr-form-proto-single-field:has(label:contains('Pathtype')) select"
        ))

    # Click on "Next" button
    self.Click("css=grr-new-hunt-wizard-form button.Next")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('Hunt parameters')")

    # Click on "Next" button
    self.Click("css=grr-new-hunt-wizard-form button.Next")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('How to process results')")

    # Configure the hunt to use dummy output plugin.
    self.Click("css=grr-new-hunt-wizard-form button[name=Add]")
    self.Select("css=grr-new-hunt-wizard-form select", "DummyOutputPlugin")
    self.Type(
        "css=grr-new-hunt-wizard-form "
        "grr-form-proto-single-field:has(label:contains('Filename Regex')) "
        "input", "some regex")

    # Click on "Next" button
    self.Click("css=grr-new-hunt-wizard-form button.Next")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('Where to run?')")

    # Empty set of rules should be valid.
    self.WaitUntil(self.IsElementPresent, "css=button.Next:not([disabled])")

    # A note explains what an empty set of rules means.
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('No rules specified!')")

    # An alternative match mode, which matches a client if any of the rules
    # evaluates to true, can be selected.
    self.Select(
        "css=grr-configure-rules-page "
        "label:contains('Match mode') ~ * select", "Match any")

    # The note depends on the match mode.
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('No rules specified!')")

    # Create 3 foreman rules. Note that the "Add" button adds rules to the
    # beginning of the list, so we always use the :nth(0) selector.
    self.Click("css=grr-configure-rules-page button[name=Add]")
    self.Select("css=grr-configure-rules-page div.well:nth(0) select", "Regex")
    rule = foreman_rules.ForemanRegexClientRule
    label = rule.ForemanStringField.SYSTEM.description
    self.Select(
        "css=grr-configure-rules-page div.well:nth(0) "
        "label:contains('Field') ~ * select", label)
    self.Type(
        "css=grr-configure-rules-page div.well:nth(0) "
        "label:contains('Attribute regex') ~ * input", "Linux")

    self.Click("css=grr-configure-rules-page button[name=Add]")
    self.Select("css=grr-configure-rules-page div.well:nth(0) select",
                "Integer")

    rule = foreman_rules.ForemanIntegerClientRule
    label = rule.ForemanIntegerField.CLIENT_CLOCK.description
    self.Select(
        "css=grr-configure-rules-page div.well:nth(0) "
        "label:contains('Field') ~ * select", label)
    self.Select(
        "css=grr-configure-rules-page div.well:nth(0) "
        "label:contains('Operator') ~ * select", "GREATER_THAN")
    self.Type(
        "css=grr-configure-rules-page div.well:nth(0) "
        "label:contains('Value') ~ * input", "1336650631137737")

    self.Click("css=grr-configure-rules-page button[name=Add]")
    self.Click("css=grr-configure-rules-page div.well:nth(0) "
               "label:contains('Os darwin') ~ * input[type=checkbox]")

    # Click on "Back" button
    self.Click("css=grr-new-hunt-wizard-form button.Back")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('How to process results')")

    # Click on "Next" button again and check that all the values that
    # we've just entered remain intact.
    self.Click("css=grr-new-hunt-wizard-form button.Next")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('Where to run?')")

    # Click on "Next" button
    self.Click("css=grr-new-hunt-wizard-form button.Next")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('Review')")

    # Check that the arguments summary is present.
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('Paths')")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('/tmp')")

    # Check that output plugins are shown.
    self.assertTrue(
        self.IsElementPresent(
            "css=grr-wizard-form:contains('DummyOutputPlugin')"))
    self.assertTrue(
        self.IsElementPresent("css=grr-wizard-form:contains('some regex')"))

    # Check that there's no deprecated rules summary.
    self.assertFalse(
        self.IsElementPresent("css=grr-wizard-form:contains('Regex rules')"))
    self.assertFalse(
        self.IsElementPresent("css=grr-wizard-form:contains('Integer rules')"))

    # Check that rules summary is present.
    self.assertTrue(
        self.IsElementPresent(
            "css=grr-wizard-form:contains('Client rule set')"))

    # Click on "Run" button
    self.Click("css=grr-new-hunt-wizard-form button.Next")

    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('Created Hunt')")

    # Close the window and check that the hunt was created.
    self.Click("css=button.Next")

    # Select newly created hunt.
    self.Click("css=grr-hunts-list td:contains('GenericHunt')")

    # Check that correct details are displayed in hunt details tab.
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-hunt-inspector:contains('GenericHunt')")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-hunt-inspector:contains('Flow Arguments')")

    self.assertTrue(
        self.IsElementPresent("css=grr-hunt-inspector:contains('Paths')"))
    self.assertTrue(
        self.IsElementPresent("css=grr-hunt-inspector:contains('/tmp')"))

    self.assertTrue(
        self.IsElementPresent(
            "css=grr-hunt-inspector:contains('DummyOutputPlugin')"))
    self.assertTrue(
        self.IsElementPresent("css=grr-hunt-inspector:contains('some regex')"))

    # Check that there's no deprecated rules summary.
    self.assertFalse(
        self.IsElementPresent("css=grr-hunt-inspector:contains('Regex rules')"))
    self.assertFalse(
        self.IsElementPresent(
            "css=grr-hunt-inspector:contains('Integer rules')"))

    # Check that rules summary is present.
    self.assertTrue(
        self.IsElementPresent(
            "css=grr-hunt-inspector:contains('Client Rule Set')"))

    # Check that the hunt object was actually created
    hunts_root = aff4.FACTORY.Open("aff4:/hunts", token=self.token)
    hunts_list = list(hunts_root.OpenChildren())
    self.assertEqual(len(hunts_list), 1)

    # Check that the hunt was created with a correct flow
    hunt = hunts_list[0]
    self.assertEqual(hunt.args.flow_runner_args.flow_name,
                     file_finder.FileFinder.__name__)
    self.assertEqual(hunt.args.flow_args.paths[0], "/tmp")
    self.assertEqual(hunt.args.flow_args.pathtype,
                     rdf_paths.PathSpec.PathType.TSK)
    # self.assertEqual(hunt.args.flow_args.ignore_errors, True)
    self.assertEqual(hunt.runner_args.output_plugins[0].plugin_name,
                     "DummyOutputPlugin")

    # Check that hunt was not started
    self.assertEqual(hunt.Get(hunt.Schema.STATE), "PAUSED")

    # Now try to start the hunt.
    self.Click("css=button[name=RunHunt]")

    # Note that hunt ACL controls are already tested in acl_manager_test.py.

    # Run the hunt.
    with aff4.FACTORY.Open(hunt.urn, mode="rw", token=self.token) as hunt:
      hunt.Run()

    # Check that the hunt was created with correct rules
    hunt_rules = self.FindForemanRules(hunt, token=self.token)

    self.assertEqual(len(hunt_rules), 1)
    lifetime = hunt_rules[0].GetLifetime()
    lifetime -= rdfvalue.Duration("2w")
    self.assertLessEqual(lifetime, rdfvalue.Duration("1s"))

    r = hunt_rules[0].client_rule_set

    self.assertEqual(r.match_mode,
                     foreman_rules.ForemanClientRuleSet.MatchMode.MATCH_ANY)
    self.assertEqual(len(r.rules), 3)

    self.assertEqual(r.rules[0].rule_type,
                     foreman_rules.ForemanClientRule.Type.OS)
    self.assertEqual(r.rules[0].os.os_windows, False)
    self.assertEqual(r.rules[0].os.os_linux, False)
    self.assertEqual(r.rules[0].os.os_darwin, True)

    self.assertEqual(r.rules[1].rule_type,
                     foreman_rules.ForemanClientRule.Type.INTEGER)
    self.assertEqual(r.rules[1].integer.field, "CLIENT_CLOCK")
    self.assertEqual(
        r.rules[1].integer.operator,
        foreman_rules.ForemanIntegerClientRule.Operator.GREATER_THAN)
    self.assertEqual(r.rules[1].integer.value, 1336650631137737)

    self.assertEqual(r.rules[2].rule_type,
                     foreman_rules.ForemanClientRule.Type.REGEX)
    self.assertEqual(r.rules[2].regex.field, "SYSTEM")
    self.assertEqual(r.rules[2].regex.attribute_regex, "Linux")
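The lifetime check above is a tolerance assertion: subtracting Duration("2w") and requiring the remainder to be at most Duration("1s") verifies that the foreman rule's lifetime does not exceed two weeks by more than a second (the difference may also come out negative if the lifetime is measured a moment after creation, which assertLessEqual accepts). In plain microseconds:

WEEK_US = 7 * 86400 * 10**6
SECOND_US = 10**6
lifetime_us = 2 * WEEK_US - 42  # e.g. measured shortly after rule creation
assert lifetime_us - 2 * WEEK_US <= SECOND_US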
Example #4
    def testRunWithoutContext(self):
        weez_timeline = test_lib.FakeTimeline(
            threading.Thread(target=lambda: None))

        with self.assertRaisesRegexp(AssertionError, "called without context"):
            weez_timeline.Run(duration=rdfvalue.Duration("10s"))
Example #5
class IndexedSequentialCollection(SequentialCollection):
    """An indexed sequential collection of RDFValues.

  Adds an index to SequentialCollection, making it efficient to find the number
  of records present, and to find a particular record number.

  IMPLEMENTATION NOTE: The index is created lazily, and for records older than
    INDEX_WRITE_DELAY.
  """

    # How many records between index entries. Subclasses may change this.  The
    # full index must fit comfortably in RAM, default is meant to be reasonable
    # for collections of up to ~1b small records. (Assumes we can have ~1m index
    # points in ram, and that reading 1k records is reasonably fast.)

    INDEX_SPACING = 1024

    # An attribute name of the form "index:sc_<i>" at timestamp <t> indicates that
    # the item with record number i was stored at timestamp t. The timestamp
    # suffix is stored as the value.

    INDEX_ATTRIBUTE_PREFIX = "index:sc_"

    # The time to wait before creating an index for a record - hacky defense
    # against the correct index changing due to a late write.

    INDEX_WRITE_DELAY = rdfvalue.Duration("3m")

    def __init__(self, *args, **kwargs):
        super(IndexedSequentialCollection, self).__init__(*args, **kwargs)
        self._index = None

    def _ReadIndex(self):
        if self._index:
            return
        self._index = {0: (0, 0)}
        self._max_indexed = 0
        for (index, ts,
             suffix) in data_store.DB.CollectionReadIndex(self.collection_id):
            self._index[index] = (ts, suffix)
            self._max_indexed = max(index, self._max_indexed)

    def _MaybeWriteIndex(self, i, ts, mutation_pool):
        """Write index marker i."""
        if i > self._max_indexed and i % self.INDEX_SPACING == 0:
            # We only write the index if the timestamp is more than
            # INDEX_WRITE_DELAY in the past: hacky defense against a late
            # write changing the count.
            if ts[0] < (rdfvalue.RDFDatetime.Now() -
                        self.INDEX_WRITE_DELAY).AsMicrosecondsSinceEpoch():
                mutation_pool.CollectionAddIndex(self.collection_id, i, ts[0],
                                                 ts[1])
                self._index[i] = ts
                self._max_indexed = max(i, self._max_indexed)

    def _IndexedScan(self, i, max_records=None):
        """Scan records starting with index i."""
        self._ReadIndex()

        # The record number that we will read next.
        idx = 0
        # The timestamp that we will start reading from.
        start_ts = 0
        if i >= self._max_indexed:
            start_ts = max((0, 0), (self._index[self._max_indexed][0],
                                    self._index[self._max_indexed][1] - 1))
            idx = self._max_indexed
        else:
            try:
                possible_idx = i - i % self.INDEX_SPACING
                start_ts = (max(0, self._index[possible_idx][0]),
                            self._index[possible_idx][1] - 1)
                idx = possible_idx
            except KeyError:
                pass

        if max_records is not None:
            max_records += i - idx

        with data_store.DB.GetMutationPool() as mutation_pool:
            for (ts, value) in self.Scan(after_timestamp=start_ts,
                                         max_records=max_records,
                                         include_suffix=True):
                self._MaybeWriteIndex(idx, ts, mutation_pool)
                if idx >= i:
                    yield (idx, ts, value)
                idx += 1

    def GenerateItems(self, offset=0):
        for (_, _, value) in self._IndexedScan(offset):
            yield value

    def __getitem__(self, index):
        if index >= 0:
            for (_, _, value) in self._IndexedScan(index, max_records=1):
                return value
            raise IndexError("collection index out of range")
        else:
            raise RuntimeError("Index must be >= 0")

    def CalculateLength(self):
        self._ReadIndex()
        highest_index = None
        for (i, _, _) in self._IndexedScan(self._max_indexed):
            highest_index = i
        if highest_index is None:
            return 0
        return highest_index + 1

    def __len__(self):
        return self.CalculateLength()

    def UpdateIndex(self):
        self._ReadIndex()
        for _ in self._IndexedScan(self._max_indexed):
            pass

    @classmethod
    def StaticAdd(cls,
                  collection_urn,
                  rdf_value,
                  timestamp=None,
                  suffix=None,
                  mutation_pool=None):
        r = super(IndexedSequentialCollection,
                  cls).StaticAdd(collection_urn,
                                 rdf_value,
                                 timestamp=timestamp,
                                 suffix=suffix,
                                 mutation_pool=mutation_pool)
        if random.randint(0, cls.INDEX_SPACING) == 0:
            BACKGROUND_INDEX_UPDATER.AddIndexToUpdate(cls, collection_urn)
        return r
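The scanning shortcut in _IndexedScan relies on index points being written every INDEX_SPACING records: for a target record i, the candidate index entry is the largest multiple of the spacing not exceeding i, so at most INDEX_SPACING - 1 records have to be scanned before reaching i. A small sketch of that lookup arithmetic:

INDEX_SPACING = 1024

def nearest_index_point(i, spacing=INDEX_SPACING):
    """Returns the highest indexed record number at or below i."""
    return i - i % spacing

assert nearest_index_point(0) == 0
assert nearest_index_point(1023) == 0
assert nearest_index_point(1024) == 1024
assert nearest_index_point(5000) == 4096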
Example #6
  def GenerateSample(self, number=5):
    return rdfvalue.Duration("%ds" % number)
Example #7
  def testAddDuration(self):
    duration = rdfvalue.Duration("12h")
    date = rdfvalue.RDFDatetime(1e9)
    self.assertEqual(int(date + duration), 1e9 + 12 * 3600e6)
    duration = rdfvalue.Duration("-60s")
    self.assertEqual(int(date + duration), 1e9 - 60e6)
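The constants in this test imply that RDFDatetime wraps a count of microseconds since the epoch and that adding a Duration shifts that count by the duration's length in microseconds (negative durations shift backwards). The same arithmetic in plain integers:

date_us = 10**9                  # RDFDatetime(1e9)
half_day_us = 12 * 3600 * 10**6  # Duration("12h")
minute_us = 60 * 10**6           # Duration("-60s"), magnitude
assert date_us + half_day_us == 44200000000
assert date_us - minute_us == 940000000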
Example #8
class ProcessHuntResultCollectionsCronFlow(cronjobs.SystemCronFlow):
    """Periodic cron flow that processes hunt results.

  The ProcessHuntResultCollectionsCronFlow reads hunt results stored in
  HuntResultCollections and feeds runs output plugins on them.
  """

    frequency = rdfvalue.Duration("5m")
    lifetime = rdfvalue.Duration("40m")
    allow_overruns = True

    BATCH_SIZE = 5000

    def CheckIfRunningTooLong(self):
        if self.max_running_time:
            elapsed = rdfvalue.RDFDatetime.Now() - self.start_time
            if elapsed > self.max_running_time:
                return True
        return False

    def LoadPlugins(self, metadata_obj):
        output_plugins = metadata_obj.Get(metadata_obj.Schema.OUTPUT_PLUGINS)
        if not output_plugins:
            return output_plugins, []

        output_plugins = output_plugins.ToDict()
        used_plugins = []
        unused_plugins = []

        for plugin_def, state in output_plugins.itervalues():
            if not hasattr(plugin_def, "GetPluginForState"):
                logging.error("Invalid plugin_def: %s", plugin_def)
                continue
            used_plugins.append(
                (plugin_def, plugin_def.GetPluginForState(state)))
        return output_plugins, used_plugins

    def RunPlugins(self, hunt_urn, plugins, results, exceptions_by_plugin):
        for plugin_def, plugin in plugins:
            try:
                plugin.ProcessResponses(results)
                plugin.Flush()

                plugin_status = output_plugin.OutputPluginBatchProcessingStatus(
                    plugin_descriptor=plugin_def,
                    status="SUCCESS",
                    batch_size=len(results))
                stats.STATS.IncrementCounter("hunt_results_ran_through_plugin",
                                             delta=len(results),
                                             fields=[plugin_def.plugin_name])

            except Exception as e:  # pylint: disable=broad-except
                logging.exception(
                    "Error processing hunt results: hunt %s, "
                    "plugin %s", hunt_urn, utils.SmartStr(plugin))
                self.Log("Error processing hunt results (hunt %s, "
                         "plugin %s): %s" %
                         (hunt_urn, utils.SmartStr(plugin), e))
                stats.STATS.IncrementCounter("hunt_output_plugin_errors",
                                             fields=[plugin_def.plugin_name])

                plugin_status = output_plugin.OutputPluginBatchProcessingStatus(
                    plugin_descriptor=plugin_def,
                    status="ERROR",
                    summary=utils.SmartStr(e),
                    batch_size=len(results))
                exceptions_by_plugin.setdefault(plugin_def, []).append(e)

            with data_store.DB.GetMutationPool() as pool:
                implementation.GRRHunt.PluginStatusCollectionForHID(
                    hunt_urn).Add(plugin_status, mutation_pool=pool)
                if plugin_status.status == plugin_status.Status.ERROR:
                    implementation.GRRHunt.PluginErrorCollectionForHID(
                        hunt_urn).Add(plugin_status, mutation_pool=pool)

    def ProcessOneHunt(self, exceptions_by_hunt):
        """Reads results for one hunt and process them."""
        hunt_results_urn, results = (
            hunts_results.HuntResultQueue.ClaimNotificationsForCollection(
                token=self.token, lease_time=self.lifetime))
        logging.debug("Found %d results for hunt %s", len(results),
                      hunt_results_urn)
        if not results:
            return 0

        hunt_urn = rdfvalue.RDFURN(hunt_results_urn.Dirname())
        batch_size = self.BATCH_SIZE
        metadata_urn = hunt_urn.Add("ResultsMetadata")
        exceptions_by_plugin = {}
        num_processed_for_hunt = 0
        collection_obj = implementation.GRRHunt.ResultCollectionForHID(
            hunt_urn)
        try:
            with aff4.FACTORY.OpenWithLock(metadata_urn,
                                           lease_time=600,
                                           token=self.token) as metadata_obj:
                all_plugins, used_plugins = self.LoadPlugins(metadata_obj)
                num_processed = int(
                    metadata_obj.Get(
                        metadata_obj.Schema.NUM_PROCESSED_RESULTS))
                for batch in utils.Grouper(results, batch_size):
                    results = list(
                        collection_obj.MultiResolve(
                            [r.value.ResultRecord() for r in batch]))
                    self.RunPlugins(hunt_urn, used_plugins, results,
                                    exceptions_by_plugin)

                    hunts_results.HuntResultQueue.DeleteNotifications(
                        batch, token=self.token)
                    num_processed += len(batch)
                    num_processed_for_hunt += len(batch)
                    self.HeartBeat()
                    metadata_obj.Set(
                        metadata_obj.Schema.NUM_PROCESSED_RESULTS(
                            num_processed))
                    metadata_obj.UpdateLease(600)
                    if self.CheckIfRunningTooLong():
                        logging.warning("Run too long, stopping.")
                        break

                metadata_obj.Set(
                    metadata_obj.Schema.OUTPUT_PLUGINS(all_plugins))
                metadata_obj.Set(
                    metadata_obj.Schema.NUM_PROCESSED_RESULTS(num_processed))
        except aff4.LockError:
            logging.warn(
                "ProcessHuntResultCollectionsCronFlow: "
                "Could not get lock on hunt metadata %s.", metadata_urn)
            return 0

        if exceptions_by_plugin:
            for plugin, exceptions in exceptions_by_plugin.items():
                exceptions_by_hunt.setdefault(hunt_urn, {}).setdefault(
                    plugin, []).extend(exceptions)

        logging.debug("Processed %d results.", num_processed_for_hunt)
        return len(results)

    @flow.StateHandler()
    def Start(self):
        self.start_time = rdfvalue.RDFDatetime.Now()

        exceptions_by_hunt = {}
        self.max_running_time = self.lifetime * 0.6

        while not self.CheckIfRunningTooLong():
            count = self.ProcessOneHunt(exceptions_by_hunt)
            if not count:
                break

        if exceptions_by_hunt:
            e = ResultsProcessingError()
            for hunt_urn, exceptions_by_plugin in exceptions_by_hunt.items():
                for plugin, exceptions in exceptions_by_plugin.items():
                    for exception in exceptions:
                        e.RegisterSubException(hunt_urn, plugin, exception)
            raise e
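Note the budgeting pattern in Start: the flow caps its own runtime at 60% of its declared lifetime (a 40-minute lifetime gives a 24-minute budget) and re-checks the budget between hunts; ProcessOneHunt additionally checks it between batches. A minimal sketch of the pattern:

import time

LIFETIME_S = 40 * 60
MAX_RUNNING_S = LIFETIME_S * 0.6  # stop well before the lifetime is up

def run(process_one_batch):
    """Processes batches until the time budget is spent or work runs out."""
    start = time.time()
    while time.time() - start < MAX_RUNNING_S:
        if not process_one_batch():
            break  # no work left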
Example #9
File: vfs_test.py Project: qsdj/grr
class VfsTestMixin(object):
    """A helper mixin providing methods to prepare files and flows for testing.
  """

    time_0 = rdfvalue.RDFDatetime(42)
    time_1 = time_0 + rdfvalue.Duration("1d")
    time_2 = time_1 + rdfvalue.Duration("1d")

    # TODO(hanuszczak): This function not only contains a lot of code
    # duplication, it also duplicates `gui_test_lib.CreateFileVersion(s)`.
    # This should be refactored in the near future.
    def CreateFileVersions(self, client_id, file_path):
        """Add a new version for a file."""
        path_type, components = rdf_objects.ParseCategorizedPath(file_path)

        with test_lib.FakeTime(self.time_1):
            token = access_control.ACLToken(username="******")
            fd = aff4.FACTORY.Create(client_id.Add(file_path),
                                     aff4.AFF4MemoryStream,
                                     mode="w",
                                     token=token)
            fd.Write("Hello World")
            fd.Close()

            if data_store.RelationalDBWriteEnabled():
                path_info = rdf_objects.PathInfo()
                path_info.path_type = path_type
                path_info.components = components
                path_info.directory = False

                data_store.REL_DB.WritePathInfos(client_id.Basename(),
                                                 [path_info])

        with test_lib.FakeTime(self.time_2):
            fd = aff4.FACTORY.Create(client_id.Add(file_path),
                                     aff4.AFF4MemoryStream,
                                     mode="w",
                                     token=token)
            fd.Write("Goodbye World")
            fd.Close()

            if data_store.RelationalDBWriteEnabled():
                path_info = rdf_objects.PathInfo()
                path_info.path_type = path_type
                path_info.components = components
                path_info.directory = False

                data_store.REL_DB.WritePathInfos(client_id.Basename(),
                                                 [path_info])

    def CreateRecursiveListFlow(self, client_id, token):
        flow_args = filesystem.RecursiveListDirectoryArgs()

        return flow.GRRFlow.StartFlow(
            client_id=client_id,
            flow_name=filesystem.RecursiveListDirectory.__name__,
            args=flow_args,
            token=token)

    def CreateMultiGetFileFlow(self, client_id, file_path, token):
        pathspec = rdf_paths.PathSpec(path=file_path,
                                      pathtype=rdf_paths.PathSpec.PathType.OS)
        flow_args = transfer.MultiGetFileArgs(pathspecs=[pathspec])

        return flow.GRRFlow.StartFlow(client_id=client_id,
                                      flow_name=transfer.MultiGetFile.__name__,
                                      args=flow_args,
                                      token=token)
Example #10
    def Handle(self, args, token=None):
        start_time = args.start
        end_time = args.end

        if not end_time:
            end_time = rdfvalue.RDFDatetime.Now()

        if not start_time:
            start_time = end_time - rdfvalue.Duration("30m")

        fd = aff4.FACTORY.Create(args.client_id.ToClientURN().Add("stats"),
                                 aff4_type=aff4_stats.ClientStats,
                                 mode="r",
                                 token=token,
                                 age=(start_time, end_time))

        stat_values = list(fd.GetValuesForAttribute(fd.Schema.STATS))
        points = []
        for stat_value in reversed(stat_values):
            if args.metric == args.Metric.CPU_PERCENT:
                points.extend((s.cpu_percent, s.timestamp)
                              for s in stat_value.cpu_samples)
            elif args.metric == args.Metric.CPU_SYSTEM:
                points.extend((s.system_cpu_time, s.timestamp)
                              for s in stat_value.cpu_samples)
            elif args.metric == args.Metric.CPU_USER:
                points.extend((s.user_cpu_time, s.timestamp)
                              for s in stat_value.cpu_samples)
            elif args.metric == args.Metric.IO_READ_BYTES:
                points.extend(
                    (s.read_bytes, s.timestamp) for s in stat_value.io_samples)
            elif args.metric == args.Metric.IO_WRITE_BYTES:
                points.extend((s.write_bytes, s.timestamp)
                              for s in stat_value.io_samples)
            elif args.metric == args.Metric.IO_READ_OPS:
                points.extend(
                    (s.read_count, s.timestamp) for s in stat_value.io_samples)
            elif args.metric == args.Metric.IO_WRITE_OPS:
                points.extend((s.write_count, s.timestamp)
                              for s in stat_value.io_samples)
            elif args.metric == args.Metric.NETWORK_BYTES_RECEIVED:
                points.append((stat_value.bytes_received, stat_value.age))
            elif args.metric == args.Metric.NETWORK_BYTES_SENT:
                points.append((stat_value.bytes_sent, stat_value.age))
            elif args.metric == args.Metric.MEMORY_PERCENT:
                points.append((stat_value.memory_percent, stat_value.age))
            elif args.metric == args.Metric.MEMORY_RSS_SIZE:
                points.append((stat_value.RSS_size, stat_value.age))
            elif args.metric == args.Metric.MEMORY_VMS_SIZE:
                points.append((stat_value.VMS_size, stat_value.age))
            else:
                raise ValueError("Unknown metric.")

        # Points collected from "cpu_samples" and "io_samples" may not be correctly
        # sorted in some cases (as overlaps between different stat_values are
        # possible).
        points.sort(key=lambda x: x[1])

        ts = timeseries.Timeseries()
        ts.MultiAppend(points)

        if args.metric not in self.GAUGE_METRICS:
            ts.MakeIncreasing()

        if len(stat_values) > self.MAX_SAMPLES:
            sampling_interval = rdfvalue.Duration.FromSeconds(
                ((end_time - start_time).seconds / self.MAX_SAMPLES) or 1)
            if args.metric in self.GAUGE_METRICS:
                mode = timeseries.NORMALIZE_MODE_GAUGE
            else:
                mode = timeseries.NORMALIZE_MODE_COUNTER

            ts.Normalize(sampling_interval, start_time, end_time, mode=mode)

        result = ApiGetClientLoadStatsResult()
        for value, timestamp in ts.data:
            dp = api_stats.ApiStatsStoreMetricDataPoint(timestamp=timestamp,
                                                        value=value)
            result.data_points.append(dp)

        return result
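The sampling interval above is the window length divided by MAX_SAMPLES, clamped to at least one second by the `or 1` (integer division of a short window would otherwise yield zero). Worked through with the default 30-minute window and an assumed MAX_SAMPLES of 100 (the constant's value is not shown in this excerpt):

MAX_SAMPLES = 100  # assumed for illustration
window_s = 30 * 60
interval_s = (window_s // MAX_SAMPLES) or 1
assert interval_s == 18
assert ((50 // MAX_SAMPLES) or 1) == 1  # sub-100-second window clamps to 1s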
Example #11
    def testStopping(self):
        """Tests if we can stop a hunt."""

        foreman = aff4.FACTORY.Open("aff4:/foreman",
                                    mode="rw",
                                    token=self.token)
        rules = foreman.Get(foreman.Schema.RULES)

        # Make sure there are no rules yet.
        self.assertEqual(len(rules), 0)
        now = rdfvalue.RDFDatetime.Now()
        expires = rdfvalue.Duration("1h").Expiry()
        # Add some rules.
        rules = [
            foreman_rules.ForemanRule(created=now,
                                      expires=expires,
                                      description="Test rule1"),
            foreman_rules.ForemanRule(created=now,
                                      expires=expires,
                                      description="Test rule2")
        ]
        self.AddForemanRules(rules)

        client_rule_set = foreman_rules.ForemanClientRuleSet(rules=[
            foreman_rules.ForemanClientRule(
                rule_type=foreman_rules.ForemanClientRule.Type.REGEX,
                regex=foreman_rules.ForemanRegexClientRule(
                    field="CLIENT_NAME", attribute_regex="HUNT")),
            foreman_rules.ForemanClientRule(
                rule_type=foreman_rules.ForemanClientRule.Type.INTEGER,
                integer=foreman_rules.ForemanIntegerClientRule(
                    field="CLIENT_CLOCK",
                    operator=foreman_rules.ForemanIntegerClientRule.Operator.
                    GREATER_THAN,
                    value=1336650631137737))
        ])

        hunt = implementation.GRRHunt.StartHunt(
            hunt_name=standard.SampleHunt.__name__,
            client_rule_set=client_rule_set,
            client_rate=0,
            token=self.token)

        with hunt:
            runner = hunt.GetRunner()
            runner.Start()

            # Add some more rules.
            rules = [
                foreman_rules.ForemanRule(created=now,
                                          expires=expires,
                                          description="Test rule3"),
                foreman_rules.ForemanRule(created=now,
                                          expires=expires,
                                          description="Test rule4")
            ]
            self.AddForemanRules(rules)

            foreman = aff4.FACTORY.Open("aff4:/foreman",
                                        mode="rw",
                                        token=self.token)
            rules = foreman.Get(foreman.Schema.RULES)
            self.assertEqual(len(rules), 5)

            # It should be running.
            self.assertTrue(runner.IsHuntStarted())

            # Now we stop the hunt.
            hunt.Stop()

        foreman = aff4.FACTORY.Open("aff4:/foreman",
                                    mode="rw",
                                    token=self.token)
        rules = foreman.Get(foreman.Schema.RULES)
        # The rule for this hunt should be deleted but the rest should be there.
        self.assertEqual(len(rules), 4)

        # And the hunt should report no outstanding requests any more.
        with hunt:
            self.assertFalse(hunt.GetRunner().IsHuntStarted())
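From its use next to RDFDatetime.Now(), Duration("1h").Expiry() reads as "an absolute timestamp one duration from now". A plain-seconds sketch of that reading (an assumption from context, not the GRR API):

import time

def expiry(duration_s, now_s=None):
    """Returns the absolute time at which the duration elapses."""
    now_s = time.time() if now_s is None else now_s
    return now_s + duration_s

assert expiry(3600, now_s=1000) == 4600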
Example #12
    def testClientMessageLeasing(self):

        client_id = self.InitializeClient()
        messages = [
            rdf_flows.GrrMessage(queue=client_id, generate_task_id=True)
            for _ in range(10)
        ]
        lease_time = rdfvalue.Duration("5m")

        self.db.WriteClientMessages(messages)

        t0 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(100000)
        with test_lib.FakeTime(t0):
            t0_expiry = t0 + lease_time
            leased = self.db.LeaseClientMessages(client_id,
                                                 lease_time=lease_time,
                                                 limit=5)

            self.assertEqual(len(leased), 5)

            for request in leased:
                self.assertEqual(request.leased_until, t0_expiry)
                self.assertEqual(request.leased_by, utils.ProcessIdString())

        t1 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(100000 + 100)
        with test_lib.FakeTime(t1):
            t1_expiry = t1 + lease_time
            leased = self.db.LeaseClientMessages(client_id,
                                                 lease_time=lease_time,
                                                 limit=5)

            self.assertEqual(len(leased), 5)

            for request in leased:
                self.assertEqual(request.leased_until, t1_expiry)
                self.assertEqual(request.leased_by, utils.ProcessIdString())

            # Nothing left to lease.
            leased = self.db.LeaseClientMessages(client_id,
                                                 lease_time=lease_time,
                                                 limit=2)

            self.assertEqual(len(leased), 0)

        read = self.db.ReadClientMessages(client_id)

        self.assertEqual(len(read), 10)
        for r in read:
            self.assertEqual(r.leased_by, utils.ProcessIdString())

        self.assertEqual(len([r for r in read if r.leased_until == t0_expiry]),
                         5)
        self.assertEqual(len([r for r in read if r.leased_until == t1_expiry]),
                         5)

        # Half the leases expired.
        t2 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(100000 + 350)
        with test_lib.FakeTime(t2):
            leased = self.db.LeaseClientMessages(client_id,
                                                 lease_time=lease_time)

            self.assertEqual(len(leased), 5)

        # All of them expired.
        t3 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(100000 + 10350)
        with test_lib.FakeTime(t3):
            leased = self.db.LeaseClientMessages(client_id,
                                                 lease_time=lease_time)

            self.assertEqual(len(leased), 10)
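The timestamps make the expiry arithmetic concrete: with a 5-minute (300-second) lease, the first batch leased at t0 = 100000 expires at 100300 and the second batch leased at t1 = 100100 expires at 100400, so at t2 = 100350 only the first five messages are leasable again, and at t3 = 110350 all ten are:

LEASE_S = 300
t0, t1 = 100000, 100100
t0_expiry, t1_expiry = t0 + LEASE_S, t1 + LEASE_S  # 100300, 100400
t2, t3 = 100350, 110350
assert t0_expiry <= t2 < t1_expiry  # only the first batch has lapsed
assert t3 >= t1_expiry              # both batches have lapsed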
Example #13
class GRRWorker(object):
  """A GRR worker."""

  # time to wait before polling when no jobs are currently in the
  # task scheduler (sec)
  POLLING_INTERVAL = 2
  SHORT_POLLING_INTERVAL = 0.3
  SHORT_POLL_TIME = 30

  # Time to wait between trying to lease message handlers.
  MH_LEASE_INTERVAL = rdfvalue.Duration("15s")

  # target maximum time to spend on RunOnce
  RUN_ONCE_MAX_SECONDS = 300

  # A class global threadpool to be used for all workers.
  thread_pool = None

  # Duration of a flow lease time in seconds.
  flow_lease_time = 3600
  # Duration of a well known flow lease time in seconds.
  well_known_flow_lease_time = rdfvalue.Duration("600s")

  def __init__(self,
               queues=queues_config.WORKER_LIST,
               threadpool_prefix="grr_threadpool",
               threadpool_size=None,
               token=None):
    """Constructor.

    Args:
      queues: The queues we use to fetch new messages from.
      threadpool_prefix: A name for the thread pool used by this worker.
      threadpool_size: The number of workers to start in this thread pool.
      token: The token to use for the worker.

    Raises:
      RuntimeError: If the token is not provided.
    """
    logging.info("started worker with queues: %s", str(queues))
    self.queues = queues

    # self.queued_flows is a timed cache of locked flows. If this worker
    # encounters a lock failure on a flow, it will not attempt to grab this flow
    # until the timeout.
    self.queued_flows = utils.TimeBasedCache(max_size=10, max_age=60)

    if token is None:
      raise RuntimeError("A valid ACLToken is required.")

    # Make the thread pool a global so it can be reused for all workers.
    if self.__class__.thread_pool is None:
      if threadpool_size is None:
        threadpool_size = config.CONFIG["Threadpool.size"]

      self.__class__.thread_pool = threadpool.ThreadPool.Factory(
          threadpool_prefix, min_threads=2, max_threads=threadpool_size)

      self.__class__.thread_pool.Start()

    self.token = token
    self.last_active = 0
    self.last_mh_lease_attempt = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(0)

    # Well known flows are just instantiated.
    self.well_known_flows = flow.WellKnownFlow.GetAllWellKnownFlows(token=token)

  def Run(self):
    """Event loop."""
    try:
      while 1:
        if master.MASTER_WATCHER.IsMaster():
          processed = self.RunOnce()
        else:
          processed = 0
          time.sleep(60)

        if processed == 0:
          if time.time() - self.last_active > self.SHORT_POLL_TIME:
            interval = self.POLLING_INTERVAL
          else:
            interval = self.SHORT_POLLING_INTERVAL

          time.sleep(interval)
        else:
          self.last_active = time.time()

    except KeyboardInterrupt:
      logging.info("Caught interrupt, exiting.")
      self.__class__.thread_pool.Join()

  def _ProcessMessageHandlerRequests(self):
    """Processes message handler requests."""

    if not data_store.RelationalDBReadEnabled(category="message_handlers"):
      return 0

    now = rdfvalue.RDFDatetime.Now()
    if now - self.last_mh_lease_attempt < self.MH_LEASE_INTERVAL:
      return 0

    self.last_mh_lease_attempt = now

    requests = data_store.REL_DB.LeaseMessageHandlerRequests(
        lease_time=self.well_known_flow_lease_time, limit=1000)
    if not requests:
      return 0

    logging.debug("Leased message handler request ids: %s", ",".join(
        str(r.request_id) for r in requests))
    grouped_requests = utils.GroupBy(requests, lambda r: r.handler_name)
    for handler_name, requests_for_handler in grouped_requests.items():
      handler_cls = handler_registry.handler_name_map.get(handler_name)
      if not handler_cls:
        logging.error("Unknown message handler: %s", handler_name)
        continue

      try:
        logging.debug("Running %d messages for handler %s",
                      len(requests_for_handler), handler_name)
        handler_cls(token=self.token).ProcessMessages(requests_for_handler)
      except Exception:  # pylint: disable=broad-except
        logging.exception("Exception while processing message handler %s",
                          handler_name)

    logging.debug("Deleting message handler request ids: %s", ",".join(
        str(r.request_id) for r in requests))
    data_store.REL_DB.DeleteMessageHandlerRequests(requests)
    return len(requests)

  def RunOnce(self):
    """Processes one set of messages from Task Scheduler.

    The worker processes new jobs from the task master. For each job
    we retrieve the session from the Task Scheduler.

    Returns:
        Total number of messages processed by this call.
    """
    start_time = time.time()
    processed = self._ProcessMessageHandlerRequests()

    queue_manager = queue_manager_lib.QueueManager(token=self.token)
    for queue in self.queues:
      # Freezing the timestamp used by the queue manager to query/delete
      # notifications, to avoid possible race conditions.
      queue_manager.FreezeTimestamp()

      fetch_messages_start = time.time()
      notifications_by_priority = queue_manager.GetNotificationsByPriority(
          queue)
      stats.STATS.RecordEvent("worker_time_to_retrieve_notifications",
                              time.time() - fetch_messages_start)

      # Process stuck flows first
      stuck_flows = notifications_by_priority.pop(queue_manager.STUCK_PRIORITY,
                                                  [])

      if stuck_flows:
        self.ProcessStuckFlows(stuck_flows, queue_manager)

      notifications_available = []
      for priority in sorted(notifications_by_priority, reverse=True):
        for notification in notifications_by_priority[priority]:
          # Filter out session ids we already tried to lock but failed.
          if notification.session_id not in self.queued_flows:
            notifications_available.append(notification)

      try:
        # If we spent too much time processing what we have so far, the
        # active_sessions list might not be current. We therefore break here
        # so we can re-fetch a more up to date version of the list, and try
        # again later. The risk with running with an old active_sessions list
        # is that another worker could have already processed this message,
        # and when we try to process it, there is nothing to do - costing us a
        # lot of processing time. This is a tradeoff between checking the data
        # store for current information and processing out of date
        # information.
        processed += self.ProcessMessages(
            notifications_available, queue_manager,
            self.RUN_ONCE_MAX_SECONDS - (time.time() - start_time))

      # We need to keep going no matter what.
      except Exception as e:  # pylint: disable=broad-except
        logging.error("Error processing message %s. %s.", e,
                      traceback.format_exc())
        stats.STATS.IncrementCounter("grr_worker_exceptions")
        if flags.FLAGS.debug:
          pdb.post_mortem()

      queue_manager.UnfreezeTimestamp()
      # If we have spent too much time, stop.
      if (time.time() - start_time) > self.RUN_ONCE_MAX_SECONDS:
        return processed
    return processed

  def ProcessStuckFlows(self, stuck_flows, queue_manager):
    stats.STATS.IncrementCounter("grr_flows_stuck", len(stuck_flows))

    for stuck_flow in stuck_flows:
      try:
        flow.GRRFlow.TerminateFlow(
            stuck_flow.session_id,
            reason="Stuck in the worker",
            status=rdf_flows.GrrStatus.ReturnedStatus.WORKER_STUCK,
            force=True,
            token=self.token)
      except Exception:  # pylint: disable=broad-except
        logging.exception("Error terminating stuck flow: %s", stuck_flow)
      finally:
        # Remove notifications for this flow. This will also remove the
        # "stuck flow" notification itself.
        queue_manager.DeleteNotification(stuck_flow.session_id)

  def ProcessMessages(self, active_notifications, queue_manager, time_limit=0):
    """Processes all the flows in the messages.

    Precondition: All tasks come from the same queue.

    Note that the server actually completes the requests in the
    flow when receiving the messages from the client. We do not really
    look at the messages here at all any more - we just work from the
    completed messages in the flow RDFValue.

    Args:
        active_notifications: The list of notifications.
        queue_manager: QueueManager object used to manage notifications,
                       requests and responses.
        time_limit: If set return as soon as possible after this many seconds.

    Returns:
        The number of processed flows.
    """
    now = time.time()
    processed = 0
    for notification in active_notifications:
      if notification.session_id not in self.queued_flows:
        if time_limit and time.time() - now > time_limit:
          break

        processed += 1
        self.queued_flows.Put(notification.session_id, 1)
        self.__class__.thread_pool.AddTask(
            target=self._ProcessMessages,
            args=(notification, queue_manager.Copy()),
            name=self.__class__.__name__)

    return processed

  def _ProcessRegularFlowMessages(self, flow_obj, notification):
    """Processes messages for a given flow."""
    session_id = notification.session_id
    if not isinstance(flow_obj, flow.FlowBase):
      logging.warn("%s is not a proper flow object (got %s)", session_id,
                   type(flow_obj))

      stats.STATS.IncrementCounter(
          "worker_bad_flow_objects", fields=[str(type(flow_obj))])
      raise FlowProcessingError("Not a GRRFlow.")

    runner = flow_obj.GetRunner()
    try:
      runner.ProcessCompletedRequests(notification, self.__class__.thread_pool)
    except Exception as e:  # pylint: disable=broad-except
      # Something went wrong - log it in the flow.
      runner.context.state = rdf_flow_runner.FlowContext.State.ERROR
      runner.context.backtrace = traceback.format_exc()
      logging.error("Flow %s: %s", flow_obj, e)
      raise FlowProcessingError(e)

  def _ProcessMessages(self, notification, queue_manager):
    """Does the real work with a single flow."""
    flow_obj = None
    session_id = notification.session_id

    try:
      # Take a lease on the flow:
      flow_name = session_id.FlowName()
      if flow_name in self.well_known_flows:
        # Well known flows are not necessarily present in the data store so
        # we need to create them instead of opening.
        expected_flow = self.well_known_flows[flow_name].__class__
        flow_obj = aff4.FACTORY.CreateWithLock(
            session_id,
            expected_flow,
            lease_time=self.well_known_flow_lease_time,
            blocking=False,
            token=self.token)
      else:
        flow_obj = aff4.FACTORY.OpenWithLock(
            session_id,
            lease_time=self.flow_lease_time,
            blocking=False,
            token=self.token)

      now = time.time()
      logging.debug("Got lock on %s", session_id)

      # If we get here, we now own the flow. We can delete the notifications
      # we just retrieved but we need to make sure we don't delete any that
      # came in later.
      queue_manager.DeleteNotification(session_id, end=notification.timestamp)

      if flow_name in self.well_known_flows:
        stats.STATS.IncrementCounter(
            "well_known_flow_requests", fields=[str(session_id)])

        # We remove requests first and then process them in the thread pool.
        # On one hand this approach increases the risk of losing requests in
        # case the worker process dies. On the other hand, it doesn't hold
        # the lock while requests are processed, so other workers can
        # process well known flows requests as well.
        with flow_obj:
          responses = flow_obj.FetchAndRemoveRequestsAndResponses(session_id)

        flow_obj.ProcessResponses(responses, self.__class__.thread_pool)

      else:
        with flow_obj:
          self._ProcessRegularFlowMessages(flow_obj, notification)

      elapsed = time.time() - now
      logging.debug("Done processing %s: %s sec", session_id, elapsed)
      stats.STATS.RecordEvent(
          "worker_flow_processing_time", elapsed, fields=[flow_obj.Name()])

      # Everything went well -> session can be run again.
      self.queued_flows.ExpireObject(session_id)

    except aff4.LockError:
      # Another worker is dealing with this flow right now, we just skip it.
      # We expect lots of these when there are few messages (the system isn't
      # highly loaded) but it is interesting when the system is under load to
      # know if we are pulling the optimal number of messages off the queue.
      # A high number of lock fails when there is plenty of work to do would
      # indicate we are wasting time trying to process work that has already
      # been completed by other workers.
      stats.STATS.IncrementCounter("worker_flow_lock_error")

    except FlowProcessingError:
      # Do nothing as we expect the error to be correctly logged and accounted
      # already.
      pass

    except Exception as e:  # pylint: disable=broad-except
      # Something went wrong when processing this session. In order not to spin
      # here, we just remove the notification.
      logging.exception("Error processing session %s: %s", session_id, e)
      stats.STATS.IncrementCounter(
          "worker_session_errors", fields=[str(type(e))])
      queue_manager.DeleteNotification(session_id)
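_ProcessMessageHandlerRequests throttles itself with MH_LEASE_INTERVAL: unless at least 15 seconds have passed since the last lease attempt, it returns without touching the data store. A minimal sketch of that throttle:

import time

class LeaseThrottle(object):
    """Skips an expensive operation unless `interval_s` has elapsed."""

    def __init__(self, interval_s=15):
        self.interval_s = interval_s
        self.last_attempt = 0.0

    def maybe_run(self, fn):
        now = time.time()
        if now - self.last_attempt < self.interval_s:
            return 0
        self.last_attempt = now
        return fn()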
Example #14
  def testFloorToMinutes(self):
    datetime = rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11 12:34:56")
    expected = rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11 12:34")
    self.assertEqual(datetime.Floor(rdfvalue.Duration("60s")), expected)
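Together with Example #1, this pins down Floor: the timestamp is rounded down to the nearest multiple of the duration, so a value already on the boundary is unchanged while 12:34:56 floored to 60 seconds becomes 12:34:00. In plain microseconds:

MINUTE_US = 60 * 10**6

def floor_us(timestamp_us, duration_us):
    """Rounds a timestamp down to a multiple of the duration."""
    return timestamp_us - timestamp_us % duration_us

assert floor_us(56 * 10**6, MINUTE_US) == 0                 # :56 -> :00
assert floor_us(5 * MINUTE_US, MINUTE_US) == 5 * MINUTE_US  # exact unchanged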
Example #15
    def testIntegerComparisons(self):
        """Tests that we can use integer matching rules on the foreman."""

        base_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(
            1336480583.077736)
        boot_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(
            1336300000.000000)

        self.SetupTestClientObject(0x11,
                                   system="Windows XP",
                                   install_time=base_time)
        self.SetupTestClientObject(0x12,
                                   system="Windows 7",
                                   install_time=base_time)
        # This one was installed one week earlier.
        one_week_ago = base_time - rdfvalue.Duration("1w")
        self.SetupTestClientObject(0x13,
                                   system="Windows 7",
                                   install_time=one_week_ago)
        self.SetupTestClientObject(0x14,
                                   system="Windows 7",
                                   last_boot_time=boot_time)

        with utils.Stubber(implementation.GRRHunt, "StartClients",
                           self.StartClients):
            now = rdfvalue.RDFDatetime.Now()
            expiration_time = now + rdfvalue.Duration("1h")

            # Make a new rule
            rule = foreman_rules.ForemanCondition(
                creation_time=now,
                expiration_time=expiration_time,
                description="Test rule(old)",
                hunt_name=standard.GenericHunt.__name__,
                hunt_id="H:111111")

            # Matches the old client
            one_hour_ago = base_time - rdfvalue.Duration("1h")
            rule.client_rule_set = foreman_rules.ForemanClientRuleSet(rules=[
                foreman_rules.ForemanClientRule(
                    rule_type=foreman_rules.ForemanClientRule.Type.INTEGER,
                    integer=foreman_rules.ForemanIntegerClientRule(
                        field="INSTALL_TIME",
                        operator=foreman_rules.ForemanIntegerClientRule.
                        Operator.LESS_THAN,
                        value=one_hour_ago.AsSecondsSinceEpoch()))
            ])

            data_store.REL_DB.WriteForemanRule(rule)

            # Make a new rule
            rule = foreman_rules.ForemanCondition(
                creation_time=now,
                expiration_time=expiration_time,
                description="Test rule(new)",
                hunt_name=standard.GenericHunt.__name__,
                hunt_id="H:222222")

            # Matches the newer clients
            rule.client_rule_set = foreman_rules.ForemanClientRuleSet(rules=[
                foreman_rules.ForemanClientRule(
                    rule_type=foreman_rules.ForemanClientRule.Type.INTEGER,
                    integer=foreman_rules.ForemanIntegerClientRule(
                        field="INSTALL_TIME",
                        operator=foreman_rules.ForemanIntegerClientRule.
                        Operator.GREATER_THAN,
                        value=one_hour_ago.AsSecondsSinceEpoch()))
            ])

            data_store.REL_DB.WriteForemanRule(rule)

            # Make a new rule
            rule = foreman_rules.ForemanCondition(
                creation_time=now,
                expiration_time=expiration_time,
                description="Test rule(eq)",
                hunt_name=standard.GenericHunt.__name__,
                hunt_id="H:333333")

            # Note that this also tests the handling of nonexistent attributes.
            rule.client_rule_set = foreman_rules.ForemanClientRuleSet(rules=[
                foreman_rules.ForemanClientRule(
                    rule_type=foreman_rules.ForemanClientRule.Type.INTEGER,
                    integer=foreman_rules.ForemanIntegerClientRule(
                        field="LAST_BOOT_TIME",
                        operator="EQUAL",
                        value=boot_time.AsSecondsSinceEpoch()))
            ])

            data_store.REL_DB.WriteForemanRule(rule)

            foreman_obj = foreman.GetForeman()

            self.clients_started = []
            foreman_obj.AssignTasksToClient("C.1000000000000011")
            foreman_obj.AssignTasksToClient("C.1000000000000012")
            foreman_obj.AssignTasksToClient("C.1000000000000013")
            foreman_obj.AssignTasksToClient("C.1000000000000014")

            # Make sure that the clients ran the correct flows.
            self.assertEqual(len(self.clients_started), 4)
            self.assertEqual(self.clients_started[0][1], "C.1000000000000011")
            self.assertEqual("H:222222", self.clients_started[0][0].Basename())
            self.assertEqual(self.clients_started[1][1], "C.1000000000000012")
            self.assertEqual("H:222222", self.clients_started[1][0].Basename())
            self.assertEqual(self.clients_started[2][1], "C.1000000000000013")
            self.assertEqual("H:111111", self.clients_started[2][0].Basename())
            self.assertEqual(self.clients_started[3][1], "C.1000000000000014")
            self.assertEqual("H:333333", self.clients_started[3][0].Basename())
Exemplo n.º 16
0
    def testMessageHandlerRequestLeasing(self):

        requests = [
            rdf_objects.MessageHandlerRequest(client_id="C.1000000000000000",
                                              handler_name="Testhandler",
                                              request_id=i * 100,
                                              request=rdfvalue.RDFInteger(i))
            for i in range(10)
        ]
        lease_time = rdfvalue.Duration("5m")

        with test_lib.FakeTime(
                rdfvalue.RDFDatetime.FromSecondsSinceEpoch(10000)):
            self.db.WriteMessageHandlerRequests(requests)

        t0 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(100000)
        with test_lib.FakeTime(t0):
            t0_expiry = t0 + lease_time
            leased = self.db.LeaseMessageHandlerRequests(lease_time=lease_time,
                                                         limit=5)

            self.assertEqual(len(leased), 5)

            for request in leased:
                self.assertEqual(request.leased_until, t0_expiry)
                self.assertEqual(request.leased_by, utils.ProcessIdString())

        t1 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(100000 + 100)
        with test_lib.FakeTime(t1):
            t1_expiry = t1 + lease_time
            leased = self.db.LeaseMessageHandlerRequests(lease_time=lease_time,
                                                         limit=5)

            self.assertEqual(len(leased), 5)

            for request in leased:
                self.assertEqual(request.leased_until, t1_expiry)
                self.assertEqual(request.leased_by, utils.ProcessIdString())

            # Nothing left to lease.
            leased = self.db.LeaseMessageHandlerRequests(lease_time=lease_time,
                                                         limit=2)

            self.assertEqual(len(leased), 0)

        read = self.db.ReadMessageHandlerRequests()

        self.assertEqual(len(read), 10)
        for r in read:
            self.assertEqual(r.leased_by, utils.ProcessIdString())

        self.assertEqual(len([r for r in read if r.leased_until == t0_expiry]),
                         5)
        self.assertEqual(len([r for r in read if r.leased_until == t1_expiry]),
                         5)

        # Half the leases expired.
        t2 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(100000 + 350)
        with test_lib.FakeTime(t2):
            leased = self.db.LeaseMessageHandlerRequests(lease_time=lease_time)

            self.assertEqual(len(leased), 5)

        # All of them expired.
        t3 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(100000 + 10350)
        with test_lib.FakeTime(t3):
            leased = self.db.LeaseMessageHandlerRequests(lease_time=lease_time)

            self.assertEqual(len(leased), 10)
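
The leasing behavior verified here is: a request can be leased again only after its previous lease expires, and each lease stamps a new expiry. A pure-Python sketch of that contract (Request and LeaseRequests are stand-ins, not the GRR database API):

class Request(object):
    def __init__(self, request_id):
        self.request_id = request_id
        self.leased_until = 0

def LeaseRequests(requests, now, lease_time, limit=None):
    leased = []
    for r in requests:
        if limit is not None and len(leased) >= limit:
            break
        if r.leased_until <= now:  # Expired lease, or never leased.
            r.leased_until = now + lease_time
            leased.append(r)
    return leased

pool = [Request(i) for i in range(10)]
assert len(LeaseRequests(pool, now=100000, lease_time=300, limit=5)) == 5
assert len(LeaseRequests(pool, now=100100, lease_time=300, limit=5)) == 5
assert len(LeaseRequests(pool, now=100100, lease_time=300, limit=2)) == 0
assert len(LeaseRequests(pool, now=100350, lease_time=300)) == 5
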
Exemplo n.º 17
0
    def testIntegerComparisons(self):
        """Tests that we can use integer matching rules on the foreman."""

        base_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(
            1336480583.077736)
        boot_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(
            1336300000.000000)

        self.SetupClient(0x11, system="Windows XP", install_time=base_time)
        self.SetupClient(0x12, system="Windows 7", install_time=base_time)
        # This one was installed one week earlier.
        one_week_ago = base_time - rdfvalue.Duration("1w")
        self.SetupClient(0x13, system="Windows 7", install_time=one_week_ago)
        self.SetupClient(0x14, system="Windows 7", last_boot_time=boot_time)

        with utils.Stubber(flow.GRRFlow, "StartFlow", self.StartFlow):
            # Now set up the filters
            now = rdfvalue.RDFDatetime.Now()
            expires = now + rdfvalue.Duration("1h")
            foreman_obj = foreman.GetForeman(token=self.token)

            # Make a new rule
            rule = foreman_rules.ForemanRule(created=now,
                                             expires=expires,
                                             description="Test rule(old)")

            # Matches the old client
            one_hour_ago = base_time - rdfvalue.Duration("1h")
            rule.client_rule_set = foreman_rules.ForemanClientRuleSet(rules=[
                foreman_rules.ForemanClientRule(
                    rule_type=foreman_rules.ForemanClientRule.Type.INTEGER,
                    integer=foreman_rules.ForemanIntegerClientRule(
                        field="INSTALL_TIME",
                        operator=foreman_rules.ForemanIntegerClientRule.
                        Operator.LESS_THAN,
                        value=one_hour_ago.AsSecondsSinceEpoch()))
            ])

            old_flow = "Test flow for old clients"
            # Will run Test Flow
            rule.actions.Append(flow_name=old_flow,
                                argv=rdf_protodict.Dict(dict(foo="bar")))

            # Clear the rule set and add the new rule to it.
            rule_set = foreman_obj.Schema.RULES()
            rule_set.Append(rule)

            # Make a new rule
            rule = foreman_rules.ForemanRule(created=now,
                                             expires=expires,
                                             description="Test rule(new)")

            # Matches the newer clients
            rule.client_rule_set = foreman_rules.ForemanClientRuleSet(rules=[
                foreman_rules.ForemanClientRule(
                    rule_type=foreman_rules.ForemanClientRule.Type.INTEGER,
                    integer=foreman_rules.ForemanIntegerClientRule(
                        field="INSTALL_TIME",
                        operator=foreman_rules.ForemanIntegerClientRule.
                        Operator.GREATER_THAN,
                        value=one_hour_ago.AsSecondsSinceEpoch()))
            ])

            new_flow = "Test flow for newer clients"

            # Will run Test Flow
            rule.actions.Append(flow_name=new_flow,
                                argv=rdf_protodict.Dict(dict(foo="bar")))

            rule_set.Append(rule)

            # Make a new rule
            rule = foreman_rules.ForemanRule(created=now,
                                             expires=expires,
                                             description="Test rule(eq)")

            # Note that this also tests the handling of nonexistent attributes.
            rule.client_rule_set = foreman_rules.ForemanClientRuleSet(rules=[
                foreman_rules.ForemanClientRule(
                    rule_type=foreman_rules.ForemanClientRule.Type.INTEGER,
                    integer=foreman_rules.ForemanIntegerClientRule(
                        field="LAST_BOOT_TIME",
                        operator="EQUAL",
                        value=boot_time.AsSecondsSinceEpoch()))
            ])

            eq_flow = "Test flow for LAST_BOOT_TIME"

            rule.actions.Append(flow_name=eq_flow,
                                argv=rdf_protodict.Dict(dict(foo="bar")))

            rule_set.Append(rule)

            # Assign it to the foreman
            foreman_obj.Set(foreman_obj.Schema.RULES, rule_set)
            foreman_obj.Close()

            self.clients_launched = []
            foreman_obj.AssignTasksToClient("C.1000000000000011")
            foreman_obj.AssignTasksToClient("C.1000000000000012")
            foreman_obj.AssignTasksToClient("C.1000000000000013")
            foreman_obj.AssignTasksToClient("C.1000000000000014")

            # Make sure that the clients ran the correct flows.
            self.assertEqual(len(self.clients_launched), 4)
            self.assertEqual(self.clients_launched[0][0],
                             rdf_client.ClientURN("C.1000000000000011"))
            self.assertEqual(self.clients_launched[0][1], new_flow)
            self.assertEqual(self.clients_launched[1][0],
                             rdf_client.ClientURN("C.1000000000000012"))
            self.assertEqual(self.clients_launched[1][1], new_flow)
            self.assertEqual(self.clients_launched[2][0],
                             rdf_client.ClientURN("C.1000000000000013"))
            self.assertEqual(self.clients_launched[2][1], old_flow)
            self.assertEqual(self.clients_launched[3][0],
                             rdf_client.ClientURN("C.1000000000000014"))
            self.assertEqual(self.clients_launched[3][1], eq_flow)
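
Both foreman tests wrap their setup in utils.Stubber, which temporarily replaces an attribute and restores it on exit. A minimal sketch of that context-manager pattern (this Stubber is a stand-in, not GRR's implementation):

class Stubber(object):
    """Temporarily replaces obj.attr_name, restoring it on exit."""

    def __init__(self, obj, attr_name, replacement):
        self.obj = obj
        self.attr_name = attr_name
        self.replacement = replacement

    def __enter__(self):
        self.original = getattr(self.obj, self.attr_name)
        setattr(self.obj, self.attr_name, self.replacement)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Restore the original attribute even if the block raised.
        setattr(self.obj, self.attr_name, self.original)

class Greeter(object):
    def Greet(self):
        return "hello"

greeter = Greeter()
with Stubber(greeter, "Greet", lambda: "stubbed"):
    assert greeter.Greet() == "stubbed"
assert greeter.Greet() == "hello"
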
Exemplo n.º 18
0
class VerifyHuntOutputPluginsCronFlow(cronjobs.SystemCronFlow):
  """Runs Verify() method of output plugins of active hunts."""

  frequency = rdfvalue.Duration("4h")
  lifetime = rdfvalue.Duration("4h")

  args_type = VerifyHuntOutputPluginsCronFlowArgs

  NON_VERIFIABLE = "NON_VERIFIABLE"

  def _GroupHuntsAndPluginsByVerifiers(self, hunts):
    """Opens hunts results metadata in bulk and groups the by verifier type.

    We've traded simplicity for performance here. Initial implementations of
    VerifyHuntOutputPluginsCronFlow checked the hunts one-by-one, but that
    turned out to be too slow and inefficient when many hunts had to be
    checked. To make the checks more efficient, the MultiVerifyHuntOutput()
    method was introduced in the verifiers API.

    It's this cron flow's responsibility to group the plugin objects by
    verifier type, so that we can feed them to MultiVerifyHuntOutput.

    Args:
      hunts: A list of GRRHunt objects.

    Returns:
      A dictionary where keys are verifier classes and values are lists of
      tuples (plugin id, plugin descriptor, plugin object, hunt object).
      Special constant NON_VERIFIABLE is used as a key for plugins that
      have no corresponding verifier.
    """
    hunts_by_urns = {}
    for hunt in hunts:
      hunts_by_urns[hunt.urn] = hunt

    results_metadata_urns = [hunt.results_metadata_urn for hunt in hunts]
    results_metadata_objects = aff4.FACTORY.MultiOpen(
        results_metadata_urns,
        aff4_type=implementation.HuntResultsMetadata,
        token=self.token)

    results = {}
    for mdata in results_metadata_objects:
      hunt_urn = rdfvalue.RDFURN(mdata.urn.Dirname())
      hunt = hunts_by_urns[hunt_urn]

      for plugin_id, (plugin_descriptor, plugin_state) in mdata.Get(
          mdata.Schema.OUTPUT_PLUGINS, {}).items():

        plugin_obj = plugin_descriptor.GetPluginForState(plugin_state)
        plugin_verifiers_classes = plugin_descriptor.GetPluginVerifiersClasses()

        if not plugin_verifiers_classes:
          results.setdefault(self.NON_VERIFIABLE, []).append(
              (plugin_id, plugin_descriptor, plugin_obj, hunt))
        else:
          for cls in plugin_verifiers_classes:
            results.setdefault(cls, []).append((plugin_id, plugin_descriptor,
                                                plugin_obj, hunt))

    return results

  def _FillResult(self, result, plugin_id, plugin_descriptor):
    result.timestamp = rdfvalue.RDFDatetime.Now()
    result.plugin_id = plugin_id
    result.plugin_descriptor = plugin_descriptor
    return result

  def _VerifyHunts(self, hunts_plugins_by_verifier):
    results_by_hunt = {}

    errors = []
    for verifier_cls, hunts_plugins in hunts_plugins_by_verifier.items():

      if verifier_cls == self.NON_VERIFIABLE:
        for plugin_id, plugin_descriptor, plugin_obj, hunt in hunts_plugins:
          result = output_plugin.OutputPluginVerificationResult(
              status=output_plugin.OutputPluginVerificationResult.Status.N_A,
              status_message=("Plugin %s is not verifiable." %
                              plugin_obj.__class__.__name__))
          self._FillResult(result, plugin_id, plugin_descriptor)

          results_by_hunt.setdefault(hunt.urn, []).append(result)
          stats.STATS.IncrementCounter(
              "hunt_output_plugin_verifications",
              fields=[utils.SmartStr(result.status)])
        continue

      verifier = verifier_cls()

      plugins_hunts_pairs = []
      for plugin_id, plugin_descriptor, plugin_obj, hunt in hunts_plugins:
        plugins_hunts_pairs.append((plugin_obj, hunt))

      try:
        for hunt_urn, result in verifier.MultiVerifyHuntOutput(
            plugins_hunts_pairs):
          self._FillResult(result, plugin_id, plugin_descriptor)

          # Key results by the hunt URN yielded by the verifier, not by
          # the stale loop variable from the grouping pass above.
          results_by_hunt.setdefault(hunt_urn, []).append(result)
          stats.STATS.IncrementCounter(
              "hunt_output_plugin_verifications",
              fields=[utils.SmartStr(result.status)])

      except output_plugin.MultiVerifyHuntOutputError as e:
        logging.exception(e)

        errors.extend(e.errors)
        stats.STATS.IncrementCounter(
            "hunt_output_plugin_verification_errors", delta=len(e.errors))

    for hunt_urn, results in results_by_hunt.items():
      yield hunt_urn, results

    if errors:
      raise MultiHuntVerificationSummaryError(errors)

  def _WriteVerificationResults(self, hunt_urn, results):
    with aff4.FACTORY.Create(
        hunt_urn.Add("ResultsMetadata"),
        aff4_type=implementation.HuntResultsMetadata,
        mode="w",
        token=self.token) as results_metadata:
      results_metadata.Set(
          results_metadata.Schema.OUTPUT_PLUGINS_VERIFICATION_RESULTS,
          output_plugin.OutputPluginVerificationResultsList(results=results))

  @flow.StateHandler()
  def Start(self):
    hunts_root = aff4.FACTORY.Open("aff4:/hunts", token=self.token)

    if not self.args.check_range:
      self.args.check_range = rdfvalue.Duration(
          "%ds" % int(self.__class__.frequency.seconds * 2))

    range_end = rdfvalue.RDFDatetime.Now()
    range_start = rdfvalue.RDFDatetime.Now() - self.args.check_range

    children_urns = list(hunts_root.ListChildren(age=(range_start, range_end)))
    children_urns.sort(key=operator.attrgetter("age"), reverse=True)

    self.Log("Will verify %d hunts." % len(children_urns))

    hunts_to_process = []
    for hunt in hunts_root.OpenChildren(children_urns):
      # Skip non-GenericHunts.
      if not isinstance(hunt, GenericHunt):
        self.Log("Skipping: %s." % utils.SmartStr(hunt.urn))
        continue

      hunts_to_process.append(hunt)

    hunts_by_verifier = self._GroupHuntsAndPluginsByVerifiers(hunts_to_process)
    for hunt_urn, results in self._VerifyHunts(hunts_by_verifier):
      self._WriteVerificationResults(hunt_urn, results)
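
The grouping step in _GroupHuntsAndPluginsByVerifiers is a plain dict.setdefault accumulation. Reduced to its core (GroupByVerifier and the sample data are illustrative):

NON_VERIFIABLE = "NON_VERIFIABLE"

def GroupByVerifier(plugins):
    # plugins: iterable of (plugin_id, verifier_classes) pairs.
    results = {}
    for plugin_id, verifier_classes in plugins:
        if not verifier_classes:
            results.setdefault(NON_VERIFIABLE, []).append(plugin_id)
        else:
            for cls in verifier_classes:
                results.setdefault(cls, []).append(plugin_id)
    return results

grouped = GroupByVerifier([("p1", ["CsvVerifier"]), ("p2", [])])
assert grouped == {"CsvVerifier": ["p1"], NON_VERIFIABLE: ["p2"]}
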
Exemplo n.º 19
0
 def testStringRepresentationIsTransitive(self):
     t = rdfvalue.Duration("5m")
     self.assertEqual(t.seconds, 300)
     self.assertEqual(t, rdfvalue.Duration(300))
     self.assertEqual(str(t), "5m")
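
The round-trip asserted here ("5m" to 300 seconds and back) is easy to model. A toy Duration with the same observable behavior; this is an illustration, not GRR's rdfvalue.Duration:

class Duration(object):
    _UNITS = {"s": 1, "m": 60, "h": 3600, "d": 86400, "w": 604800}

    def __init__(self, value):
        if isinstance(value, str):
            self.seconds = int(value[:-1]) * self._UNITS[value[-1]]
        else:
            self.seconds = int(value)

    def __eq__(self, other):
        return self.seconds == other.seconds

    def __str__(self):
        # Render with the largest unit that divides the value evenly.
        for suffix in ("w", "d", "h", "m", "s"):
            if self.seconds % self._UNITS[suffix] == 0:
                return "%d%s" % (self.seconds // self._UNITS[suffix], suffix)

t = Duration("5m")
assert t.seconds == 300
assert t == Duration(300)
assert str(t) == "5m"
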
Exemplo n.º 20
0
    def testModifyHunt(self):
        hunt = self.CreateSampleHunt(stopped=True)

        self.Open("/")
        self.WaitUntil(self.IsElementPresent, "client_query")
        self.Click("css=a[grrtarget=hunts]")
        self.WaitUntil(self.IsTextPresent, "GenericHunt")

        # Select a Hunt.
        self.Click("css=td:contains('GenericHunt')")

        # Click on Modify button and check that dialog appears.
        self.Click("css=button[name=ModifyHunt]")
        self.WaitUntil(self.IsTextPresent, "Modify this hunt")

        expiry_time = rdfvalue.Duration("5m").Expiry().Format("%Y-%m-%d %H:%M")

        self.Type(
            "css=grr-modify-hunt-dialog label:contains('Client limit') ~ * input",
            "4483")
        self.Type(
            "css=grr-modify-hunt-dialog label:contains('Client rate') ~ * input",
            "42")
        self.Type(
            "css=grr-modify-hunt-dialog label:contains('Expires') ~ * input",
            expiry_time)

        # Click on Proceed.
        self.Click("css=button[name=Proceed]")

        # This should be rejected now, and an approval request form is shown.
        self.WaitUntil(self.IsTextPresent, "Create a new approval")
        self.Click("css=grr-request-approval-dialog button[name=Cancel]")
        # Wait for dialog to disappear.
        self.WaitUntilNot(self.IsVisible, "css=.modal-open")

        # Now create an approval.
        self.RequestAndGrantHuntApproval(hunt.session_id.Basename())

        # Click on Modify button and check that dialog appears.
        self.Click("css=button[name=ModifyHunt]")
        self.WaitUntil(self.IsTextPresent, "Modify this hunt")

        self.Type(
            "css=grr-modify-hunt-dialog label:contains('Client limit') ~ * input",
            "4483")
        self.Type(
            "css=grr-modify-hunt-dialog label:contains('Client rate') ~ * input",
            "42")
        self.Type(
            "css=grr-modify-hunt-dialog label:contains('Expires') ~ * input",
            expiry_time)

        # Click on "Proceed" and wait for success label to appear.
        # Also check that "Proceed" button gets disabled.
        self.Click("css=button[name=Proceed]")

        self.WaitUntil(self.IsTextPresent, "Hunt modified successfully!")
        self.assertFalse(self.IsElementPresent("css=button[name=Proceed]"))

        # Click on "Cancel" and check that dialog disappears.
        self.Click("css=button[name=Close]")
        self.WaitUntilNot(self.IsVisible, "css=.modal-open")

        # View should be refreshed automatically.
        self.WaitUntil(self.IsTextPresent, "GenericHunt")
        self.WaitUntil(self.IsTextPresent, "4483")
        self.WaitUntil(self.IsTextPresent, expiry_time)
Exemplo n.º 21
0
class ApiDeletePendingUserNotificationHandlerTest(
        api_test_lib.ApiCallHandlerTest):
    """Test for ApiDeletePendingUserNotificationHandler."""

    TIME_0 = rdfvalue.RDFDatetime(42 * rdfvalue.MICROSECONDS)
    TIME_1 = TIME_0 + rdfvalue.Duration("1d")
    TIME_2 = TIME_1 + rdfvalue.Duration("1d")

    def setUp(self):
        super(ApiDeletePendingUserNotificationHandlerTest, self).setUp()
        self.handler = user_plugin.ApiDeletePendingUserNotificationHandler()
        self.client_id = self.SetupClient(0)

        with test_lib.FakeTime(self.TIME_0):
            notification.Notify(
                self.token.username,
                rdf_objects.UserNotification.Type.TYPE_CLIENT_INTERROGATED,
                "<some message>",
                rdf_objects.ObjectReference(
                    reference_type=rdf_objects.ObjectReference.Type.CLIENT,
                    client=rdf_objects.ClientReference(
                        client_id=self.client_id.Basename())))

            notification.Notify(
                self.token.username,
                rdf_objects.UserNotification.Type.TYPE_CLIENT_INTERROGATED,
                "<some message with identical time>",
                rdf_objects.ObjectReference(
                    reference_type=rdf_objects.ObjectReference.Type.CLIENT,
                    client=rdf_objects.ClientReference(
                        client_id=self.client_id.Basename())))

        with test_lib.FakeTime(self.TIME_1):
            notification.Notify(
                self.token.username,
                rdf_objects.UserNotification.Type.TYPE_CLIENT_APPROVAL_GRANTED,
                "<some other message>",
                rdf_objects.ObjectReference(
                    reference_type=rdf_objects.ObjectReference.Type.CLIENT,
                    client=rdf_objects.ClientReference(
                        client_id=self.client_id.Basename())))

    def _GetNotifications(self):
        user_record = aff4.FACTORY.Create(aff4.ROOT_URN.Add("users").Add(
            self.token.username),
                                          aff4_type=aff4_users.GRRUser,
                                          mode="r",
                                          token=self.token)

        pending = user_record.Get(user_record.Schema.PENDING_NOTIFICATIONS)
        shown = user_record.Get(user_record.Schema.SHOWN_NOTIFICATIONS)
        return (pending, shown)

    def testDeletesFromPendingAndAddsToShown(self):
        # Check that there are three pending notifications and no shown ones yet.
        (pending, shown) = self._GetNotifications()
        self.assertEqual(len(pending), 3)
        self.assertEqual(len(shown), 0)

        # Delete a pending notification.
        args = user_plugin.ApiDeletePendingUserNotificationArgs(
            timestamp=self.TIME_1)
        self.handler.Handle(args, token=self.token)

        # After the deletion, two notifications should be pending and one shown.
        (pending, shown) = self._GetNotifications()
        self.assertEqual(len(pending), 2)
        self.assertEqual(len(shown), 1)
        self.assertTrue("<some other message>" in shown[0].message)
        self.assertEqual(shown[0].timestamp, self.TIME_1)

    def testRaisesOnDeletingMultipleNotifications(self):
        # Check that there are three pending notifications and no shown ones yet.
        (pending, shown) = self._GetNotifications()
        self.assertEqual(len(pending), 3)
        self.assertEqual(len(shown), 0)

        # Delete all pending notifications on TIME_0.
        args = user_plugin.ApiDeletePendingUserNotificationArgs(
            timestamp=self.TIME_0)
        with self.assertRaises(aff4_users.UniqueKeyError):
            self.handler.Handle(args, token=self.token)

        # Check that the notifications were not changed in the process.
        (pending, shown) = self._GetNotifications()
        self.assertEqual(len(pending), 3)
        self.assertEqual(len(shown), 0)

    def testUnknownTimestampIsIgnored(self):
        # Check that there are three pending notifications and no shown ones yet.
        (pending, shown) = self._GetNotifications()
        self.assertEqual(len(pending), 3)
        self.assertEqual(len(shown), 0)

        # A timestamp not matching any pending notifications does not change any of
        # the collections.
        args = user_plugin.ApiDeletePendingUserNotificationArgs(
            timestamp=self.TIME_2)
        self.handler.Handle(args, token=self.token)

        # We should still have the same number of pending and shown notifications.
        (pending, shown) = self._GetNotifications()
        self.assertEqual(len(pending), 3)
        self.assertEqual(len(shown), 0)
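
The handler semantics under test: deleting by timestamp moves exactly one notification from pending to shown, raises if the timestamp is ambiguous, and ignores unknown timestamps. A sketch of that bookkeeping with illustrative names:

class UniqueKeyError(Exception):
    pass

def DeletePendingNotification(pending, shown, timestamp):
    matches = [n for n in pending if n["timestamp"] == timestamp]
    if len(matches) > 1:
        raise UniqueKeyError("Multiple notifications at %r" % timestamp)
    if not matches:
        return  # Unknown timestamps are ignored.
    pending.remove(matches[0])
    shown.append(matches[0])

pending = [{"timestamp": 0, "message": "a"},
           {"timestamp": 0, "message": "b"},
           {"timestamp": 86400, "message": "c"}]
shown = []
DeletePendingNotification(pending, shown, 86400)
assert len(pending) == 2 and shown[0]["message"] == "c"
try:
    DeletePendingNotification(pending, shown, 0)  # Ambiguous: raises.
except UniqueKeyError:
    pass
DeletePendingNotification(pending, shown, 999)  # Unknown: no-op.
assert len(pending) == 2 and len(shown) == 1
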
Exemplo n.º 22
0
    def testRefreshFileStartsFlow(self):
        self.Open("/")

        self.Type("client_query", "C.0000000000000001")
        self.Click("client_query_submit")

        self.WaitUntilEqual(u"C.0000000000000001", self.GetText,
                            "css=span[type=subject]")

        # Choose client 1.
        self.Click("css=td:contains('0001')")

        # Go to Browse VFS.
        self.Click("css=a:contains('Browse Virtual Filesystem')")

        self.Click("css=#_fs i.jstree-icon")
        self.Click("css=#_fs-os i.jstree-icon")
        self.Click("css=#_fs-os-c i.jstree-icon")

        # Test file versioning.
        self.WaitUntil(self.IsElementPresent, "css=#_fs-os-c-Downloads")
        self.Click("link=Downloads")

        # Select a file and start a flow by requesting a newer version.
        self.Click("css=tr:contains(\"a.txt\")")
        self.Click("css=li[heading=Download]")
        self.Click("css=button:contains(\"Collect from the client\")")

        # Create a new file version (that would have been created by the flow
        # otherwise) and finish the flow.
        client_id = rdf_client.ClientURN("C.0000000000000001")

        fd = aff4.FACTORY.Open(client_id.Add("flows"), token=self.token)

        # Make sure that the flow has started (when button is clicked, the HTTP
        # API request is sent asynchronously).
        def MultiGetFileStarted():
            return transfer.MultiGetFile.__name__ in list(
                x.__class__.__name__ for x in fd.OpenChildren())

        self.WaitUntil(MultiGetFileStarted)

        flows = list(fd.ListChildren())

        client_mock = action_mocks.MultiGetFileClientMock()
        for flow_urn in flows:
            flow_test_lib.TestFlowHelper(flow_urn,
                                         client_mock,
                                         client_id=client_id,
                                         check_flow_errors=False,
                                         token=self.token)

        time_in_future = rdfvalue.RDFDatetime.Now() + rdfvalue.Duration("1h")
        # We have to make sure that the new version will not be within a second
        # from the current one, otherwise the previous one and the new one will
        # be indistinguishable in the UI (as it has a 1s precision when
        # displaying versions).
        with test_lib.FakeTime(time_in_future):
            gui_test_lib.CreateFileVersion(
                rdf_client.ClientURN("C.0000000000000001"),
                "fs/os/c/Downloads/a.txt",
                "The newest version!",
                timestamp=rdfvalue.RDFDatetime.Now(),
                token=self.token)

        # Once the flow has finished, the file view should update and add the
        # newly created, latest version of the file to the list. The selected
        # option should still be "HEAD".
        self.WaitUntilContains("HEAD", self.GetText,
                               "css=.version-dropdown > option[selected]")

        # The version dropdown should also update and show the new timestamp.
        self.WaitUntilContains(gui_test_lib.DateTimeString(time_in_future),
                               self.GetText,
                               "css=.version-dropdown > option:nth(1)")

        # The file table should also update and display the new timestamp.
        self.WaitUntil(
            self.IsElementPresent,
            "css=grr-file-table tbody > tr td:contains(\"%s\")" %
            (gui_test_lib.DateTimeString(time_in_future)))

        # Make sure the file content has changed.
        self.Click("css=li[heading=TextView]")
        self.WaitUntilContains("The newest version!", self.GetText,
                               "css=div.monospace pre")

        # Go to the flow management screen and check that there was a new flow.
        self.Click("css=a:contains('Manage launched flows')")
        self.Click("css=grr-flows-list tr:contains('MultiGetFile')")
        self.WaitUntilContains(transfer.MultiGetFile.__name__, self.GetText,
                               "css=#main_bottomPane")

        self.WaitUntilContains(
            "c/Downloads/a.txt", self.GetText,
            "css=#main_bottomPane table > tbody td.proto_key:contains(\"Path\") "
            "~ td.proto_value")
Exemplo n.º 23
0
from grr.test_lib import hunt_test_lib
from grr.test_lib import test_lib

flags.DEFINE_string(
    "chrome_driver_path", None,
    "Path to the chrome driver binary. If not set, webdriver "
    "will search on PATH for the binary.")

flags.DEFINE_bool(
    "use_headless_chrome", False, "If set, run Chrome driver in "
    "headless mode. Useful when running tests in a window-manager-less "
    "environment.")

# An increasing sequence of times.
TIME_0 = test_lib.FIXED_TIME
TIME_1 = TIME_0 + rdfvalue.Duration("1d")
TIME_2 = TIME_1 + rdfvalue.Duration("1d")


def DateString(t):
    return t.Format("%Y-%m-%d")


def DateTimeString(t):
    return t.Format("%Y-%m-%d %H:%M:%S")


def CreateFileVersions(client_id, token):
    """Add new versions for a file."""
    # This file already exists in the fixture at TIME_0, so we write a
    # later version.
Exemplo n.º 24
0
  def testVersionDropDownChangesFileContentAndDownloads(self):
    """Test the fileview interface."""

    self.Open("/#/clients/%s" % self.client_id)

    # Go to Browse VFS.
    self.Click("css=a[grrtarget='client.vfs']")

    self.Click("css=#_fs i.jstree-icon")
    self.Click("css=#_fs-os i.jstree-icon")
    self.Click("css=#_fs-os-c i.jstree-icon")

    # Test file versioning.
    self.WaitUntil(self.IsElementPresent, "css=#_fs-os-c-Downloads")
    self.Click("link=Downloads")

    # Verify that we have the latest version in the table by default.
    self.assertTrue(
        gui_test_lib.DateString(gui_test_lib.TIME_2) in self.GetText(
            "css=tr:contains(\"a.txt\")"))

    # Click on the row.
    self.Click("css=tr:contains(\"a.txt\")")
    self.WaitUntilContains("a.txt", self.GetText, "css=div#main_bottomPane h1")
    self.WaitUntilContains("HEAD", self.GetText,
                           "css=.version-dropdown > option[selected]")

    self.WaitUntilContains(
        gui_test_lib.DateString(gui_test_lib.TIME_2), self.GetText,
        "css=.version-dropdown > option:nth(1)")

    # Check the data in this file.
    self.Click("css=li[heading=TextView]")
    self.WaitUntilContains("Goodbye World", self.GetText,
                           "css=div.monospace pre")

    downloaded_files = []

    def FakeDownloadHandle(unused_self, args, token=None):
      _ = token  # Avoid unused variable linter warnings.
      aff4_path = args.client_id.ToClientURN().Add(args.file_path)
      age = args.timestamp or aff4.NEWEST_TIME
      downloaded_files.append((aff4_path, age))

      return api_call_handler_base.ApiBinaryStream(
          filename=aff4_path.Basename(), content_generator=xrange(42))

    with utils.Stubber(api_vfs.ApiGetFileBlobHandler, "Handle",
                       FakeDownloadHandle):
      # Try to download the file.
      self.Click("css=li[heading=Download]")

      self.WaitUntilContains(
          gui_test_lib.DateTimeString(gui_test_lib.TIME_2), self.GetText,
          "css=grr-file-download-view")
      self.Click("css=button:contains(\"Download\")")

      # Select the previous version.
      self.Click("css=select.version-dropdown > option:contains(\"%s\")" %
                 gui_test_lib.DateString(gui_test_lib.TIME_1))

      # Now we should have a different time.
      self.WaitUntilContains(
          gui_test_lib.DateTimeString(gui_test_lib.TIME_1), self.GetText,
          "css=grr-file-download-view")
      self.Click("css=button:contains(\"Download\")")

      self.WaitUntil(self.IsElementPresent, "css=li[heading=TextView]")

      # The FakeDownloadHandle method is actually called four times, since
      # each file download first sends a HEAD request to check user access.
      self.WaitUntil(lambda: len(downloaded_files) == 4)

    # Both files should be the same...
    self.assertEqual(downloaded_files[0][0],
                     u"aff4:/%s/fs/os/c/Downloads/a.txt" % self.client_id)
    self.assertEqual(downloaded_files[2][0],
                     u"aff4:/%s/fs/os/c/Downloads/a.txt" % self.client_id)
    # But from different times. The downloaded file timestamp is only accurate
    # to the nearest second. Also, the HEAD version of the file is downloaded
    # with age=NEWEST_TIME.
    self.assertEqual(downloaded_files[0][1], aff4.NEWEST_TIME)
    self.assertAlmostEqual(
        downloaded_files[2][1],
        gui_test_lib.TIME_1,
        delta=rdfvalue.Duration("1s"))

    self.Click("css=li[heading=TextView]")

    # Make sure the file content has changed. This version has "Hello World" in
    # it.
    self.WaitUntilContains("Hello World", self.GetText, "css=div.monospace pre")
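
The assertAlmostEqual call above compares timestamps with an explicit delta because the downloaded file's age is only second-accurate. The same pattern in plain unittest, with arbitrary sample values:

import unittest

class TimestampToleranceTest(unittest.TestCase):
    def testWithinOneSecond(self):
        # delta accepts any type supporting subtraction and comparison,
        # which is why an rdfvalue.Duration works in the test above.
        self.assertAlmostEqual(1336480583.4, 1336480583.0, delta=1.0)

if __name__ == "__main__":
    unittest.main()
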
Exemplo n.º 25
0
Arquivo: test_lib.py Projeto: qsdj/grr
from grr.server.grr_response_server import aff4
from grr.server.grr_response_server import artifact
from grr.server.grr_response_server import client_index
from grr.server.grr_response_server import data_store
from grr.server.grr_response_server import email_alerts
from grr.server.grr_response_server.aff4_objects import aff4_grr
from grr.server.grr_response_server.aff4_objects import filestore
from grr.server.grr_response_server.aff4_objects import users
from grr.server.grr_response_server.flows.general import audit

from grr.server.grr_response_server.hunts import results as hunts_results
from grr.server.grr_response_server.rdfvalues import objects as rdf_objects

from grr.test_lib import testing_startup

FIXED_TIME = rdfvalue.RDFDatetime.Now() - rdfvalue.Duration("8d")
TEST_CLIENT_ID = rdf_client.ClientURN("C.1000000000000000")


class GRRBaseTest(unittest.TestCase):
  """This is the base class for all GRR tests."""

  use_relational_reads = False

  def __init__(self, methodName=None):  # pylint: disable=g-bad-name
    """Hack around unittest's stupid constructor.

    We sometimes need to instantiate the test suite without running any tests -
    e.g. to run initialization or setUp() functions. The unittest constructor
    requires a valid method name to be provided.
Exemplo n.º 26
0
class TestSystemCron(aff4_cronjobs.SystemCronFlow):
  frequency = rdfvalue.Duration("10m")
  lifetime = rdfvalue.Duration("12h")
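
SystemCronFlow subclasses like this one only declare frequency and lifetime; the cron scheduler decides when the flow is due. A sketch of such a due-check, assuming it is based on time elapsed since the last run (DueNow is hypothetical):

def DueNow(last_run_seconds, now_seconds, frequency_seconds):
    # A cron flow is due once a full frequency interval has elapsed
    # since its last run, or if it has never run at all.
    if last_run_seconds is None:
        return True
    return now_seconds - last_run_seconds >= frequency_seconds

TEN_MINUTES = 10 * 60
assert DueNow(None, 1000, TEN_MINUTES)
assert not DueNow(1000, 1000 + 599, TEN_MINUTES)
assert DueNow(1000, 1000 + 600, TEN_MINUTES)
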
Exemplo n.º 27
0
def main(argv):
    """Main."""
    del argv  # Unused.

    token = GetToken()
    grr_config.CONFIG.AddContext(contexts.COMMAND_LINE_CONTEXT)
    grr_config.CONFIG.AddContext(contexts.CONFIG_UPDATER_CONTEXT)

    if flags.FLAGS.subparser_name == "initialize":
        config_lib.ParseConfigCommandLine()
        if flags.FLAGS.noprompt:
            InitializeNoPrompt(grr_config.CONFIG, token=token)
        else:
            Initialize(grr_config.CONFIG, token=token)
        return

    server_startup.Init()

    try:
        print("Using configuration %s" % grr_config.CONFIG)
    except AttributeError:
        raise RuntimeError("No valid config specified.")

    if flags.FLAGS.subparser_name == "generate_keys":
        try:
            GenerateKeys(grr_config.CONFIG,
                         overwrite_keys=flags.FLAGS.overwrite_keys)
        except RuntimeError as e:
            # GenerateKeys will raise if keys exist and overwrite_keys is not set.
            print("ERROR: %s" % e)
            sys.exit(1)
        grr_config.CONFIG.Write()

    elif flags.FLAGS.subparser_name == "repack_clients":
        upload = not flags.FLAGS.noupload
        repacking.TemplateRepacker().RepackAllTemplates(upload=upload,
                                                        token=token)

    elif flags.FLAGS.subparser_name == "show_user":
        maintenance_utils.ShowUser(flags.FLAGS.username, token=token)

    elif flags.FLAGS.subparser_name == "update_user":
        try:
            maintenance_utils.UpdateUser(flags.FLAGS.username,
                                         flags.FLAGS.password,
                                         flags.FLAGS.add_labels,
                                         flags.FLAGS.delete_labels,
                                         token=token)
        except maintenance_utils.UserError as e:
            print(e)

    elif flags.FLAGS.subparser_name == "delete_user":
        maintenance_utils.DeleteUser(flags.FLAGS.username, token=token)

    elif flags.FLAGS.subparser_name == "add_user":
        labels = []
        if not flags.FLAGS.noadmin:
            labels.append("admin")

        if flags.FLAGS.labels:
            labels.extend(flags.FLAGS.labels)

        try:
            maintenance_utils.AddUser(flags.FLAGS.username,
                                      flags.FLAGS.password,
                                      labels,
                                      token=token)
        except maintenance_utils.UserError as e:
            print(e)

    elif flags.FLAGS.subparser_name == "upload_python":
        python_hack_root_urn = grr_config.CONFIG.Get("Config.python_hack_root")
        content = open(flags.FLAGS.file, "rb").read(1024 * 1024 * 30)
        aff4_path = flags.FLAGS.dest_path
        platform = flags.FLAGS.platform
        if not aff4_path:
            aff4_path = python_hack_root_urn.Add(platform.lower()).Add(
                os.path.basename(flags.FLAGS.file))
        if not str(aff4_path).startswith(str(python_hack_root_urn)):
            raise ValueError("AFF4 path must start with %s." %
                             python_hack_root_urn)
        context = ["Platform:%s" % platform.title(), "Client Context"]
        maintenance_utils.UploadSignedConfigBlob(content,
                                                 aff4_path=aff4_path,
                                                 client_context=context,
                                                 token=token)

    elif flags.FLAGS.subparser_name == "upload_exe":
        content = open(flags.FLAGS.file, "rb").read(1024 * 1024 * 30)
        context = [
            "Platform:%s" % flags.FLAGS.platform.title(), "Client Context"
        ]

        if flags.FLAGS.dest_path:
            dest_path = rdfvalue.RDFURN(flags.FLAGS.dest_path)
        else:
            dest_path = grr_config.CONFIG.Get(
                "Executables.aff4_path",
                context=context).Add(os.path.basename(flags.FLAGS.file))

        # Now upload to the destination.
        maintenance_utils.UploadSignedConfigBlob(content,
                                                 aff4_path=dest_path,
                                                 client_context=context,
                                                 token=token)

        print("Uploaded to %s" % dest_path)

    elif flags.FLAGS.subparser_name == "set_var":
        config = grr_config.CONFIG
        print("Setting %s to %s" % (flags.FLAGS.var, flags.FLAGS.val))
        if flags.FLAGS.val.startswith("["):  # Allow setting of basic lists.
            flags.FLAGS.val = flags.FLAGS.val[1:-1].split(",")
        config.Set(flags.FLAGS.var, flags.FLAGS.val)
        config.Write()

    elif flags.FLAGS.subparser_name == "upload_raw":
        if not flags.FLAGS.dest_path:
            flags.FLAGS.dest_path = aff4.ROOT_URN.Add("config").Add("raw")
        uploaded = UploadRaw(flags.FLAGS.file,
                             flags.FLAGS.dest_path,
                             token=token)
        print("Uploaded to %s" % uploaded)

    elif flags.FLAGS.subparser_name == "upload_artifact":
        yaml.load(open(flags.FLAGS.file, "rb"))  # Check it will parse.
        try:
            artifact.UploadArtifactYamlFile(
                open(flags.FLAGS.file, "rb").read(),
                overwrite=flags.FLAGS.overwrite_artifact)
        except rdf_artifacts.ArtifactDefinitionError as e:
            print("Error %s. You may need to set --overwrite_artifact." % e)

    elif flags.FLAGS.subparser_name == "delete_artifacts":
        artifact_list = flags.FLAGS.artifact
        if not artifact_list:
            raise ValueError("No artifact to delete given.")
        artifact_registry.DeleteArtifactsFromDatastore(artifact_list,
                                                       token=token)
        print("Artifacts %s deleted." % artifact_list)

    elif flags.FLAGS.subparser_name == "download_missing_rekall_profiles":
        print("Downloading missing Rekall profiles.")
        s = rekall_profile_server.GRRRekallProfileServer()
        s.GetMissingProfiles()

    elif flags.FLAGS.subparser_name == "set_global_notification":
        notification = aff4_users.GlobalNotification(
            type=flags.FLAGS.type,
            header=flags.FLAGS.header,
            content=flags.FLAGS.content,
            link=flags.FLAGS.link)
        if flags.FLAGS.show_from:
            notification.show_from = rdfvalue.RDFDatetime(
            ).ParseFromHumanReadable(flags.FLAGS.show_from)
        if flags.FLAGS.duration:
            notification.duration = rdfvalue.Duration().ParseFromHumanReadable(
                flags.FLAGS.duration)

        print("Setting global notification.")
        print(notification)

        with aff4.FACTORY.Create(
                aff4_users.GlobalNotificationStorage.DEFAULT_PATH,
                aff4_type=aff4_users.GlobalNotificationStorage,
                mode="rw",
                token=token) as storage:
            storage.AddNotification(notification)
    elif flags.FLAGS.subparser_name == "rotate_server_key":
        print("""
You are about to rotate the server key. Note that:

  - Clients might experience intermittent connection problems after
    the server key is rotated.

  - It's not possible to go back to an earlier key. Clients that see a
    new certificate will remember the cert's serial number and refuse
    to accept any certificate with a smaller serial number from that
    point on.
    """)

        if raw_input("Continue? [yN]: ").upper() == "Y":
            if flags.FLAGS.keylength:
                keylength = int(flags.FLAGS.keylength)
            else:
                keylength = grr_config.CONFIG["Server.rsa_key_length"]

            maintenance_utils.RotateServerKey(cn=flags.FLAGS.common_name,
                                              keylength=keylength)
    elif flags.FLAGS.subparser_name == "migrate_data":
        data_migration.Migrate()
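
The long elif chain above dispatches on flags.FLAGS.subparser_name. The same shape can be built with stdlib argparse subparsers; the command set below is a reduced example, not the real config updater interface:

import argparse

def main(argv=None):
    parser = argparse.ArgumentParser(prog="config_tool")
    subparsers = parser.add_subparsers(dest="subparser_name")
    show_user = subparsers.add_parser("show_user")
    show_user.add_argument("--username", required=True)
    subparsers.add_parser("generate_keys")

    args = parser.parse_args(argv)
    if args.subparser_name == "show_user":
        print("Would show user %s" % args.username)
    elif args.subparser_name == "generate_keys":
        print("Would generate keys")

main(["show_user", "--username", "admin"])
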
Exemplo n.º 28
0
class NoRandom(aff4_cronjobs.SystemCronFlow):
  frequency = rdfvalue.Duration("1d")
  lifetime = rdfvalue.Duration("12h")
  start_time_randomization = False
Exemplo n.º 29
0
 def testOneDayClientStatus(self):
     client_id = self.CreateClient(last_ping=rdfvalue.RDFDatetime.Now() -
                                   rdfvalue.Duration("1h"))
     self.Open("/#c=" + str(client_id))
     self.WaitUntil(self.IsElementPresent, "css=img[src$='online-1d.png']")
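
The online-1d icon checked here is derived from the client's last-ping age. A sketch of that threshold logic; the cutoffs are illustrative, not GRR's exact values:

def StatusIcon(seconds_since_ping):
    if seconds_since_ping < 15 * 60:
        return "online.png"     # Seen within the last 15 minutes.
    elif seconds_since_ping < 24 * 60 * 60:
        return "online-1d.png"  # Seen within the last day.
    return "offline.png"

assert StatusIcon(60 * 60) == "online-1d.png"  # 1 hour ago, as in the test.
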
Exemplo n.º 30
0
 def testFloorToHours(self):
     datetime = rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11 12:34")
     expected = rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11 12:00")
     self.assertEqual(datetime.Floor(rdfvalue.Duration("1h")), expected)