Example #1
0
 def testTempPathIsConvertedToURNCorrectly(self):
     """Checks that a TEMP-typed VFS file reference maps to the expected URN."""
     reference = rdf_objects.VfsFileReference(
         client_id=self.client_id,
         path_type="TEMP",
         path_components=["a", "b", "c"])
     expected = rdfvalue.RDFURN("aff4:/%s/temp/a/b/c" % self.client_id)
     self.assertEqual(reference.ToURN(), expected)
Example #2
0
 def testHuntURNFromID(self):
   """Verifies that HuntURNFromID builds the canonical aff4 hunt URN."""
   urn = hunt.HuntURNFromID("12345678")
   self.assertIsInstance(urn, rdfvalue.RDFURN)
   expected_urn = rdfvalue.RDFURN("aff4:/hunts/H:12345678")
   self.assertEqual(urn, expected_urn)
def GetAFF4ExecutablesRoot():
    """Returns the AFF4 URN under which executables are stored."""
    executables_root = rdfvalue.RDFURN("aff4:/config/executables")
    return executables_root
Example #4
0
def CreateClientIndex(token=None):
    """Opens the global AFF4 client index, creating it if necessary.

    Args:
      token: Optional access token passed through to the AFF4 factory.

    Returns:
      An AFF4ClientIndex object opened in read/write mode.
    """
    index_urn = rdfvalue.RDFURN("aff4:/client_index")
    return aff4.FACTORY.Create(
        index_urn,
        aff4_type=AFF4ClientIndex,
        mode="rw",
        object_exists=True,
        token=token)
Example #5
0
    def Run(self):
        """Generates regression data for the ListHuntResults API handler.

        Writes two timestamped hunt results ("blah1" and "blah2-foo") using
        either the relational database or the legacy AFF4 datastore depending
        on configuration, then exercises the handler with various
        offset/count/filter argument combinations.
        """
        client_id = self.SetupClient(0).Basename()

        if data_store.RelationalDBReadEnabled("hunts"):
            # Relational path: results are FlowResult rows attached to a flow
            # started as a child of the hunt.
            hunt_id = self.CreateHunt()
            flow_id = flow_test_lib.StartFlow(flows_processes.ListProcesses,
                                              client_id=client_id,
                                              parent_hunt_id=hunt_id)

            # FakeTime pins the result timestamps so the regression output
            # stays deterministic.
            with test_lib.FakeTime(
                    rdfvalue.RDFDatetime.FromSecondsSinceEpoch(2)):
                data_store.REL_DB.WriteFlowResults([
                    rdf_flow_objects.FlowResult(
                        client_id=client_id,
                        flow_id=flow_id,
                        hunt_id=hunt_id,
                        payload=rdfvalue.RDFString("blah1"))
                ])

            with test_lib.FakeTime(
                    rdfvalue.RDFDatetime.FromSecondsSinceEpoch(43)):
                data_store.REL_DB.WriteFlowResults([
                    rdf_flow_objects.FlowResult(
                        client_id=client_id,
                        flow_id=flow_id,
                        hunt_id=hunt_id,
                        payload=rdfvalue.RDFString("blah2-foo"))
                ])
        else:
            # Legacy path: results are GrrMessages added to the hunt's AFF4
            # result collection under a fixed hunt URN.
            hunt_urn = rdfvalue.RDFURN("aff4:/hunts/H:123456")
            hunt_id = hunt_urn.Basename()

            results = implementation.GRRHunt.ResultCollectionForHID(hunt_urn)
            with data_store.DB.GetMutationPool() as pool:
                result = rdf_flows.GrrMessage(
                    source=client_id,
                    payload=rdfvalue.RDFString("blah1"),
                    age=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(1))
                results.Add(result,
                            timestamp=result.age + rdfvalue.Duration("1s"),
                            mutation_pool=pool)

                result = rdf_flows.GrrMessage(
                    source=client_id,
                    payload=rdfvalue.RDFString("blah2-foo"),
                    age=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(42))
                results.Add(result,
                            timestamp=result.age + rdfvalue.Duration("1s"),
                            mutation_pool=pool)

        # Normalize the (possibly random) hunt id in the golden output.
        replace = {hunt_id: "H:123456"}
        self.Check("ListHuntResults",
                   args=hunt_plugin.ApiListHuntResultsArgs(hunt_id=hunt_id),
                   replace=replace)
        self.Check("ListHuntResults",
                   args=hunt_plugin.ApiListHuntResultsArgs(hunt_id=hunt_id,
                                                           count=1),
                   replace=replace)
        self.Check("ListHuntResults",
                   args=hunt_plugin.ApiListHuntResultsArgs(hunt_id=hunt_id,
                                                           offset=1,
                                                           count=1),
                   replace=replace)
        self.Check("ListHuntResults",
                   args=hunt_plugin.ApiListHuntResultsArgs(hunt_id=hunt_id,
                                                           filter="foo"),
                   replace=replace)
 def setUp(self):
     """Creates the access-check helper and a test subject URN."""
     super(CheckAccessHelperTest, self).setUp()
     self.subject = rdfvalue.RDFURN("aff4:/some/path")
     self.helper = user_managers.CheckAccessHelper("test")
Example #7
0
def DownloadCollection(coll_path,
                       target_path,
                       token=None,
                       overwrite=False,
                       dump_client_info=False,
                       flatten=False,
                       max_threads=10):
    """Iterate through a Collection object downloading all files.

  Args:
    coll_path: Path to an AFF4 collection.
    target_path: Base directory to write to.
    token: Token for access.
    overwrite: If True, overwrite existing files.
    dump_client_info: If True, this will detect client paths, and dump a yaml
      version of the client object to the root path. This is useful for seeing
      the hostname/users of the machine the client id refers to.
    flatten: If True, produce a "files" flat folder with links to all the found
             files.
    max_threads: Use this many threads to do the downloads.
  """
    completed_clients = set()
    coll = _OpenCollectionPath(coll_path)
    if coll is None:
        logging.error(
            "%s is not a valid collection. Typo? "
            "Are you sure something was written to it?", coll_path)
        return

    thread_pool = threadpool.ThreadPool.Factory("Downloader", max_threads)
    thread_pool.Start()

    # Extract the client id from the source urn. This code makes me
    # quite sad but there is just no concept of passing a client id in
    # the export tool (and this would be unsafe anyways since the user
    # could then download files from arbitrary machines easily). The
    # export tool is on the way to deprecation so we decided to do this
    # instead of fixing the obsolete code.
    try:
        collection_urn = coll.collection_id
    except AttributeError:
        collection_urn = coll.urn

    try:
        original_client_id = rdf_client.ClientURN(collection_urn.Split()[0])
    except IOError:
        # The collection URN is not rooted under a client id; fall back on
        # per-message source attribution below.
        original_client_id = None

    logging.info("Expecting to download %s files", len(coll))

    # Collections can include anything they want, but we only handle RDFURN and
    # StatEntry entries in this function.
    for grr_message in coll:
        source = None
        # If a raw message, work out the type.
        if isinstance(grr_message, rdf_flows.GrrMessage):
            source = grr_message.source
            grr_message = grr_message.payload

        # Dispatch on the payload type to find the AFF4 URN to download.
        if isinstance(grr_message, rdfvalue.RDFURN):
            urn = grr_message
        elif isinstance(grr_message, rdf_client_fs.StatEntry):
            urn = rdfvalue.RDFURN(
                grr_message.AFF4Path(source or original_client_id))
        elif isinstance(grr_message, rdf_file_finder.FileFinderResult):
            urn = rdfvalue.RDFURN(
                grr_message.stat_entry.AFF4Path(source or original_client_id))
        elif isinstance(grr_message, collectors.ArtifactFilesDownloaderResult):
            if grr_message.HasField("downloaded_file"):
                urn = grr_message.downloaded_file.AFF4Path(
                    source or original_client_id)
            else:
                continue
        elif isinstance(grr_message, rdfvalue.RDFBytes):
            try:
                os.makedirs(target_path)
            except OSError:
                # Directory already exists (or cannot be created) - best
                # effort only.
                pass
            try:
                # We just dump out bytes and carry on.
                client_id = source.Split()[0]
                # NOTE(review): writes str() output to a file opened in binary
                # mode; on Python 3 this would require bytes - confirm.
                with open(os.path.join(target_path, client_id), "wb") as fd:
                    fd.write(str(grr_message))
            except AttributeError:
                pass
            continue
        else:
            # Unsupported payload type - skip silently.
            continue

        # Handle dumping client info, but only once per client.
        if dump_client_info:
            client_id = urn.Split()[0]
            re_match = aff4_grr.VFSGRRClient.CLIENT_ID_RE.match(client_id)
            if re_match and client_id not in completed_clients:
                args = (rdf_client.ClientURN(client_id), target_path, token,
                        overwrite)
                thread_pool.AddTask(target=DumpClientYaml,
                                    args=args,
                                    name="ClientYamlDownloader")
                completed_clients.add(client_id)

        # Now queue downloading the actual files.
        args = (urn, target_path, token, overwrite)
        if flatten:
            target = CopyAndSymlinkAFF4ToLocal
        else:
            target = CopyAFF4ToLocal
        thread_pool.AddTask(target=target, args=args, name="Downloader")

    # Join and stop the threadpool.
    thread_pool.Stop()
Example #8
0
  def testNewHuntWizard(self):
    """End-to-end Selenium test of the "New Hunt" wizard.

    Walks the wizard through flow selection (File Finder on /tmp with TSK
    pathtype), output plugin configuration, foreman rule configuration
    (OS/integer/regex rules with "Match any" mode), checks that values survive
    Back/Next navigation, creates the hunt, and verifies the resulting hunt
    object and foreman rules in whichever datastore backend is enabled.
    """
    # Open up and click on View Hunts.
    self.Open("/")
    self.WaitUntil(self.IsElementPresent, "client_query")
    self.WaitUntil(self.IsElementPresent, "css=a[grrtarget=hunts]")
    self.Click("css=a[grrtarget=hunts]")
    self.WaitUntil(self.IsElementPresent, "css=button[name=NewHunt]")

    # Open up "New Hunt" wizard
    self.Click("css=button[name=NewHunt]")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('What to run?')")

    # Click on Filesystem item in flows list
    self.WaitUntil(self.IsElementPresent, "css=#_Filesystem > i.jstree-icon")
    self.Click("css=#_Filesystem > i.jstree-icon")

    # Click on the FileFinder item in Filesystem flows list
    self.Click("link=File Finder")

    # Wait for flow configuration form to be rendered (just wait for first
    # input field).
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-new-hunt-wizard-form label:contains('Paths')")

    # Change "path" and "pathtype" values
    self.Type(
        "css=grr-new-hunt-wizard-form "
        "grr-form-proto-repeated-field:has(label:contains('Paths')) "
        "input", "/tmp")
    self.Select(
        "css=grr-new-hunt-wizard-form "
        "grr-form-proto-single-field:has(label:contains('Pathtype')) "
        "select", "TSK")

    # Click on "Next" button
    self.Click("css=grr-new-hunt-wizard-form button.Next")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('Hunt parameters')")

    # Click on "Next" button
    self.Click("css=grr-new-hunt-wizard-form button.Next")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('How to process results')")

    # Click on "Back" button and check that all the values in the form
    # remain intact.
    self.Click("css=grr-new-hunt-wizard-form button.Back")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('Hunt parameters')")

    self.Click("css=grr-new-hunt-wizard-form button.Back")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-new-hunt-wizard-form label:contains('Paths')")

    self.assertEqual(
        "/tmp",
        self.GetValue(
            "css=grr-new-hunt-wizard-form "
            "grr-form-proto-repeated-field:has(label:contains('Paths')) input"))

    self.assertEqual(
        "TSK",
        self.GetSelectedLabel(
            "css=grr-new-hunt-wizard-form "
            "grr-form-proto-single-field:has(label:contains('Pathtype')) select"
        ))

    # Click on "Next" button
    self.Click("css=grr-new-hunt-wizard-form button.Next")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('Hunt parameters')")

    # Click on "Next" button
    self.Click("css=grr-new-hunt-wizard-form button.Next")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('How to process results')")

    # Configure the hunt to use dummy output plugin.
    self.Click("css=grr-new-hunt-wizard-form button[name=Add]")
    self.Select("css=grr-new-hunt-wizard-form select", "DummyOutputPlugin")
    self.Type(
        "css=grr-new-hunt-wizard-form "
        "grr-form-proto-single-field:has(label:contains('Filename Regex')) "
        "input", "some regex")

    # Click on "Next" button
    self.Click("css=grr-new-hunt-wizard-form button.Next")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('Where to run?')")

    # Empty set of rules should be valid.
    self.WaitUntil(self.IsElementPresent, "css=button.Next:not([disabled])")

    # A note informs what an empty set of rules means.
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('No rules specified!')")

    # Alternative match mode that matches a client if
    # any of the rules evaluates to true can be selected.
    self.Select(
        "css=grr-configure-rules-page "
        "label:contains('Match mode') ~ * select", "Match any")

    # The note depends on the match mode.
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('No rules specified!')")

    # Create 3 foreman rules. Note that "Add" button adds rules
    # to the beginning of a list. So we always use :nth(0) selector.
    self.Click("css=grr-configure-rules-page button[name=Add]")
    self.Select("css=grr-configure-rules-page div.well:nth(0) select", "Regex")
    rule = foreman_rules.ForemanRegexClientRule
    label = rule.ForemanStringField.SYSTEM.description
    self.Select(
        "css=grr-configure-rules-page div.well:nth(0) "
        "label:contains('Field') ~ * select", label)
    self.Type(
        "css=grr-configure-rules-page div.well:nth(0) "
        "label:contains('Attribute regex') ~ * input", "Linux")

    self.Click("css=grr-configure-rules-page button[name=Add]")
    self.Select("css=grr-configure-rules-page div.well:nth(0) select",
                "Integer")

    rule = foreman_rules.ForemanIntegerClientRule
    label = rule.ForemanIntegerField.CLIENT_CLOCK.description
    self.Select(
        "css=grr-configure-rules-page div.well:nth(0) "
        "label:contains('Field') ~ * select", label)
    self.Select(
        "css=grr-configure-rules-page div.well:nth(0) "
        "label:contains('Operator') ~ * select", "GREATER_THAN")
    self.Type(
        "css=grr-configure-rules-page div.well:nth(0) "
        "label:contains('Value') ~ * input", "1336650631137737")

    self.Click("css=grr-configure-rules-page button[name=Add]")
    self.Click("css=grr-configure-rules-page div.well:nth(0) "
               "label:contains('Os darwin') ~ * input[type=checkbox]")

    # Click on "Back" button
    self.Click("css=grr-new-hunt-wizard-form button.Back")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('How to process results')")

    # Click on "Next" button again and check that all the values that
    # we've just entered remain intact.
    self.Click("css=grr-new-hunt-wizard-form button.Next")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('Where to run?')")

    # Click on "Next" button
    self.Click("css=grr-new-hunt-wizard-form button.Next")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('Review')")

    # Check that the arguments summary is present.
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('Paths')")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('/tmp')")

    # Check that output plugins are shown.
    self.assertTrue(
        self.IsElementPresent(
            "css=grr-wizard-form:contains('DummyOutputPlugin')"))
    self.assertTrue(
        self.IsElementPresent("css=grr-wizard-form:contains('some regex')"))

    # Check that there's no deprecated rules summary.
    self.assertFalse(
        self.IsElementPresent("css=grr-wizard-form:contains('Regex rules')"))
    self.assertFalse(
        self.IsElementPresent("css=grr-wizard-form:contains('Integer rules')"))

    # Check that rules summary is present.
    self.assertTrue(
        self.IsElementPresent(
            "css=grr-wizard-form:contains('Client rule set')"))

    # Click on "Run" button
    self.Click("css=grr-new-hunt-wizard-form button.Next")

    self.WaitUntil(self.IsElementPresent,
                   "css=grr-wizard-form:contains('Created Hunt')")

    # Close the window and check that the hunt was created.
    self.Click("css=button.Next")

    # Select newly created hunt.
    # NOTE(review): presumably the logged-in test user is "gui_user" - confirm.
    self.Click("css=grr-hunts-list td:contains('gui_user')")

    # Check that correct details are displayed in hunt details tab.
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-hunt-inspector:contains('GenericHunt')")
    self.WaitUntil(self.IsElementPresent,
                   "css=grr-hunt-inspector:contains('Flow Arguments')")

    self.assertTrue(
        self.IsElementPresent("css=grr-hunt-inspector:contains('Paths')"))
    self.assertTrue(
        self.IsElementPresent("css=grr-hunt-inspector:contains('/tmp')"))

    self.assertTrue(
        self.IsElementPresent(
            "css=grr-hunt-inspector:contains('DummyOutputPlugin')"))
    self.assertTrue(
        self.IsElementPresent("css=grr-hunt-inspector:contains('some regex')"))

    # Check that there's no deprecated rules summary.
    self.assertFalse(
        self.IsElementPresent("css=grr-hunt-inspector:contains('Regex rules')"))
    self.assertFalse(
        self.IsElementPresent(
            "css=grr-hunt-inspector:contains('Integer rules')"))

    # Check that rules summary is present.
    self.assertTrue(
        self.IsElementPresent(
            "css=grr-hunt-inspector:contains('Client Rule Set')"))

    # Check that the hunt object was actually created
    if data_store.RelationalDBEnabled():
      hunts_list = sorted(
          data_store.REL_DB.ReadHuntObjects(offset=0, count=10),
          key=lambda x: x.create_time)
      self.assertLen(hunts_list, 1)

      # Check that the hunt was created with a correct flow
      hunt = hunts_list[0]

      self.assertEqual(hunt.args.standard.flow_name,
                       file_finder.FileFinder.__name__)
      self.assertEqual(hunt.args.standard.flow_args.paths[0], "/tmp")
      self.assertEqual(hunt.args.standard.flow_args.pathtype,
                       rdf_paths.PathSpec.PathType.TSK)
      # self.assertEqual(hunt.args.flow_args.ignore_errors, True)
      self.assertTrue(hunt.output_plugins[0].plugin_name, "DummyOutputPlugin")

      # Check that hunt was not started
      self.assertEqual(hunt.hunt_state, hunt.HuntState.PAUSED)

      lib_hunt.StartHunt(hunt.hunt_id)

      hunt_rules = self.FindForemanRules(
          rdfvalue.RDFURN("hunts").Add(hunt.hunt_id), token=self.token)
    else:
      hunts_root = aff4.FACTORY.Open("aff4:/hunts", token=self.token)
      hunts_list = list(hunts_root.OpenChildren())
      self.assertLen(hunts_list, 1)

      # Check that the hunt was created with a correct flow
      hunt = hunts_list[0]
      self.assertEqual(hunt.args.flow_runner_args.flow_name,
                       file_finder.FileFinder.__name__)
      self.assertEqual(hunt.args.flow_args.paths[0], "/tmp")
      self.assertEqual(hunt.args.flow_args.pathtype,
                       rdf_paths.PathSpec.PathType.TSK)
      # self.assertEqual(hunt.args.flow_args.ignore_errors, True)
      self.assertTrue(hunt.runner_args.output_plugins[0].plugin_name,
                      "DummyOutputPlugin")

      # Check that hunt was not started
      self.assertEqual(hunt.Get(hunt.Schema.STATE), "PAUSED")

      with aff4.FACTORY.Open(hunt.urn, mode="rw", token=self.token) as hunt:
        hunt.Run()

      hunt_rules = self.FindForemanRules(hunt.urn, token=self.token)

    # Check that the hunt was created with correct rules
    self.assertLen(hunt_rules, 1)
    lifetime = hunt_rules[0].GetLifetime()
    lifetime -= rdfvalue.Duration("2w")
    self.assertLessEqual(lifetime, rdfvalue.Duration("1s"))

    r = hunt_rules[0].client_rule_set

    self.assertEqual(r.match_mode,
                     foreman_rules.ForemanClientRuleSet.MatchMode.MATCH_ANY)
    self.assertLen(r.rules, 3)

    self.assertEqual(r.rules[0].rule_type,
                     foreman_rules.ForemanClientRule.Type.OS)
    self.assertEqual(r.rules[0].os.os_windows, False)
    self.assertEqual(r.rules[0].os.os_linux, False)
    self.assertEqual(r.rules[0].os.os_darwin, True)

    self.assertEqual(r.rules[1].rule_type,
                     foreman_rules.ForemanClientRule.Type.INTEGER)
    self.assertEqual(r.rules[1].integer.field, "CLIENT_CLOCK")
    self.assertEqual(
        r.rules[1].integer.operator,
        foreman_rules.ForemanIntegerClientRule.Operator.GREATER_THAN)
    self.assertEqual(r.rules[1].integer.value, 1336650631137737)

    self.assertEqual(r.rules[2].rule_type,
                     foreman_rules.ForemanClientRule.Type.REGEX)
    self.assertEqual(r.rules[2].regex.field, "SYSTEM")
    self.assertEqual(r.rules[2].regex.attribute_regex, "Linux")
 def setUp(self):
     """Creates a mutation pool and an empty multi-type test collection."""
     super(MultiTypeCollectionTest, self).setUp()
     collection_urn = rdfvalue.RDFURN("aff4:/mt_collection/testAddScan")
     self.collection = multi_type_collection.MultiTypeCollection(collection_urn)
     self.pool = data_store.DB.GetMutationPool()
Example #10
0
 def ClientURNFromURN(urn):
     """Extracts the leading client component of *urn* as a ClientURN."""
     first_component = rdfvalue.RDFURN(urn).Split()[0]
     return rdf_client.ClientURN(first_component)
Example #11
0
    class SchemaCls(standard.VFSDirectory.SchemaCls):
        """The schema for the client."""
        # URN of the index used by the indexed attributes below (hostname,
        # FQDN, usernames, MAC, IPs).
        client_index = rdfvalue.RDFURN("aff4:/index/client")

        FLEETSPEAK_ENABLED = aff4.Attribute(
            "metadata:IsFleetspeak", rdfvalue.RDFBool,
            "Whether this client uses Fleetspeak for comms.")

        CERT = aff4.Attribute("metadata:cert", rdf_crypto.RDFX509Cert,
                              "The PEM encoded cert of the client.")

        FILESYSTEM = aff4.Attribute("aff4:filesystem",
                                    rdf_client_fs.Filesystems,
                                    "Filesystems on the client.")

        CLIENT_INFO = aff4.Attribute("metadata:ClientInfo",
                                     rdf_client.ClientInformation,
                                     "GRR client information",
                                     "GRR client",
                                     default=rdf_client.ClientInformation())

        LAST_BOOT_TIME = aff4.Attribute("metadata:LastBootTime",
                                        rdfvalue.RDFDatetime,
                                        "When the machine was last booted",
                                        "BootTime")

        FIRST_SEEN = aff4.Attribute(
            "metadata:FirstSeen", rdfvalue.RDFDatetime,
            "First time the client registered with us", "FirstSeen")

        # Information about the host.
        HOSTNAME = aff4.Attribute("metadata:hostname",
                                  rdfvalue.RDFString,
                                  "Hostname of the host.",
                                  "Host",
                                  index=client_index)
        FQDN = aff4.Attribute("metadata:fqdn",
                              rdfvalue.RDFString,
                              "Fully qualified hostname of the host.",
                              "FQDN",
                              index=client_index)

        SYSTEM = aff4.Attribute("metadata:system", rdfvalue.RDFString,
                                "Operating System class.", "System")
        UNAME = aff4.Attribute("metadata:uname", rdfvalue.RDFString,
                               "Uname string.", "Uname")
        OS_RELEASE = aff4.Attribute("metadata:os_release", rdfvalue.RDFString,
                                    "OS Major release number.", "Release")
        OS_VERSION = aff4.Attribute("metadata:os_version",
                                    rdf_client.VersionString,
                                    "OS Version number.", "Version")

        # ARCH values come from platform.uname machine value, e.g. x86_64, AMD64.
        ARCH = aff4.Attribute("metadata:architecture", rdfvalue.RDFString,
                              "Architecture.", "Architecture")
        INSTALL_DATE = aff4.Attribute("metadata:install_date",
                                      rdfvalue.RDFDatetime, "Install Date.",
                                      "Install")

        # The knowledge base is used for storing data about the host and users.
        # This is currently a slightly odd object as we only use some of the fields.
        # The proto itself is used in Artifact handling outside of GRR (e.g. Plaso).
        # Over time we will migrate fields into this proto, but for now it is a mix.
        KNOWLEDGE_BASE = aff4.Attribute("metadata:knowledge_base",
                                        rdf_client.KnowledgeBase,
                                        "Artifact Knowledge Base",
                                        "KnowledgeBase")

        GRR_CONFIGURATION = aff4.Attribute(
            "aff4:client_configuration", rdf_protodict.Dict,
            "Running configuration for the GRR client.", "Config")

        LIBRARY_VERSIONS = aff4.Attribute(
            "aff4:library_versions", rdf_protodict.Dict,
            "Running library versions for the client.", "Libraries")

        USERNAMES = aff4.Attribute("aff4:user_names",
                                   SpaceSeparatedStringArray,
                                   "A space separated list of system users.",
                                   "Usernames",
                                   index=client_index)

        # This information is duplicated from the INTERFACES attribute but is done
        # to allow for fast searching by mac address.
        MAC_ADDRESS = aff4.Attribute("aff4:mac_addresses",
                                     rdfvalue.RDFString,
                                     "A hex encoded MAC address.",
                                     "MAC",
                                     index=client_index)

        KERNEL = aff4.Attribute("aff4:kernel_version", rdfvalue.RDFString,
                                "Kernel version string.", "KernelVersion")

        # Same for IP addresses.
        HOST_IPS = aff4.Attribute("aff4:host_ips",
                                  rdfvalue.RDFString,
                                  "An IP address.",
                                  "Host_ip",
                                  index=client_index)

        PING = aff4.Attribute(
            "metadata:ping",
            rdfvalue.RDFDatetime,
            "The last time the server heard from this client.",
            "LastCheckin",
            versioned=False,
            default=0)

        CLOCK = aff4.Attribute("metadata:clock",
                               rdfvalue.RDFDatetime,
                               "The last clock read on the client "
                               "(Can be used to estimate client clock skew).",
                               "Clock",
                               versioned=False)

        CLIENT_IP = aff4.Attribute(
            "metadata:client_ip",
            rdfvalue.RDFString,
            "The ip address this client connected from.",
            "Client_ip",
            versioned=False)

        # This is the last foreman rule that applied to us
        LAST_FOREMAN_TIME = aff4.Attribute(
            "aff4:last_foreman_time",
            rdfvalue.RDFDatetime,
            "The last time the foreman checked us.",
            versioned=False)

        LAST_CRASH = aff4.Attribute("aff4:last_crash",
                                    rdf_client.ClientCrash,
                                    "Last client crash.",
                                    creates_new_object_version=False,
                                    versioned=False)

        VOLUMES = aff4.Attribute("aff4:volumes", rdf_client_fs.Volumes,
                                 "Client disk volumes.")

        INTERFACES = aff4.Attribute("aff4:interfaces",
                                    rdf_client_network.Interfaces,
                                    "Network interfaces.", "Interfaces")

        HARDWARE_INFO = aff4.Attribute("aff4:hardware_info",
                                       rdf_client.HardwareInfo,
                                       "Various hardware information.",
                                       default=rdf_client.HardwareInfo())

        MEMORY_SIZE = aff4.Attribute(
            "aff4:memory_size", rdfvalue.ByteSize,
            "Amount of memory this client's machine has.")

        # Cloud VM information.
        CLOUD_INSTANCE = aff4.Attribute("metadata:cloud_instance",
                                        rdf_cloud.CloudInstance,
                                        "Information about cloud machines.")
Example #12
0
 def client_urn(self) -> rdfvalue.RDFURN:
     """The client id of this object wrapped as an RDFURN."""
     urn = rdfvalue.RDFURN(self.client_id)
     return urn
Example #13
0
from grr_response_server import notification
from grr_response_server import output_plugin
from grr_response_server.flows.general import export
from grr_response_server.gui import api_call_handler_base
from grr_response_server.gui import api_call_handler_utils
from grr_response_server.gui.api_plugins import client as api_client
from grr_response_server.gui.api_plugins import flow as api_flow
from grr_response_server.gui.api_plugins import output_plugin as api_output_plugin
from grr_response_server.gui.api_plugins import vfs as api_vfs
from grr_response_server.hunts import implementation
from grr_response_server.hunts import standard

from grr_response_server.rdfvalues import hunts as rdf_hunts
from grr_response_server.rdfvalues import objects as rdf_objects

HUNTS_ROOT_PATH = rdfvalue.RDFURN("aff4:/hunts")


class HuntNotFoundError(api_call_handler_base.ResourceNotFoundError):
    """Raised when a hunt with the requested id could not be found."""


class HuntFileNotFoundError(api_call_handler_base.ResourceNotFoundError):
    """Raised when a file requested from a hunt could not be found."""


class Error(Exception):
    """Base class for this module's errors."""
    pass


class InvalidHuntStateError(Error):
Example #14
0
 def GenerateSample(self, number=0):
     """Builds a sample client filesystem URN, varied by *number*."""
     sample_path = "aff4:/C.12342%s/fs/os/" % number
     return rdfvalue.RDFURN(sample_path)
Example #15
0
class AbstractClientStatsCronFlow(aff4_cronjobs.SystemCronFlow):
  """A cron job which opens every client in the system.

  We feed all the client objects to the AbstractClientStatsCollector instances.
  """

  # Root URN under which per-label fleet statistics are written.
  CLIENT_STATS_URN = rdfvalue.RDFURN("aff4:/stats/ClientFleetStats")

  def BeginProcessing(self):
    """Hook called once before any client is processed."""
    pass

  def ProcessLegacyClient(self, ping, client):
    """Processes one legacy (AFF4) client; subclasses must override."""
    raise NotImplementedError()

  def ProcessClientFullInfo(self, client_full_info):
    """Processes one relational-DB client; subclasses must override."""
    raise NotImplementedError()

  def FinishProcessing(self):
    """Hook called once after all clients have been processed."""
    pass

  def _GetClientLabelsList(self, client):
    """Get set of labels applied to this client."""
    return set(["All"] + list(client.GetLabelsNames(owner="GRR")))

  def _StatsForLabel(self, label):
    """Returns the ClientFleetStats writer for a label, creating it lazily."""
    if label not in self.stats:
      self.stats[label] = aff4.FACTORY.Create(
          self.CLIENT_STATS_URN.Add(label),
          aff4_stats.ClientFleetStats,
          mode="w",
          token=self.token)
    return self.stats[label]

  def Start(self):
    """Retrieve all the clients for the AbstractClientStatsCollectors."""
    try:

      self.stats = {}

      self.BeginProcessing()

      processed_count = 0
      if data_store.RelationalDBReadEnabled():
        for client in _IterateAllClients():
          self.ProcessClientFullInfo(client)
          processed_count += 1
          # This flow is not dead: we don't want to run out of lease time.
          self.HeartBeat()
      else:
        for ping, client in _IterateAllLegacyClients(self.token):
          self.ProcessLegacyClient(ping, client)
          processed_count += 1
          # This flow is not dead: we don't want to run out of lease time.
          self.HeartBeat()

      self.FinishProcessing()
      # Flush all per-label stats writers.
      for fd in itervalues(self.stats):
        fd.Close()

      logging.info("%s: processed %d clients.", self.__class__.__name__,
                   processed_count)
    except Exception as e:  # pylint: disable=broad-except
      # Log with traceback, then re-raise so the cron framework sees failure.
      logging.exception("Error while calculating stats: %s", e)
      raise
Example #16
0
def main(argv):
  """Entry point for the config_updater maintenance tool.

  Dispatches on flags.FLAGS.subparser_name to the requested action:
  initialization, key generation, client repacking, user management,
  signed-binary/artifact uploads, config editing, notifications and key
  rotation.

  Args:
    argv: Unused command line arguments.

  Raises:
    RuntimeError: If no valid configuration is specified.
    ValueError: If an upload destination is invalid or no artifacts are
      given to delete.
  """
  del argv  # Unused.

  token = config_updater_util.GetToken()
  grr_config.CONFIG.AddContext(contexts.COMMAND_LINE_CONTEXT)
  grr_config.CONFIG.AddContext(contexts.CONFIG_UPDATER_CONTEXT)

  if flags.FLAGS.subparser_name == "initialize":
    config_lib.ParseConfigCommandLine()
    if flags.FLAGS.noprompt:
      config_updater_util.InitializeNoPrompt(grr_config.CONFIG, token=token)
    else:
      config_updater_util.Initialize(grr_config.CONFIG, token=token)
    return

  server_startup.Init()

  try:
    print("Using configuration %s" % grr_config.CONFIG)
  except AttributeError:
    raise RuntimeError("No valid config specified.")

  if flags.FLAGS.subparser_name == "generate_keys":
    try:
      config_updater_util.GenerateKeys(
          grr_config.CONFIG, overwrite_keys=flags.FLAGS.overwrite_keys)
    except RuntimeError as e:
      # GenerateKeys will raise if keys exist and overwrite_keys is not set.
      print("ERROR: %s" % e)
      sys.exit(1)
    grr_config.CONFIG.Write()

  elif flags.FLAGS.subparser_name == "repack_clients":
    upload = not flags.FLAGS.noupload
    repacking.TemplateRepacker().RepackAllTemplates(upload=upload, token=token)

  elif flags.FLAGS.subparser_name == "show_user":
    maintenance_utils.ShowUser(flags.FLAGS.username, token=token)

  elif flags.FLAGS.subparser_name == "update_user":
    try:
      maintenance_utils.UpdateUser(
          flags.FLAGS.username,
          flags.FLAGS.password,
          flags.FLAGS.add_labels,
          flags.FLAGS.delete_labels,
          token=token)
    except maintenance_utils.UserError as e:
      print(e)

  elif flags.FLAGS.subparser_name == "delete_user":
    maintenance_utils.DeleteUser(flags.FLAGS.username, token=token)

  elif flags.FLAGS.subparser_name == "add_user":
    labels = []
    if not flags.FLAGS.noadmin:
      labels.append("admin")

    if flags.FLAGS.labels:
      labels.extend(flags.FLAGS.labels)

    try:
      maintenance_utils.AddUser(
          flags.FLAGS.username, flags.FLAGS.password, labels, token=token)
    except maintenance_utils.UserError as e:
      print(e)

  elif flags.FLAGS.subparser_name == "upload_python":
    python_hack_root_urn = grr_config.CONFIG.Get("Config.python_hack_root")
    # Cap reads at 30 MiB; use a context manager so the handle is closed
    # promptly instead of leaking until interpreter exit.
    with open(flags.FLAGS.file, "rb") as fd:
      content = fd.read(1024 * 1024 * 30)
    aff4_path = flags.FLAGS.dest_path
    platform = flags.FLAGS.platform
    if not aff4_path:
      aff4_path = python_hack_root_urn.Add(platform.lower()).Add(
          os.path.basename(flags.FLAGS.file))
    # Refuse destinations outside the python-hack subtree.
    if not str(aff4_path).startswith(str(python_hack_root_urn)):
      raise ValueError("AFF4 path must start with %s." % python_hack_root_urn)
    context = ["Platform:%s" % platform.title(), "Client Context"]
    maintenance_utils.UploadSignedConfigBlob(
        content, aff4_path=aff4_path, client_context=context, token=token)

  elif flags.FLAGS.subparser_name == "upload_exe":
    # Cap reads at 30 MiB; close the file deterministically.
    with open(flags.FLAGS.file, "rb") as fd:
      content = fd.read(1024 * 1024 * 30)
    context = ["Platform:%s" % flags.FLAGS.platform.title(), "Client Context"]

    if flags.FLAGS.dest_path:
      dest_path = rdfvalue.RDFURN(flags.FLAGS.dest_path)
    else:
      dest_path = grr_config.CONFIG.Get(
          "Executables.aff4_path", context=context).Add(
              os.path.basename(flags.FLAGS.file))

    # Now upload to the destination.
    maintenance_utils.UploadSignedConfigBlob(
        content, aff4_path=dest_path, client_context=context, token=token)

    print("Uploaded to %s" % dest_path)

  elif flags.FLAGS.subparser_name == "set_var":
    config = grr_config.CONFIG
    print("Setting %s to %s" % (flags.FLAGS.var, flags.FLAGS.val))
    if flags.FLAGS.val.startswith("["):  # Allow setting of basic lists.
      flags.FLAGS.val = flags.FLAGS.val[1:-1].split(",")
    config.Set(flags.FLAGS.var, flags.FLAGS.val)
    config.Write()

  elif flags.FLAGS.subparser_name == "upload_raw":
    if not flags.FLAGS.dest_path:
      flags.FLAGS.dest_path = aff4.ROOT_URN.Add("config").Add("raw")
    uploaded = config_updater_util.UploadRaw(
        flags.FLAGS.file, flags.FLAGS.dest_path, token=token)
    print("Uploaded to %s" % uploaded)

  elif flags.FLAGS.subparser_name == "upload_artifact":
    # Read the file exactly once; the same bytes are parse-checked and
    # then uploaded (the original opened and read the file twice, leaking
    # both handles).
    with open(flags.FLAGS.file, "rb") as fd:
      artifact_data = fd.read()
    # NOTE(security): yaml.load executes arbitrary constructors on untrusted
    # input; artifact files are operator-supplied here, but consider
    # yaml.safe_load if that ever changes.
    yaml.load(artifact_data)  # Check it will parse.
    try:
      artifact.UploadArtifactYamlFile(
          artifact_data, overwrite=flags.FLAGS.overwrite_artifact)
    except rdf_artifacts.ArtifactDefinitionError as e:
      print("Error %s. You may need to set --overwrite_artifact." % e)

  elif flags.FLAGS.subparser_name == "delete_artifacts":
    artifact_list = flags.FLAGS.artifact
    if not artifact_list:
      raise ValueError("No artifact to delete given.")
    artifact_registry.DeleteArtifactsFromDatastore(artifact_list, token=token)
    print("Artifacts %s deleted." % artifact_list)

  elif flags.FLAGS.subparser_name == "download_missing_rekall_profiles":
    print("Downloading missing Rekall profiles.")
    s = rekall_profile_server.GRRRekallProfileServer()
    s.GetMissingProfiles()

  elif flags.FLAGS.subparser_name == "set_global_notification":
    notification = aff4_users.GlobalNotification(
        type=flags.FLAGS.type,
        header=flags.FLAGS.header,
        content=flags.FLAGS.content,
        link=flags.FLAGS.link)
    if flags.FLAGS.show_from:
      notification.show_from = rdfvalue.RDFDatetime().ParseFromHumanReadable(
          flags.FLAGS.show_from)
    if flags.FLAGS.duration:
      notification.duration = rdfvalue.Duration().ParseFromHumanReadable(
          flags.FLAGS.duration)

    print("Setting global notification.")
    print(notification)

    with aff4.FACTORY.Create(
        aff4_users.GlobalNotificationStorage.DEFAULT_PATH,
        aff4_type=aff4_users.GlobalNotificationStorage,
        mode="rw",
        token=token) as storage:
      storage.AddNotification(notification)
  elif flags.FLAGS.subparser_name == "rotate_server_key":
    print("""
You are about to rotate the server key. Note that:

  - Clients might experience intermittent connection problems after
    the server keys rotated.

  - It's not possible to go back to an earlier key. Clients that see a
    new certificate will remember the cert's serial number and refuse
    to accept any certificate with a smaller serial number from that
    point on.
    """)

    if builtins.input("Continue? [yN]: ").upper() == "Y":
      if flags.FLAGS.keylength:
        keylength = int(flags.FLAGS.keylength)
      else:
        keylength = grr_config.CONFIG["Server.rsa_key_length"]

      maintenance_utils.RotateServerKey(
          cn=flags.FLAGS.common_name, keylength=keylength)
  elif flags.FLAGS.subparser_name == "migrate_data":
    data_migration.Migrate()
Exemple #17
0
    def HandleRequest(self, request):
        """Handles given HTTP request.

        Matches the request against the API router, performs ACL checks,
        records the requesting user, runs the selected handler and renders
        its result as a JSON or binary-stream HTTP response. All expected
        failure modes are translated to appropriate HTTP status codes.

        Args:
          request: The incoming HTTP request object.

        Returns:
          A response built via _BuildResponse/_BuildStreamingResponse.
        """
        impersonated_username = config.CONFIG["AdminUI.debug_impersonate_user"]
        if impersonated_username:
            # Debug-only override: act as the configured user rather than
            # the authenticated one.
            logging.info("Overriding user as %s", impersonated_username)
            request.user = config.CONFIG["AdminUI.debug_impersonate_user"]

        if not aff4_users.GRRUser.IsValidUsername(request.user):
            return self._BuildResponse(
                403, dict(message="Invalid username: %s" % request.user))

        try:
            router, method_metadata, args = self._router_matcher.MatchRouter(
                request)
        except access_control.UnauthorizedAccess as e:
            logging.exception("Access denied to %s (%s): %s", request.path,
                              request.method, e)

            # Expose the denial reason/subject in headers for API clients.
            additional_headers = {
                "X-GRR-Unauthorized-Access-Reason":
                utils.SmartStr(e.message).replace("\n", ""),
                "X-GRR-Unauthorized-Access-Subject":
                utils.SmartStr(e.subject)
            }
            return self._BuildResponse(
                403,
                dict(message="Access denied by ACL: %s" %
                     utils.SmartStr(e.message),
                     subject=utils.SmartStr(e.subject)),
                headers=additional_headers)

        except ApiCallRouterNotFoundError as e:
            return self._BuildResponse(404, dict(message=e.message))
        except werkzeug_exceptions.MethodNotAllowed as e:
            return self._BuildResponse(405, dict(message=e.message))
        except Error as e:
            logging.exception("Can't match URL to router/method: %s", e)

            return self._BuildResponse(
                500, dict(message=str(e), traceBack=traceback.format_exc()))

        request.method_metadata = method_metadata
        request.parsed_args = args

        # SetUID() is called here so that ACL checks done by the router do not
        # clash with datastore ACL checks.
        # TODO(user): increase token expiry time.
        token = self.BuildToken(request, 60).SetUID()

        # We send a blind-write request to ensure that the user object is created
        # for a user specified by the username.
        user_urn = rdfvalue.RDFURN("aff4:/users/").Add(request.user)
        # We can't use conventional AFF4 interface, since aff4.FACTORY.Create will
        # create a new version of the object for every call.
        with data_store.DB.GetMutationPool() as pool:
            pool.MultiSet(user_urn, {
                aff4_users.GRRUser.SchemaCls.TYPE:
                [aff4_users.GRRUser.__name__],
                aff4_users.GRRUser.SchemaCls.LAST:
                [rdfvalue.RDFDatetime.Now().SerializeToDataStore()]
            },
                          replace=True)

        if data_store.RelationalDBWriteEnabled():
            # Mirror the user record into the relational DB as well.
            data_store.REL_DB.WriteGRRUser(request.user)

        handler = None
        try:
            # ACL checks are done here by the router. If this method succeeds (i.e.
            # does not raise), then handlers run without further ACL checks (they're
            # free to do some in their own implementations, though).
            handler = getattr(router, method_metadata.name)(args, token=token)

            # Sanity checks: the resolved handler must agree with the router
            # metadata about argument and result types.
            if handler.args_type != method_metadata.args_type:
                raise RuntimeError(
                    "Handler args type doesn't match "
                    "method args type: %s vs %s" %
                    (handler.args_type, method_metadata.args_type))

            binary_result_type = (
                api_call_router.RouterMethodMetadata.BINARY_STREAM_RESULT_TYPE)

            if (handler.result_type != method_metadata.result_type and
                    not (handler.result_type is None and
                         method_metadata.result_type == binary_result_type)):
                raise RuntimeError(
                    "Handler result type doesn't match "
                    "method result type: %s vs %s" %
                    (handler.result_type, method_metadata.result_type))

            # HEAD method is only used for checking the ACLs for particular API
            # methods.
            if request.method == "HEAD":
                # If the request would return a stream, we add the Content-Length
                # header to the response.
                if (method_metadata.result_type ==
                        method_metadata.BINARY_STREAM_RESULT_TYPE):
                    binary_stream = handler.Handle(args, token=token)
                    return self._BuildResponse(
                        200, {"status": "OK"},
                        method_name=method_metadata.name,
                        no_audit_log=method_metadata.no_audit_log_required,
                        content_length=binary_stream.content_length,
                        token=token)
                else:
                    return self._BuildResponse(
                        200, {"status": "OK"},
                        method_name=method_metadata.name,
                        no_audit_log=method_metadata.no_audit_log_required,
                        token=token)

            if (method_metadata.result_type ==
                    method_metadata.BINARY_STREAM_RESULT_TYPE):
                binary_stream = handler.Handle(args, token=token)
                return self._BuildStreamingResponse(
                    binary_stream, method_name=method_metadata.name)
            else:
                # Regular (non-streaming) call: render result as JSON in the
                # format mode requested by the client.
                format_mode = GetRequestFormatMode(request, method_metadata)
                result = self.CallApiHandler(handler, args, token=token)
                rendered_data = self._FormatResultAsJson(
                    result, format_mode=format_mode)

                return self._BuildResponse(
                    200,
                    rendered_data,
                    method_name=method_metadata.name,
                    no_audit_log=method_metadata.no_audit_log_required,
                    token=token)
        except access_control.UnauthorizedAccess as e:
            logging.exception("Access denied to %s (%s) with %s: %s",
                              request.path, request.method,
                              method_metadata.name, e)

            additional_headers = {
                "X-GRR-Unauthorized-Access-Reason":
                utils.SmartStr(e.message).replace("\n", ""),
                "X-GRR-Unauthorized-Access-Subject":
                utils.SmartStr(e.subject)
            }
            return self._BuildResponse(
                403,
                dict(message="Access denied by ACL: %s" % e.message,
                     subject=utils.SmartStr(e.subject)),
                headers=additional_headers,
                method_name=method_metadata.name,
                no_audit_log=method_metadata.no_audit_log_required,
                token=token)
        except api_call_handler_base.ResourceNotFoundError as e:
            return self._BuildResponse(
                404,
                dict(message=e.message),
                method_name=method_metadata.name,
                no_audit_log=method_metadata.no_audit_log_required,
                token=token)
        except NotImplementedError as e:
            return self._BuildResponse(
                501,
                dict(message=e.message),
                method_name=method_metadata.name,
                no_audit_log=method_metadata.no_audit_log_required,
                token=token)
        except Exception as e:  # pylint: disable=broad-except
            # Catch-all: surface unexpected handler failures as HTTP 500 with
            # a traceback included for debugging.
            logging.exception("Error while processing %s (%s) with %s: %s",
                              request.path, request.method,
                              handler.__class__.__name__, e)
            return self._BuildResponse(
                500,
                dict(message=str(e), traceBack=traceback.format_exc()),
                method_name=method_metadata.name,
                no_audit_log=method_metadata.no_audit_log_required,
                token=token)
Exemple #18
0
    def testNewArtifactLoaded(self):
        """Simulate a new artifact being loaded into the store via the UI.

        Exercises ReloadDatastoreArtifacts: artifacts present in the
        datastore become visible in the registry, and registry entries
        claiming a datastore source that is absent from the datastore are
        cleared.
        """
        # This artifact is written to the datastore only (not the registry).
        cmd_artifact = """name: "TestCmdArtifact"
doc: "Test command artifact for dpkg."
sources:
- type: "COMMAND"
  attributes:
    cmd: "/usr/bin/dpkg"
    args: ["--list"]
labels: [ "Software" ]
supported_os: [ "Linux" ]
"""
        # This artifact is registered in the registry only (not written to
        # the datastore).
        no_datastore_artifact = """name: "NotInDatastore"
doc: "Test command artifact for dpkg."
sources:
- type: "COMMAND"
  attributes:
    cmd: "/usr/bin/dpkg"
    args: ["--list"]
labels: [ "Software" ]
supported_os: [ "Linux" ]
"""
        test_registry = artifact_registry.ArtifactRegistry()
        test_registry.ClearRegistry()
        test_registry.AddDatastoreSource(
            rdfvalue.RDFURN("aff4:/artifact_store"))
        # Mark clean so the registry does not reload before we are ready.
        test_registry._dirty = False
        with utils.Stubber(artifact_registry, "REGISTRY", test_registry):
            # Neither artifact is known before the datastore reload.
            with self.assertRaises(rdf_artifacts.ArtifactNotRegisteredError):
                artifact_registry.REGISTRY.GetArtifact("TestCmdArtifact")

            with self.assertRaises(rdf_artifacts.ArtifactNotRegisteredError):
                artifact_registry.REGISTRY.GetArtifact("NotInDatastore")

            # Add artifact to datastore but not registry
            artifact_coll = artifact_registry.ArtifactCollection(
                rdfvalue.RDFURN("aff4:/artifact_store"))
            with data_store.DB.GetMutationPool() as pool:
                for artifact_val in artifact_registry.REGISTRY.ArtifactsFromYaml(
                        cmd_artifact):
                    artifact_coll.Add(artifact_val, mutation_pool=pool)

            # Add artifact to registry but not datastore
            for artifact_val in artifact_registry.REGISTRY.ArtifactsFromYaml(
                    no_datastore_artifact):
                artifact_registry.REGISTRY.RegisterArtifact(
                    artifact_val,
                    source="datastore",
                    overwrite_if_exists=False)

            # We need to reload all artifacts from the data store before trying to get
            # the artifact.
            artifact_registry.REGISTRY.ReloadDatastoreArtifacts()
            self.assertTrue(
                artifact_registry.REGISTRY.GetArtifact("TestCmdArtifact"))

            # We registered this artifact with datastore source but didn't
            # write it into aff4. This simulates an artifact that was
            # uploaded in the UI then later deleted. We expect it to get
            # cleared when the artifacts are reloaded from the datastore.
            with self.assertRaises(rdf_artifacts.ArtifactNotRegisteredError):
                artifact_registry.REGISTRY.GetArtifact("NotInDatastore")
 def testFnmatchPatternCorrectlyMatchesFilesBelowDirectory(self):
     """A trailing-star pattern grants children, not the directory itself."""
     self.helper.Allow("aff4:/some/*")
     # A subject below the allowed directory passes the check.
     self.assertTrue(self.helper.CheckAccess(self.subject, self.token))
     # The bare directory does not match "aff4:/some/*" and is rejected.
     with self.assertRaises(access_control.UnauthorizedAccess):
         self.helper.CheckAccess(rdfvalue.RDFURN("aff4:/some"), self.token)
Exemple #20
0
 def ResultCollectionForArtifact(cls, session_id, artifact_name):
   """Return the indexed result collection for one artifact of a flow."""
   # The collection lives at "<session results urn>_<artifact name>".
   results_urn = session_id.Add(flow.RESULTS_SUFFIX)
   collection_urn = rdfvalue.RDFURN(
       "%s_%s" % (str(results_urn), utils.SmartStr(artifact_name)))
   return sequential_collection.GeneralIndexedCollection(collection_urn)
Exemple #21
0
class WellKnownSessionTest2(WellKnownSessionTest):
    """Another testing well known flow."""
    # Fixed, well-known session id so messages can be routed to this flow
    # without creating a new session.
    well_known_session_id = rdfvalue.SessionID(queue=rdfvalue.RDFURN("test"),
                                               flow_name="TestSessionId2")
Exemple #22
0
  def testDuplicatedAddDatastore(self):
    """Registering the same datastore URN twice only succeeds once."""
    datastore_urn = rdfvalue.RDFURN("aff4:/artifacts")
    # First registration is new; the repeat is reported as a duplicate.
    self.assertTrue(self.sources.AddDatastore(datastore_urn))
    self.assertFalse(self.sources.AddDatastore(datastore_urn))
Exemple #23
0
 def setUp(self):
     """Create a fresh client context with loaded certificates."""
     super(BasicContextTests, self).setUp()
     self.context = self.to_test_context()
     self.context.LoadCertificates()
     # Arbitrary well-formed worker session id used by the tests below.
     self.session_id = rdfvalue.RDFURN("W:1234")
Exemple #24
0
  def testRDFStruct(self):
    """Exercise assignment and validation semantics of RDF structs.

    Covers string/nested/repeated fields, coercion on assignment (RDFURN),
    and enum handling including case-insensitive names, digit strings and
    out-of-range values.
    """
    tested = TestStruct()

    # cant set integers for string attributes.
    self.assertRaises(type_info.TypeValueError, setattr, tested, "foobar", 1)

    # This is a string so a string assignment is good:
    tested.foobar = "Hello"
    self.assertEqual(tested.foobar, "Hello")

    # This field must be another TestStruct instance..
    self.assertRaises(ValueError, setattr, tested, "nested", "foo")

    # Its ok to assign a compatible semantic protobuf.
    tested.nested = TestStruct(foobar="nested_foo")

    # Not OK to use the wrong semantic type.
    self.assertRaises(ValueError, setattr, tested, "nested",
                      PartialTest1(int=1))

    # Not OK to assign a serialized string - even if it is for the right type -
    # since there is no type checking.
    serialized = TestStruct(foobar="nested_foo").SerializeToString()
    self.assertRaises(ValueError, setattr, tested, "nested", serialized)

    # Nested accessors.
    self.assertEqual(tested.nested.foobar, "nested_foo")

    # Test repeated elements:

    # Empty list is ok:
    tested.repeated = []
    self.assertEqual(tested.repeated, [])

    tested.repeated = ["string"]
    self.assertEqual(tested.repeated, ["string"])

    self.assertRaises(type_info.TypeValueError, setattr, tested, "repeated",
                      [1, 2, 3])

    # Coercing on assignment. This field is an RDFURN:
    tested.urn = "www.example.com"
    self.assertTrue(isinstance(tested.urn, rdfvalue.RDFURN))

    self.assertEqual(tested.urn, rdfvalue.RDFURN("www.example.com"))

    # Test enums.
    self.assertEqual(tested.type, 3)
    self.assertEqual(tested.type.name, "THIRD")

    tested.type = "FIRST"
    self.assertEqual(tested.type, 1)

    # Check that string assignments are case-insensitive.
    tested.type = "second"
    self.assertEqual(tested.type, 2)
    tested.type = "ThIrD"
    self.assertEqual(tested.type, 3)

    # Non-valid types are rejected.
    self.assertRaises(type_info.TypeValueError, setattr, tested, "type", "Foo")

    # Strings of digits should be accepted.
    tested.type = "2"
    self.assertEqual(tested.type, 2)
    # unicode strings should be treated the same way.
    tested.type = u"2"
    self.assertEqual(tested.type, 2)
    # Out of range values are permitted and preserved through serialization.
    tested.type = 4
    self.assertEqual(tested.type, 4)
    serialized_type = str(tested.type)
    tested.type = 1
    tested.type = serialized_type
    self.assertEqual(tested.type, 4)
Exemple #25
0
 def testHuntIDFromURN(self):
   """HuntIDFromURN extracts the bare hunt id from a hunt URN."""
   hunt_urn = rdfvalue.RDFURN("aff4:/hunts/H:12345678")
   self.assertEqual(hunt.HuntIDFromURN(hunt_urn), "12345678")
 def _TestCollection(self, collection_id):
     """Build a TestSequentialCollection rooted at `collection_id`."""
     collection_urn = rdfvalue.RDFURN(collection_id)
     return TestSequentialCollection(collection_urn)
Exemple #27
0
def GetAFF4PythonHackRoot():
    """Return the AFF4 URN under which python hacks are stored."""
    python_hack_root = "aff4:/config/python_hacks"
    return rdfvalue.RDFURN(python_hack_root)
Exemple #28
0
class AbstractClientStatsCronFlow(aff4_cronjobs.SystemCronFlow):
  """A cron job which opens every client in the system.

  We feed all the client objects to the AbstractClientStatsCollector instances.
  Subclasses override the Begin/Process/Finish hooks; per-label results are
  written under CLIENT_STATS_URN.
  """

  # AFF4 location under which one ClientFleetStats object per label is kept.
  CLIENT_STATS_URN = rdfvalue.RDFURN("aff4:/stats/ClientFleetStats")

  # An rdfvalue.Duration specifying a window of last-ping
  # timestamps to analyze. Clients that haven't communicated with GRR servers
  # longer than the given period will be skipped.
  recency_window = None

  def BeginProcessing(self):
    """Hook invoked once before any client is processed."""
    pass

  def ProcessLegacyClient(self, ping, client):
    """Process one AFF4 client object; subclasses must override.

    Args:
      ping: The client's last-ping timestamp (Schema.PING value).
      client: The opened VFSGRRClient object.
    """
    raise NotImplementedError()

  def ProcessClientFullInfo(self, client_full_info):
    """Process one relational-DB client record; subclasses must override."""
    raise NotImplementedError()

  def FinishProcessing(self):
    """Hook invoked once after the last client has been processed."""
    pass

  def _GetClientLabelsList(self, client):
    """Get set of labels applied to this client."""
    # "All" is always included so fleet-wide aggregates get updated too.
    return set(["All"] + list(client.GetLabelsNames(owner="GRR")))

  def _StatsForLabel(self, label):
    """Return the ClientFleetStats writer for `label`, creating it lazily."""
    if label not in self.stats:
      self.stats[label] = aff4.FACTORY.Create(
          self.CLIENT_STATS_URN.Add(label),
          aff4_stats.ClientFleetStats,
          mode="w",
          token=self.token)
    return self.stats[label]

  def Start(self):
    """Retrieve all the clients for the AbstractClientStatsCollectors."""
    try:

      # Maps label name -> open ClientFleetStats fd (see _StatsForLabel).
      self.stats = {}

      self.BeginProcessing()

      processed_count = 0

      if data_store.RelationalDBEnabled():
        for client_info in _IterateAllClients(
            recency_window=self.recency_window):
          self.ProcessClientFullInfo(client_info)
          processed_count += 1

          # Log and heartbeat once per batch so we don't lose the lease.
          if processed_count % _CLIENT_READ_BATCH_SIZE == 0:
            self.Log("Processed %d clients.", processed_count)
            self.HeartBeat()

        if processed_count != 0:
          self.Log("Processed %d clients.", processed_count)

      else:
        # Legacy AFF4 path: walk all children of the root and keep only
        # client objects.
        root_children = aff4.FACTORY.Open(
            aff4.ROOT_URN, token=self.token).OpenChildren(mode="r")
        for batch in collection.Batch(root_children, _CLIENT_READ_BATCH_SIZE):
          for child in batch:
            if not isinstance(child, aff4_grr.VFSGRRClient):
              continue

            last_ping = child.Get(child.Schema.PING)

            self.ProcessLegacyClient(last_ping, child)
            processed_count += 1
            # This flow is not dead: we don't want to run out of lease time.
            self.HeartBeat()

      self.FinishProcessing()
      # Flush every per-label stats object opened during processing.
      for fd in itervalues(self.stats):
        fd.Close()

      logging.info("%s: processed %d clients.", self.__class__.__name__,
                   processed_count)
    except Exception as e:  # pylint: disable=broad-except
      # Log with traceback, then re-raise so the cron framework sees failure.
      logging.exception("Error while calculating stats: %s", e)
      raise
Exemple #29
0
    def ReadHuntClientResourcesStats(self, hunt_id, cursor=None):
        """Read/calculate hunt client resources stats.

        Aggregates CPU and network usage over all top-level flows of the
        given hunt in a single SQL query (counts, sums, sums of squares and
        histogram bin counts), then fetches the ten most expensive flows as
        "worst performers".

        Args:
          hunt_id: The hunt id string whose flows are aggregated.
          cursor: A MySQL cursor (injected by the connection-pool decorator).

        Returns:
          An rdf_stats.ClientResourcesStats with running stats, histograms
          and worst_performers populated.
        """
        hunt_id_int = db_utils.HuntIDToInt(hunt_id)

        # Aggregate query; histogram bin expressions are appended below, so
        # result columns are: 7 scalar aggregates followed by the CPU (x2)
        # and network bin counts, in that order.
        query = """
      SELECT
        COUNT(*),
        SUM(user_cpu_time_used_micros),
        SUM((user_cpu_time_used_micros) * (user_cpu_time_used_micros)),
        SUM(system_cpu_time_used_micros),
        SUM((system_cpu_time_used_micros) * (system_cpu_time_used_micros)),
        SUM(network_bytes_sent),
        SUM(network_bytes_sent * network_bytes_sent),
    """

        # CPU bins are defined in seconds; the DB stores microseconds.
        scaled_bins = [
            int(1000000 * b)
            for b in rdf_stats.ClientResourcesStats.CPU_STATS_BINS
        ]

        query += self._BinsToQuery(scaled_bins, "(user_cpu_time_used_micros)")
        query += ","
        query += self._BinsToQuery(scaled_bins,
                                   "(system_cpu_time_used_micros)")
        query += ","
        query += self._BinsToQuery(
            rdf_stats.ClientResourcesStats.NETWORK_STATS_BINS,
            "network_bytes_sent")

        # Only top-level flows of this hunt (parent_flow_id IS NULL).
        query += " FROM flows "
        query += "FORCE INDEX(flows_by_hunt) "
        query += "WHERE parent_hunt_id = %s AND parent_flow_id IS NULL"

        cursor.execute(query, [hunt_id_int])

        response = cursor.fetchone()
        (count, user_sum, user_sq_sum, system_sum, system_sq_sum, network_sum,
         network_sq_sum) = response[:7]

        # Convert micros -> seconds (sums) and micros^2 -> seconds^2
        # (sums of squares, hence the 1e12 divisor).
        stats = rdf_stats.ClientResourcesStats(
            user_cpu_stats=rdf_stats.RunningStats(
                num=count,
                sum=db_utils.MicrosToSeconds(int(user_sum or 0)),
                sum_sq=int(user_sq_sum or 0) / 1e12,
            ),
            system_cpu_stats=rdf_stats.RunningStats(
                num=count,
                sum=db_utils.MicrosToSeconds(int(system_sum or 0)),
                sum_sq=int(system_sq_sum or 0) / 1e12,
            ),
            network_bytes_sent_stats=rdf_stats.RunningStats(
                num=count,
                sum=float(network_sum or 0),
                sum_sq=float(network_sq_sum or 0),
            ),
        )

        # Walk the remaining result columns in the same order the bin
        # expressions were appended to the query above.
        offset = 7
        stats.user_cpu_stats.histogram = rdf_stats.StatsHistogram()
        for b_num, b_max_value in zip(
                response[offset:],
                rdf_stats.ClientResourcesStats.CPU_STATS_BINS):
            stats.user_cpu_stats.histogram.bins.append(
                rdf_stats.StatsHistogramBin(range_max_value=b_max_value,
                                            num=b_num))

        offset += len(rdf_stats.ClientResourcesStats.CPU_STATS_BINS)
        stats.system_cpu_stats.histogram = rdf_stats.StatsHistogram()
        for b_num, b_max_value in zip(
                response[offset:],
                rdf_stats.ClientResourcesStats.CPU_STATS_BINS):
            stats.system_cpu_stats.histogram.bins.append(
                rdf_stats.StatsHistogramBin(range_max_value=b_max_value,
                                            num=b_num))

        offset += len(rdf_stats.ClientResourcesStats.CPU_STATS_BINS)
        stats.network_bytes_sent_stats.histogram = rdf_stats.StatsHistogram()
        for b_num, b_max_value in zip(
                response[offset:],
                rdf_stats.ClientResourcesStats.NETWORK_STATS_BINS):
            stats.network_bytes_sent_stats.histogram.bins.append(
                rdf_stats.StatsHistogramBin(range_max_value=b_max_value,
                                            num=b_num))

        # Ten most expensive flows by combined CPU time; zero-usage flows
        # are excluded.
        query = """
      SELECT
        client_id, flow_id, user_cpu_time_used_micros,
        system_cpu_time_used_micros, network_bytes_sent
      FROM flows
      FORCE INDEX(flows_by_hunt)
      WHERE parent_hunt_id = %s AND parent_flow_id IS NULL AND
            (user_cpu_time_used_micros > 0 OR
             system_cpu_time_used_micros > 0 OR
             network_bytes_sent > 0)
      ORDER BY (user_cpu_time_used_micros + system_cpu_time_used_micros) DESC
      LIMIT 10
    """

        cursor.execute(query, [hunt_id_int])

        for cid, fid, ucpu, scpu, nbs in cursor.fetchall():
            client_id = db_utils.IntToClientID(cid)
            flow_id = db_utils.IntToFlowID(fid)
            stats.worst_performers.append(
                rdf_client_stats.ClientResources(
                    client_id=client_id,
                    session_id=rdfvalue.RDFURN(client_id).Add(flow_id),
                    cpu_usage=rdf_client_stats.CpuSeconds(
                        user_cpu_time=db_utils.MicrosToSeconds(ucpu),
                        system_cpu_time=db_utils.MicrosToSeconds(scpu),
                    ),
                    network_bytes_sent=nbs))

        return stats
 def testRaisesWhenNeitherPythonHackNorExecutableURNIsPassed(self):
     """URNs outside the python-hack and executable roots are rejected."""
     unrelated_urn = rdfvalue.RDFURN("aff4:/foo/bar")
     self.assertRaises(ValueError,
                       signed_binary_utils.SignedBinaryIDFromURN,
                       unrelated_urn)