Example #1
 def Parse(self, cmd, args, stdout, stderr, return_val, time_taken,
           knowledge_base):
     """Parse the mount command output."""
     _ = stderr, time_taken, args, knowledge_base  # Unused.
     self.CheckReturn(cmd, return_val)
     result = rdf_protodict.AttributedDict()
     for entry in self._field_parser.ParseEntries(stdout):
         line_str = " ".join(entry)
         mount_rslt = self.mount_re.match(line_str)
         if mount_rslt:
             device, mount_point, fs_type, option_str = mount_rslt.groups()
             result = rdf_client_fs.Filesystem()
             result.device = device
             result.mount_point = mount_point
             result.type = fs_type
             # Parse the options as a dict, since some items may be key/value
             # pairs. KeyValueParser natively produces an OrderedDict, so use it.
             options = KeyValueParser(
                 term=",").ParseToOrderedDict(option_str)
             # Keys without values get assigned [] by default. Since such keys
             # are implicitly true when declared, change any [] values to [True].
             for k, v in iteritems(options):
                 options[k] = v or [True]
             result.options = rdf_protodict.AttributedDict(**options)
             yield result
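
The normalization loop above is easy to misread. Here is a minimal standalone sketch (not part of the original example; it assumes, as the comment states, that KeyValueParser maps keys without values to an empty list) showing what the "v or [True]" idiom does to a plain dict of mount options:

# Standalone sketch (an assumption, not GRR source code).
# Mount options such as "rw" carry no value, so the parser is assumed to
# return them as empty lists; "size=16G" keeps its value.
options = {"rw": [], "relatime": [], "size": ["16G"]}
for k, v in options.items():
    options[k] = v or [True]  # empty lists become [True], real values pass through
assert options == {"rw": [True], "relatime": [True], "size": ["16G"]}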
Example #2
    def testParse(self):
        filt = filters.ItemFilter()

        one = rdf_protodict.AttributedDict(test1="1", test2=[2, 3])
        foo = rdf_protodict.AttributedDict(test1="foo", test2=["bar", "baz"])
        fs = rdf_client_fs.Filesystem(device="/dev/sda1", mount_point="/root")
        objs = [one, foo, fs]

        results = filt.Parse(objs, u"test1 is '1'")
        self.assertLen(results, 1)
        self.assertEqual("test1", results[0].key)
        self.assertEqual("1", results[0].value)

        results = filt.Parse(objs, u"test1 is '2'")
        self.assertFalse(results)

        results = filt.Parse(objs, u"test2 contains 3")
        self.assertLen(results, 1)
        self.assertEqual("test2", results[0].key)
        self.assertEqual([2, 3], results[0].value)

        results = filt.Parse(objs, u"test1 is '1' or test1 contains 'foo'")
        self.assertLen(results, 2)
        self.assertEqual("test1", results[0].key)
        self.assertEqual("1", results[0].value)
        self.assertEqual("test1", results[1].key)
        self.assertEqual("foo", results[1].value)

        results = filt.Parse(objs, u"mount_point is '/root'")
        self.assertLen(results, 1)
        self.assertEqual("mount_point", results[0].key)
        self.assertEqual("/root", results[0].value)
Example #3
 def testInitialize(self):
   arnie = {"target": "Sarah Connor", "mission": "Protect"}
   t800 = {"target": "Sarah Connor", "mission": "Terminate"}
   terminator = rdf_protodict.AttributedDict(arnie)
   self.assertEqual(terminator.GetItem("target"), "Sarah Connor")
   self.assertEqual(terminator.GetItem("mission"), "Protect")
   terminator = rdf_protodict.AttributedDict(t800)
   self.assertEqual(terminator.target, "Sarah Connor")
   self.assertEqual(terminator.mission, "Terminate")
   # We don't want a conflicted Terminator
   self.assertFalse(terminator.GetItem("happy_face"))
Example #4
    def testParse(self):
        filt = filters.ObjectFilter()

        hit1 = rdf_protodict.AttributedDict(test="hit1")
        hit2 = rdf_protodict.AttributedDict(test="hit2")
        miss = rdf_protodict.AttributedDict(test="miss")
        objs = [hit1, hit2, miss]
        results = filt.Parse(objs, u"test is 'hit1'")
        self.assertCountEqual([hit1], results)
        results = filt.Parse(objs, u"test is 'hit2'")
        self.assertCountEqual([hit2], results)
        results = filt.Parse(objs, u"test inset 'hit1,hit2'")
        self.assertCountEqual([hit1, hit2], results)
Example #5
 def testInitFromNonStringKeyedDictRaises(self):
   with self.assertRaises(TypeError):
     rdf_protodict.AttributedDict({
         1: "foo",
         2: "bar",
         3: "baz",
     })
Example #6
  def ParseFile(self, knowledge_base, pathspec, filedesc):
    """Identifies the paths set within a file.

    Expands paths within the context of the file, but does not infer fully
    expanded paths from external states. There are plenty of cases where path
    attributes are unresolved, e.g. sourcing other files.

    Lines are not handled literally. A field parser is used to:
    - Break lines with multiple distinct statements into separate lines (e.g.
      lines with a ';' separating stanzas).
    - Strip out comments.
    - Handle line continuations to capture multi-line configurations into one
      statement.

    Args:
      knowledge_base: A knowledgebase for the client to whom the file belongs.
      pathspec: A pathspec corresponding to the parsed file.
      filedesc: A file-like object to parse.

    Yields:
      An attributed dict for each env var. 'name' contains the path name, and
      'vals' contains its values.
    """
    del knowledge_base  # Unused.

    lines = self.parser.ParseEntries(utils.ReadFileBytesAsUnicode(filedesc))
    if os.path.basename(pathspec.path) in self._CSH_FILES:
      paths = self._ParseCshVariables(lines)
    else:
      paths = self._ParseShVariables(lines)
    for path_name, path_vals in iteritems(paths):
      yield rdf_protodict.AttributedDict(
          config=pathspec.path, name=path_name, vals=path_vals)
Example #7
  def Parse(self, stat, file_obj, unused_knowledge_base):
    lines = set([
        l.strip() for l in utils.ReadFileBytesAsUnicode(file_obj).splitlines()
    ])

    users = []
    bad_lines = []
    for line in lines:
      # Behaviour of At/Cron is undefined for lines with whitespace-separated
      # fields/usernames.
      if " " in line:
        bad_lines.append(line)
      elif line:  # drop empty lines
        users.append(line)

    filename = stat.pathspec.path
    cfg = {"filename": filename, "users": users}
    yield rdf_protodict.AttributedDict(**cfg)

    if bad_lines:
      yield rdf_anomaly.Anomaly(
          type="PARSER_ANOMALY",
          symptom="Dodgy entries in %s." % (filename),
          reference_pathspec=stat.pathspec,
          finding=bad_lines)
Example #8
    def ParseFile(
        self,
        knowledge_base: rdf_client.KnowledgeBase,
        pathspec: rdf_paths.PathSpec,
        filedesc: IO[bytes],
    ) -> Iterator[rdf_protodict.AttributedDict]:
        del knowledge_base  # Unused.

        lines = set([
            l.strip()
            for l in utils.ReadFileBytesAsUnicode(filedesc).splitlines()
        ])

        users = []
        bad_lines = []
        for line in lines:
            # Behaviour of At/Cron is undefined for lines with
            # whitespace-separated fields/usernames.
            if " " in line:
                bad_lines.append(line)
            elif line:  # drop empty lines
                users.append(line)

        filename = pathspec.path
        cfg = {"filename": filename, "users": users}
        yield rdf_protodict.AttributedDict(**cfg)

        if bad_lines:
            yield rdf_anomaly.Anomaly(type="PARSER_ANOMALY",
                                      symptom="Dodgy entries in %s." %
                                      (filename),
                                      reference_pathspec=pathspec,
                                      finding=bad_lines)
Example #9
    def Parse(self, stat, file_obj, knowledge_base):
        """Identifies the paths set within a file.

    Expands paths within the context of the file, but does not infer fully
    expanded paths from external states. There are plenty of cases where path
    attributes are unresolved, e.g. sourcing other files.

    Lines are not handled literally. A field parser is used to:
    - Break lines with multiple distinct statements into separate lines (e.g.
      lines with a ';' separating stanzas).
    - Strip out comments.
    - Handle line continuations to capture multi-line configurations into one
      statement.

    Args:
      stat: statentry
      file_obj: VFSFile
      knowledge_base: unused

    Yields:
      An attributed dict for each env var. 'name' contains the path name, and
      'vals' contains its values.
    """
        _ = knowledge_base
        lines = self.parser.ParseEntries(file_obj.read())
        if os.path.basename(stat.pathspec.path) in self._CSH_FILES:
            paths = self._ParseCshVariables(lines)
        else:
            paths = self._ParseShVariables(lines)
        for path_name, path_vals in iteritems(paths):
            yield rdf_protodict.AttributedDict(config=stat.pathspec.path,
                                               name=path_name,
                                               vals=path_vals)
Example #10
    def testInitFromCronObject(self):
        state = rdf_protodict.AttributedDict()
        state["quux"] = "norf"
        state["thud"] = "blargh"

        cron_job = rdf_cronjobs.CronJob()
        cron_job.cron_job_id = "foo"
        cron_job.current_run_id = "bar"
        cron_job.last_run_time = self._DATETIME("2001-01-01")
        cron_job.last_run_status = "FINISHED"
        cron_job.frequency = rdfvalue.Duration.From(1, rdfvalue.DAYS)
        cron_job.lifetime = rdfvalue.Duration.From(30, rdfvalue.DAYS)
        cron_job.enabled = False
        cron_job.forced_run_requested = True
        cron_job.state = state
        cron_job.description = "testdescription"

        api_cron_job = cron_plugin.ApiCronJob.InitFromObject(cron_job)

        self.assertEqual(api_cron_job.cron_job_id, "foo")
        self.assertEqual(api_cron_job.current_run_id, "bar")
        self.assertEqual(api_cron_job.description, "testdescription")
        self.assertEqual(api_cron_job.last_run_time,
                         self._DATETIME("2001-01-01"))
        self.assertEqual(api_cron_job.last_run_status, "FINISHED")
        self.assertEqual(api_cron_job.frequency,
                         rdfvalue.Duration.From(1, rdfvalue.DAYS))
        self.assertEqual(api_cron_job.lifetime,
                         rdfvalue.Duration.From(30, rdfvalue.DAYS))
        self.assertFalse(api_cron_job.enabled)
        self.assertTrue(api_cron_job.forced_run_requested)

        api_state_items = {_.key: _.value for _ in api_cron_job.state.items}
        self.assertEqual(api_state_items, {"quux": "norf", "thud": "blargh"})
Example #11
 def WriteState(self):
     if "w" in self.mode:
         self._ValidateState()
         self.Set(self.Schema.FLOW_ARGS(self.args))
         self.Set(self.Schema.FLOW_CONTEXT(self.context))
         self.Set(self.Schema.FLOW_RUNNER_ARGS(self.runner_args))
         protodict = rdf_protodict.AttributedDict().FromDict(self.state)
         self.Set(self.Schema.FLOW_STATE_DICT(protodict))
Example #12
 def CreatePluginAndDefaultState(cls, source_urn=None, args=None, token=None):
   state = rdf_protodict.AttributedDict()
   state["source_urn"] = source_urn
   state["args"] = args
   state["token"] = token
   plugin = cls(source_urn=source_urn, args=args, token=token)
   plugin.InitializeState(state)
   return plugin, state
Example #13
 def testRdfFormatterAttributedDict(self):
     sshd = rdf_config_file.SshdConfig()
     sshd.config = rdf_protodict.AttributedDict(skynet="operational")
     template = "{config.skynet}"
     hinter = hints.Hinter(template=template)
     expected = "operational"
     result = hinter.Render(sshd)
     self.assertEqual(expected, result)
Example #14
  def testGetBytestringItem(self):
    adict = rdf_protodict.AttributedDict()
    adict.foo = 42
    adict.bar = b"quux"
    adict.baz = [4, 8, 15, 16, 23, 42]

    self.assertEqual(adict[b"foo"], 42)
    self.assertEqual(adict[b"bar"], b"quux")
    self.assertEqual(adict[b"baz"], [4, 8, 15, 16, 23, 42])
Example #15
 def _Load(self, expression):
   self._Flush()
   parser = config_file.KeyValueParser(
       kv_sep=":", sep=",", term=(r"\s+", r"\n"))
   parsed = {}
   for entry in parser.ParseEntries(expression):
     parsed.update(entry)
   self.cfg = rdf_protodict.AttributedDict(parsed)
   return parsed
Example #16
  def testSetStringItem(self):
    adict = rdf_protodict.AttributedDict()
    adict["foo"] = 42
    adict["bar"] = b"quux"
    adict["baz"] = [4, 8, 15, 16, 23, 42]

    self.assertEqual(adict.foo, 42)
    self.assertEqual(adict.bar, b"quux")
    self.assertEqual(adict.baz, [4, 8, 15, 16, 23, 42])
Example #17
  def testInitFromBytestringKeyedDict(self):
    adict = rdf_protodict.AttributedDict({
        b"foo": 42,
        b"bar": b"quux",
        b"baz": [4, 8, 15, 16, 23, 42],
    })

    self.assertEqual(adict.foo, 42)
    self.assertEqual(adict.bar, b"quux")
    self.assertEqual(adict.baz, [4, 8, 15, 16, 23, 42])
Example #18
 def CreatePluginAndDefaultState(cls, source_urn=None, args=None):
     """Creates a plugin and returns its initial state."""
     state = rdf_protodict.AttributedDict()
     state["source_urn"] = source_urn
     if args is not None:
         args.Validate()
     state["args"] = args
     plugin = cls(source_urn=source_urn, args=args)
     plugin.InitializeState(state)
     return plugin, state
Example #19
  def testParse(self):
    filt = filters.ForEach()

    hit1 = rdf_protodict.AttributedDict(k1="v1", k2="v2", k3="v3")
    hit2 = rdf_protodict.AttributedDict(k1="v4", k2="v5", k3="v6")
    meta = rdf_protodict.AttributedDict(
        foo=["foo", "bar"], target=[hit1, hit2], null=[])
    objs = [meta]

    results = filt.Parse(objs, "target")
    self.assertEqual(2, len(results))
    self.assertItemsEqual([hit1, hit2], [r.item for r in results])

    results = filt.Parse(objs, "foo")
    self.assertEqual(2, len(results))
    self.assertItemsEqual(["foo", "bar"], [r.item for r in results])

    results = filt.Parse(objs, "null")
    self.assertEqual(0, len(results))
Example #20
 def ParseObjs(self, objs, expression):
   for key in self._Attrs(expression):
     # Key needs to be a string for rdfvalue.KeyValue
     key = utils.SmartStr(key)
     for obj in objs:
       val = self._GetVal(obj, key)
       if val:
         # Dict won't accept rdfvalue.RepeatedFieldHelper
         if isinstance(val, rdf_structs.RepeatedFieldHelper):
           val = list(val)
         yield rdf_protodict.AttributedDict({"key": key, "value": val})
Example #21
 def testParse(self):
     filt = filters.RDFFilter()
     cfg = rdf_protodict.AttributedDict()
     anom = rdf_anomaly.Anomaly()
     objs = [cfg, anom]
     results = filt.Parse(objs, "KnowledgeBase")
     self.assertFalse(results)
     results = filt.Parse(objs, "AttributedDict,KnowledgeBase")
     self.assertCountEqual([cfg], results)
     results = filt.Parse(objs, "Anomaly,AttributedDict,KnowledgeBase")
     self.assertCountEqual(objs, results)
Example #22
    def ParseFile(self, knowledge_base, pathspec, filedesc):
        del knowledge_base  # Unused.

        lines = set(l.strip() for l in filedesc.read().splitlines())

        users = list(filter(None, lines))

        filename = pathspec.path
        cfg = {"filename": filename, "users": users}

        yield rdf_protodict.AttributedDict(**cfg)
Example #23
  def Parse(self, stat, file_obj, knowledge_base):

    del knowledge_base  # Unused.

    lines = set([l.strip() for l in file_obj.read().splitlines()])

    users = list(filter(None, lines))

    filename = stat.pathspec.path
    cfg = {"filename": filename, "users": users}

    yield rdf_protodict.AttributedDict(**cfg)
Example #24
    def testParse(self):
        filt = filters.AttrFilter()

        hit1 = rdf_protodict.AttributedDict(k1="hit1", k2="found1", k3=[3, 4])
        hit2 = rdf_protodict.AttributedDict(k1="hit2", k2="found2")
        meta = rdf_protodict.AttributedDict(one=hit1, two=hit2)
        objs = [hit1, hit2, meta]

        results = filt.Parse(objs, "k1 k2 one.k3")
        self.assertLen(results, 5)
        r1, r2, r3, r4, r5 = results
        self.assertEqual("k1", r1.key)
        self.assertEqual("hit1", r1.value)
        self.assertEqual("k1", r2.key)
        self.assertEqual("hit2", r2.value)
        self.assertEqual("k2", r3.key)
        self.assertEqual("found1", r3.value)
        self.assertEqual("k2", r4.key)
        self.assertEqual("found2", r4.value)
        self.assertEqual("one.k3", r5.key)
        self.assertEqual([3, 4], r5.value)
Example #25
    def ParseFiles(
        self,
        knowledge_base: rdf_client.KnowledgeBase,
        pathspecs: Iterable[rdf_paths.PathSpec],
        filedescs: Iterable[IO[bytes]],
    ) -> Iterator[rdf_protodict.AttributedDict]:
        del knowledge_base  # Unused.

        config = {}
        for pathspec, file_obj in zip(pathspecs, filedescs):
            k, v = self._Parse(pathspec, file_obj)
            config[k] = v
        yield rdf_protodict.AttributedDict(config)
Example #26
 def Parse(self, cmd, args, stdout, stderr, return_val, knowledge_base):
     """Parse the sysctl output."""
     _ = stderr, args, knowledge_base  # Unused.
     self.CheckReturn(cmd, return_val)
     result = rdf_protodict.AttributedDict()
     # The KeyValueParser generates an ordered dict by default. The sysctl
     # values aren't order-dependent, but there's no need to un-order them.
     for k, v in self.lexer.ParseToOrderedDict(stdout).items():
         key = k.replace(".", "_")
         if len(v) == 1:
             v = v[0]
         result[key] = v
     return [result]
Example #27
  def testNestedAssignment(self):
    adict = rdf_protodict.AttributedDict()

    adict["foo"] = {}
    adict["foo"]["bar"] = 42
    adict["foo"][b"baz"] = "Lorem ipsum."

    adict[b"quux"] = {}
    adict[b"quux"]["norf"] = [4, 8, 15, 16, 23, 42]
    adict[b"quux"][b"thud"] = 3.14

    self.assertEqual(adict.foo["bar"], 42)
    self.assertEqual(adict.foo[b"baz"], "Lorem ipsum.")
    self.assertEqual(adict.quux["norf"], [4, 8, 15, 16, 23, 42])
    self.assertEqual(adict.quux[b"thud"], 3.14)
Example #28
 def Parse(self, unused_stat, file_obj, unused_knowledge_base):
   for entry in self.ParseEntries(file_obj.read()):
     if not entry:
       continue
     result = rdf_client_fs.Filesystem()
     result.device = entry[0].decode("string_escape")
     result.mount_point = entry[1].decode("string_escape")
     result.type = entry[2].decode("string_escape")
     options = KeyValueParser(term=",").ParseToOrderedDict(entry[3])
     # Keys without values get assigned [] by default. Since such keys are
     # implicitly true when declared, change any [] values to [True].
     for k, v in iteritems(options):
       options[k] = v or [True]
     result.options = rdf_protodict.AttributedDict(**options)
     yield result
Example #29
    def ParseMultiple(self, stats, file_objects, knowledge_base):

        del knowledge_base  # Unused.

        lines = set()
        for file_obj in file_objects:
            lines.update(set(l.strip() for l in file_obj.read().splitlines()))

        users = list(filter(None, lines))

        for stat in stats:
            filename = stat.pathspec.path
            cfg = {"filename": filename, "users": users}

            yield rdf_protodict.AttributedDict(**cfg)
Example #30
 def _GenConfig(self, cfg):
   """Interpolate configurations with defaults to generate actual configs."""
   # Some setting names may have a + or - suffix. These indicate that the
   # settings modify the default values.
   merged = self.default.copy()
   for setting, vals in iteritems(cfg):
     option, operator = (setting.split(None, 1) + [None])[:2]
     vals = set(vals)
     default = set(self.default.get(option, []))
     # If there is an operator, update the values accordingly.
     if operator == "+":
       vals = default.union(vals)
     elif operator == "-":
       vals = default.difference(vals)
     merged[option] = list(vals)
   return rdf_protodict.AttributedDict(**merged)
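
To make the "+"/"-" handling in _GenConfig concrete, here is a minimal self-contained sketch (not taken from the GRR source) that applies the same merge logic to plain dicts and sets:

# Standalone sketch (an assumption, not GRR source code) of the "+"/"-"
# suffix handling used by _GenConfig above.
default = {"Options": ["a", "b"]}
cfg = {"Options +": ["c"], "Other": ["x"]}

merged = dict(default)
for setting, vals in cfg.items():
    option, operator = (setting.split(None, 1) + [None])[:2]
    vals = set(vals)
    base = set(default.get(option, []))
    if operator == "+":        # "+" adds the values to the defaults
        vals = base.union(vals)
    elif operator == "-":      # "-" removes the values from the defaults
        vals = base.difference(vals)
    # sorted() only makes the result deterministic for the assert below;
    # the original code uses list(vals).
    merged[option] = sorted(vals)

assert merged == {"Options": ["a", "b", "c"], "Other": ["x"]}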