Example no. 1
0
  def ParseFiles(self, responses):
    """Extract Chrome history entries from each file we retrieved."""
    # Some of the upstream Find requests are expected to fail (paths such as
    # Chromium are absent on most machines), so success is not checked here.
    if not responses:
      return
    for response in responses:
      pathspec = response.stat_entry.pathspec
      if data_store.RelationalDBReadEnabled("filestore"):
        # Relational datastore: resolve a client path and read via file_store.
        fd = file_store.OpenFile(
            db.ClientPath.FromPathSpec(self.client_id, pathspec))
      else:
        # Legacy path: read the collected file through AFF4.
        fd = aff4.FACTORY.Open(
            response.stat_entry.AFF4Path(self.client_urn), token=self.token)

      parser = chrome_history.ChromeParser(fd)
      entry_count = 0
      for epoch64, dtype, url, dat1, dat2, dat3 in parser.Parse():
        entry_count += 1
        timestamp = datetime.datetime.utcfromtimestamp(epoch64 / 1e6)
        str_entry = "%s %s %s %s %s %s" % (timestamp, url, dat1, dat2, dat3,
                                           dtype)
        self.SendReply(rdfvalue.RDFString(utils.SmartStr(str_entry)))

      self.Log("Wrote %d Chrome History entries for user %s from %s",
               entry_count, self.args.username, pathspec.Basename())
      self.state.hist_count += entry_count
Example no. 2
0
    def testBasicParsingOldFormat(self):
        """Test we can parse a standard file."""

        def to_datetime(timestamp):
            """Convert a microsecond epoch value to a datetime.

            Falls back to returning the raw value when it cannot be
            converted (e.g. it is not a number) so the assertion below
            surfaces the bad value instead of masking it.
            """
            try:
                return (datetime.datetime(1970, 1, 1) +
                        datetime.timedelta(microseconds=timestamp))
            except (TypeError, ValueError):
                return timestamp

        history_file = os.path.join(self.base_path, "parser_test", "History")
        with io.open(history_file, mode="rb") as history_filedesc:
            history = chrome_history.ChromeParser()
            entries = list(history.Parse(history_file, history_filedesc))

        # Check that our results are properly time ordered.
        time_results = [x[0] for x in entries]
        self.assertEqual(time_results, sorted(time_results))

        self.assertEqual(str(to_datetime(entries[0][0])),
                         "2011-04-07 12:03:11")
        self.assertEqual(entries[0][2],
                         "http://start.ubuntu.com/10.04/Google/")

        self.assertEqual(str(to_datetime(entries[-1][0])),
                         "2011-05-23 08:37:27.061516")
        self.assertStartsWith(
            entries[-1][2], "https://chrome.google.com/webs"
            "tore/detail/mfjkgbjaikamkkojmak"
            "jclmkianficch")

        self.assertLen(entries, 71)
Example no. 3
0
    def testBasicParsing(self):
        """Test we can parse a standard file."""

        def to_datetime(timestamp):
            """Convert a microsecond epoch value to a datetime.

            Falls back to returning the raw value when it cannot be
            converted (e.g. it is not a number) so the assertion below
            surfaces the bad value instead of masking it.
            """
            try:
                return (datetime.datetime(1970, 1, 1) +
                        datetime.timedelta(microseconds=timestamp))
            except (TypeError, ValueError):
                return timestamp

        history_file = os.path.join(self.base_path, "parser_test", "History2")
        with io.open(history_file, mode="rb") as history_filedesc:
            history = chrome_history.ChromeParser()
            entries = list(history.Parse(history_file, history_filedesc))

        # Check that our results are properly time ordered.
        time_results = [x[0] for x in entries]
        self.assertEqual(time_results, sorted(time_results))

        self.assertEqual(str(to_datetime(entries[0][0])),
                         "2013-05-03 15:11:26.556635")
        self.assertStartsWith(
            entries[0][2], "https://www.google.ch/search?q=why+you+shouldn")

        self.assertEqual(str(to_datetime(entries[-1][0])),
                         "2013-05-03 15:11:39.763984")
        self.assertStartsWith(entries[-1][2], "http://www.test.ch/")

        self.assertLen(entries, 4)
Example no. 4
0
  def testNonSqliteDatabase(self):
    """A non-SQLite file parses to an empty result list instead of raising."""
    with temp.AutoTempFilePath(suffix="-journal") as filepath:
      with io.open(filepath, "wb") as filedesc:
        filedesc.write(b"foobar")

      with io.open(filepath, "rb") as filedesc:
        parser = chrome_history.ChromeParser()
        # Garbage input must yield no entries rather than an error.
        self.assertEmpty(list(parser.Parse(filepath, filedesc)))
Example no. 5
0
    def testTimeOrderingDownload(self):
        """Test we can correctly time order downloads and visits."""
        history_file = os.path.join(self.base_path, "parser_test", "History3")
        # Open via a context manager: the original leaked the file handle
        # (open() with no matching close()). Entries are materialized inside
        # the `with` so parsing finishes before the file is closed.
        with open(history_file, "rb") as history_filedesc:
            history = chrome_history.ChromeParser(history_filedesc)
            entries = list(history.Parse())

        # Check that our results are properly time ordered.
        time_results = [x[0] for x in entries]
        self.assertEqual(time_results, sorted(time_results))
        self.assertLen(entries, 23)
Esempio n. 6
0
  def ParseFiles(self, responses):
    """Extract Chrome history entries from each file we retrieved."""
    # Some of the upstream Find requests are expected to fail (paths such as
    # Chromium are absent on most machines), so success is not checked here.
    if not responses:
      return
    for response in responses:
      pathspec = response.stat_entry.pathspec
      client_path = db.ClientPath.FromPathSpec(self.client_id, pathspec)
      fd = file_store.OpenFile(client_path)

      parser = chrome_history.ChromeParser()
      entry_count = 0
      for epoch64, dtype, url, dat1, dat2, dat3 in parser.Parse(
          pathspec.CollapsePath(), fd):
        entry_count += 1
        timestamp = datetime.datetime.utcfromtimestamp(epoch64 / 1e6)
        str_entry = "%s %s %s %s %s %s" % (timestamp, url, dat1, dat2, dat3,
                                           dtype)
        self.SendReply(rdfvalue.RDFString(str_entry))

      self.Log("Wrote %d Chrome History entries for user %s from %s",
               entry_count, self.args.username, pathspec.Basename())
      self.state.hist_count += entry_count