Example #1
 def Touch(self, vfs_path, content=b""):
   path_type, components = rdf_objects.ParseCategorizedPath(vfs_path)
   client_path = db.ClientPath(
       client_id=self.client_id.Basename(),
       path_type=path_type,
       components=components)
   vfs_test_lib.CreateFile(client_path, content=content)
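
The helper above turns a categorized VFS path such as "fs/os/etc/hosts" into the relational db.ClientPath used throughout these examples ("fs/os" maps to PathType.OS, "fs/tsk" to PathType.TSK, "registry" to PathType.REGISTRY). A minimal standalone sketch, assuming GRR's grr_response_server module layout (exact import paths vary between versions) and a hypothetical client id:

from grr_response_server.databases import db
from grr_response_server.rdfvalues import objects as rdf_objects

# "fs/os/etc/hosts" -> (PathType.OS, ("etc", "hosts")).
path_type, components = rdf_objects.ParseCategorizedPath("fs/os/etc/hosts")
client_path = db.ClientPath(
    client_id="C.1000000000000000",  # hypothetical client id
    path_type=path_type,
    components=components)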
Example #2
    def CheckFilesDownloaded(self, fnames):
        for fname in fnames:
            if data_store.RelationalDBReadEnabled(category="vfs"):
                path_info = data_store.REL_DB.ReadPathInfo(
                    self.client_id.Basename(),
                    rdf_objects.PathInfo.PathType.OS,
                    components=self.FilenameToPathComponents(fname))
                size = path_info.stat_entry.st_size

            else:
                file_urn = self.FileNameToURN(fname)
                with aff4.FACTORY.Open(file_urn, token=self.token) as fd:
                    size = fd.Get(fd.Schema.SIZE)

            self.assertGreater(size, 100)

            if data_store.RelationalDBReadEnabled(category="filestore"):
                fd = file_store.OpenLatestFileVersion(
                    db.ClientPath(
                        self.client_id.Basename(),
                        rdf_objects.PathInfo.PathType.OS,
                        components=self.FilenameToPathComponents(fname)))

                # Make sure we can actually read the file.
                self.assertEqual(len(fd.read()), size)
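
FilenameToPathComponents and FileNameToURN are helpers defined elsewhere on the test class. A plausible sketch of the former, assuming (as Example 3 suggests) that the test files live under base_path/searching; the real helper may differ:

def FilenameToPathComponents(self, fname):
  # Hypothetical helper: strip the leading "/" and split the absolute
  # OS path into the component tuple that db.ClientPath expects.
  path = os.path.join(self.base_path, "searching", fname).lstrip("/")
  return tuple(path.split("/"))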
Example #3
    def CheckFilesDownloaded(self, fnames):
        for fname in fnames:
            if data_store.RelationalDBReadEnabled(category="vfs"):
                path_info = data_store.REL_DB.ReadPathInfo(
                    self.client_id.Basename(),
                    rdf_objects.PathInfo.PathType.OS,
                    components=self.FilenameToPathComponents(fname))
                size = path_info.stat_entry.st_size

            else:
                file_urn = self.FileNameToURN(fname)
                with aff4.FACTORY.Open(file_urn, token=self.token) as fd:
                    size = fd.Get(fd.Schema.SIZE)

            # Read raw bytes so the size and content checks below match
            # what file_store returns.
            with io.open(os.path.join(self.base_path, "searching", fname),
                         "rb") as fd:
                test_data = fd.read()

            self.assertEqual(size, len(test_data))

            if data_store.RelationalDBReadEnabled(category="filestore"):
                fd = file_store.OpenFile(
                    db.ClientPath(
                        self.client_id.Basename(),
                        rdf_objects.PathInfo.PathType.OS,
                        components=self.FilenameToPathComponents(fname)))

                # Make sure we can actually read the file.
                self.assertEqual(fd.read(), test_data)
Example #4
def CreateFileVersion(client_id, path, content=b"", timestamp=None, token=None):
  """Add a new version for a file."""
  if timestamp is None:
    timestamp = rdfvalue.RDFDatetime.Now()

  with test_lib.FakeTime(timestamp):
    path_type, components = rdf_objects.ParseCategorizedPath(path)
    client_path = db.ClientPath(client_id.Basename(), path_type, components)
    vfs_test_lib.CreateFile(client_path, content=content, token=token)
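
A hypothetical call site, pinning two versions of the same VFS file to distinct timestamps (RDFDatetime.FromSecondsSinceEpoch is GRR's standard way to build a fixed timestamp; the path is made up):

CreateFileVersion(
    client_id, "fs/os/tmp/evidence.txt",
    content=b"first version",
    timestamp=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(1000), token=token)
CreateFileVersion(
    client_id, "fs/os/tmp/evidence.txt",
    content=b"second version",
    timestamp=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(2000), token=token)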
Example #5
    def _ReadTestFile(self,
                      path_components,
                      path_type=rdf_objects.PathInfo.PathType.TSK):
        components = self.base_path.strip("/").split("/")
        components += path_components

        fd = file_store.OpenFile(
            db.ClientPath(self.client_id.Basename(),
                          path_type,
                          components=tuple(components)))
        return fd.read(10000000)
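
Note that components is passed as a tuple: db.ClientPath instances are used as dictionary keys (see Example 11), so their components must be hashable. A minimal sketch of the same read with a hypothetical TSK path and client id:

components = tuple("Windows/System32/notepad.exe".split("/"))
fd = file_store.OpenFile(
    db.ClientPath("C.1000000000000000",  # hypothetical client id
                  rdf_objects.PathInfo.PathType.TSK,
                  components=components))
data = fd.read(10000000)  # cap the read at ~10 MB, as above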
Example #6
  def CreateFileVersions(self, client_id, file_path):
    """Add a new version for a file."""
    path_type, components = rdf_objects.ParseCategorizedPath(file_path)
    client_path = db.ClientPath(client_id.Basename(), path_type, components)
    token = access_control.ACLToken(username="******")

    with test_lib.FakeTime(self.time_1):
      vfs_test_lib.CreateFile(
          client_path, "Hello World".encode("utf-8"), token=token)

    with test_lib.FakeTime(self.time_2):
      vfs_test_lib.CreateFile(
          client_path, "Goodbye World".encode("utf-8"), token=token)
Example #7
  def testIsDirectoryFlag(self):
    # Set up a directory.
    dir_path = "fs/os/Random/Directory"
    path_type, components = rdf_objects.ParseCategorizedPath(dir_path)
    client_path = db.ClientPath(self.client_id.Basename(), path_type,
                                components)
    token = access_control.ACLToken(username="******")
    vfs_test_lib.CreateDirectory(client_path, token=token)

    args = vfs_plugin.ApiGetFileDetailsArgs(
        client_id=self.client_id, file_path=self.file_path)
    result = self.handler.Handle(args, token=self.token)
    self.assertFalse(result.file.is_directory)

    args = vfs_plugin.ApiGetFileDetailsArgs(
        client_id=self.client_id, file_path=dir_path)
    result = self.handler.Handle(args, token=self.token)
    self.assertTrue(result.file.is_directory)
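
The test assumes self.file_path was registered as a regular (non-directory) file in setUp. A hypothetical equivalent setup, mirroring the directory creation above:

self.file_path = "fs/os/c/Downloads/a.gif"  # hypothetical path
path_type, components = rdf_objects.ParseCategorizedPath(self.file_path)
vfs_test_lib.CreateFile(
    db.ClientPath(self.client_id.Basename(), path_type, components),
    content=b"GIF89a", token=token)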
Example #8
  def testGetArtifact(self):
    """Test we can get a basic artifact."""

    client_mock = action_mocks.FileFinderClientMock()
    client_id = self.SetupClient(0, system="Linux")

    # Dynamically add an ArtifactSource specifying the base path.
    file_path = os.path.join(self.base_path, "hello.exe")
    coll1 = rdf_artifacts.ArtifactSource(
        type=rdf_artifacts.ArtifactSource.SourceType.FILE,
        attributes={"paths": [file_path]})
    self.fakeartifact.sources.append(coll1)

    artifact_list = ["FakeArtifact"]
    flow_test_lib.TestFlowHelper(
        aff4_flows.ArtifactCollectorFlow.__name__,
        client_mock,
        artifact_list=artifact_list,
        use_tsk=False,
        token=self.token,
        client_id=client_id)

    with open(file_path, "rb") as fd2:
      fd2.seek(0, 2)  # os.SEEK_END: position at end-of-file.
      expected_size = fd2.tell()

    if data_store.AFF4Enabled():
      # Test the AFF4 file that was created.
      fd1 = aff4.FACTORY.Open(
          "%s/fs/os/%s" % (client_id, file_path), token=self.token)
      size = fd1.Get(fd1.Schema.SIZE)
      self.assertEqual(size, expected_size)
    else:
      components = file_path.strip("/").split("/")
      fd = file_store.OpenFile(
          db.ClientPath(
              client_id.Basename(),
              rdf_objects.PathInfo.PathType.OS,
              components=tuple(components)))
      fd.Seek(0, 2)
      size = fd.Tell()
      self.assertEqual(size, expected_size)
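
The capitalized Seek/Tell calls are not typos: streams returned by file_store.OpenFile expose GRR-style method names (with lowercase aliases in some versions), unlike the builtin file handle above. The "seek to end, then tell" idiom is only a size probe; for the local file it is equivalent to:

expected_size = os.path.getsize(file_path)  # same value as seek(0, 2) + tell()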
Example #9
    def _MigrateVfsUrnGroup(self, vfs_urns):
        """Migrates history of given group of VFS URNs."""
        client_path_histories = dict()

        for fd in aff4.FACTORY.MultiOpen(vfs_urns, age=aff4.ALL_TIMES):
            client_id, vfs_path = fd.urn.Split(2)
            path_type, components = rdf_objects.ParseCategorizedPath(vfs_path)

            client_path = db.ClientPath(client_id, path_type, components)
            client_path_history = db.ClientPathHistory()

            for stat_entry in fd.GetValuesForAttribute(fd.Schema.STAT):
                client_path_history.AddStatEntry(stat_entry.age, stat_entry)

            for hash_entry in fd.GetValuesForAttribute(fd.Schema.HASH):
                client_path_history.AddHashEntry(hash_entry.age, hash_entry)

            client_path_histories[client_path] = client_path_history

        data_store.REL_DB.MultiWritePathHistory(client_path_histories)
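
fd.urn.Split(2) splits an AFF4 URN into at most two parts: the client id and the remaining categorized VFS path, which ParseCategorizedPath then decomposes. A hypothetical illustration:

urn = rdfvalue.RDFURN("aff4:/C.1000000000000000/fs/os/etc/hosts")
client_id, vfs_path = urn.Split(2)
# client_id == "C.1000000000000000", vfs_path == "fs/os/etc/hosts"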
Example #10
    def CheckFilesNotDownloaded(self, fnames):
        for fname in fnames:
            if data_store.RelationalDBReadEnabled(category="filestore"):
                try:
                    file_store.OpenFile(
                        db.ClientPath(
                            self.client_id.Basename(),
                            rdf_objects.PathInfo.PathType.OS,
                            components=self.FilenameToPathComponents(fname)))
                    self.fail("Found downloaded file: %s" % fname)
                except file_store.FileHasNoContentError:
                    pass
            else:
                file_urn = self.FileNameToURN(fname)
                with aff4.FACTORY.Open(file_urn, token=self.token) as fd:
                    # Directories have no size attribute.
                    obj_type = fd.Get(fd.Schema.TYPE)
                    if obj_type == aff4_standard.VFSDirectory.__name__:
                        continue

                    self.assertEqual(fd.Get(fd.Schema.SIZE), 0)
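
On the relational branch, the same negative check can be written with assertRaises, which reads more directly than try/except plus self.fail:

with self.assertRaises(file_store.FileHasNoContentError):
    file_store.OpenFile(
        db.ClientPath(
            self.client_id.Basename(),
            rdf_objects.PathInfo.PathType.OS,
            components=self.FilenameToPathComponents(fname)))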
Example #11
  def ReadLatestPathInfosWithHashBlobReferences(self,
                                                client_paths,
                                                max_timestamp=None,
                                                cursor=None):
    """Returns PathInfos that have corresponding HashBlobReferences."""
    path_infos = {client_path: None for client_path in client_paths}

    path_id_components = {}
    for client_path in client_paths:
      path_id = rdf_objects.PathID.FromComponents(client_path.components)
      path_id_components[path_id] = client_path.components

    params = []
    query = """
    SELECT t.client_id, t.path_type, t.path_id, unix_timestamp(t.timestamp),
           s.stat_entry, h.hash_entry
      FROM (SELECT h.client_id, h.path_type, h.path_id,
                   MAX(h.timestamp) AS timestamp
              FROM client_path_hash_entries AS h
        INNER JOIN hash_blob_references AS b
                ON b.hash_id = h.sha256
             WHERE {conditions}
          GROUP BY client_id, path_type, path_id) AS t
 LEFT JOIN client_path_stat_entries AS s
        ON s.client_id = t.client_id
       AND s.path_type = t.path_type
       AND s.path_id = t.path_id
       AND s.timestamp = t.timestamp
 LEFT JOIN client_path_hash_entries AS h
        ON h.client_id = t.client_id
       AND h.path_type = t.path_type
       AND h.path_id = t.path_id
       AND h.timestamp = t.timestamp
    """

    path_conditions = []

    for client_path in client_paths:
      path_id = rdf_objects.PathID.FromComponents(client_path.components)

      path_conditions.append("""
      (client_id = %s AND path_type = %s AND path_id = %s)
      """)
      params.append(db_utils.ClientIDToInt(client_path.client_id))
      params.append(int(client_path.path_type))
      params.append(path_id.AsBytes())

    conditions = " OR ".join(path_conditions)
    if max_timestamp is not None:
      conditions = "({}) AND unix_timestamp(timestamp) <= %s".format(conditions)
      params.append(mysql_utils.RDFDatetimeToTimestamp(max_timestamp))

    cursor.execute(query.format(conditions=conditions), params)
    for row in cursor.fetchall():
      # pyformat: disable
      (client_id, path_type, path_id_bytes, timestamp,
       stat_entry_bytes, hash_entry_bytes) = row
      # pyformat: enable

      path_id = rdf_objects.PathID.FromBytes(path_id_bytes)
      components = path_id_components[path_id]

      if stat_entry_bytes is not None:
        stat_entry = rdf_client_fs.StatEntry.FromSerializedString(
            stat_entry_bytes)
      else:
        stat_entry = None

      hash_entry = rdf_crypto.Hash.FromSerializedString(hash_entry_bytes)

      client_path = db.ClientPath(
          client_id=db_utils.IntToClientID(client_id),
          path_type=path_type,
          components=components)

      path_info = rdf_objects.PathInfo(
          path_type=path_type,
          components=components,
          stat_entry=stat_entry,
          hash_entry=hash_entry,
          timestamp=mysql_utils.TimestampToRDFDatetime(timestamp))

      path_infos[client_path] = path_info

    return path_infos
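
The path_id_components lookup on the result rows works because PathID round-trips losslessly through its byte encoding: the id computed from a component tuple equals the id reconstructed from the bytes stored in MySQL. A minimal sketch:

path_id = rdf_objects.PathID.FromComponents(("etc", "hosts"))
assert rdf_objects.PathID.FromBytes(path_id.AsBytes()) == path_id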