Code Example #1
def generateDirHashes(db, crypto, cacheDir):
    conn = db.conn
    r = conn.execute("SELECT COUNT(*) FROM Files WHERE Dir = 1")
    nDirs = r.fetchone()[0]
    logger.info("Hashing %d directories", nDirs)
    hashes = 0
    unique = 0
    with progressbar.ProgressBar(max_value=nDirs) as bar:
        z = conn.cursor()
        r = conn.execute(
            "SELECT Inode, Device, LastSet, Names.name, Checksums.ChecksumId, Checksum "
            "FROM Files "
            "JOIN Names ON Names.NameId = Files.NameID "
            "JOIN Checksums ON Files.ChecksumId = Checksums.ChecksumId "
            "WHERE Dir = 1 "
            "ORDER BY Checksum")
        lastHash = None
        batch = r.fetchmany(10000)
        while batch:
            for row in batch:
                inode = row['Inode']
                device = row['Device']
                last = row['LastSet']
                oldHash = row['Checksum']
                cksId = row['ChecksumId']
                files = db.readDirectory((inode, device), last)
                hashes += 1
                if oldHash == lastHash:
                    continue
                lastHash = oldHash
                unique += 1

                #logger.debug("Rehashing directory %s (%d, %d)@%d: %s(%d)", crypto.decryptFilename(row['Name']),inode, device, last, oldHash, cksId)
                #logger.debug("    Directory contents: %s", str(files))
                (newHash, newSize) = Util.hashDir(crypto,
                                                  files,
                                                  True,
                                                  decrypt=True)
                #logger.info("Rehashed %s => %s.  %d files", oldHash, newHash, newSize)
                bar.update(hashes)
                try:
                    if newHash != oldHash:
                        z.execute(
                            "UPDATE Checksums SET Checksum = :newHash WHERE ChecksumId = :id",
                            {
                                "newHash": newHash,
                                "id": cksId
                            })
                except Exception as e:
                    logger.error("Caught exception: %s->%s :: %s", oldHash,
                                 newHash, str(e))
            batch = r.fetchmany()
    logger.info("Hashed %d directories (%d unique)", hashes, unique)
Code Example #2
File: setDirHashes.py  Project: Acidburn0zzz/Tardis
def main():
    logging.basicConfig(level=logging.INFO)
    crypto = None
    token = None
    args = processArgs()
    password = Util.getPassword(args.password, args.passwordfile,
                                args.passwordurl, args.passwordprog)

    if password:
        crypto = TardisCrypto.TardisCrypto(password, args.client)

    path = os.path.join(args.database, args.client, args.dbname)
    db = TardisDB.TardisDB(path, token=token, backup=False)

    if crypto:
        (a, b) = db.getKeys()
        crypto.setKeys(a, b)

    conn = db.conn
    dirs = conn.execute(
        "SELECT Name as name, Inode AS inode, Device AS device, FirstSet as firstset, LastSet AS lastset FROM Files JOIN Names ON Files.NameId = Names.NameId WHERE Dir = 1"
    )
    while True:
        batch = dirs.fetchmany(1000)
        if not batch:
            break

        for d in batch:
            name = d['name']
            inode = d['inode']
            device = d['device']
            firstset = d['firstset']
            lastset = d['lastset']

            files = db.readDirectory((inode, device), current=lastset)
            (checksum, nfiles) = Util.hashDir(crypto, files, True)

            print("%-20s (%d, %d) [%d %d] -- %s %d") % (
                name, inode, device, firstset, lastset, checksum, nfiles)
            ckinfo = db.getChecksumInfo(checksum)
            if ckinfo:
                cksid = ckinfo['checksumid']
            else:
                cksid = db.insertChecksumFile(checksum,
                                              size=nfiles,
                                              isFile=False)

            db.updateDirChecksum((inode, device), cksid, current=lastset)
        conn.commit()
Code Example #3
File: setDirHashes.py  Project: koldinger/Tardis
def main():
    logging.basicConfig(level=logging.INFO)
    crypto = None
    token = None
    args = processArgs()
    password = Util.getPassword(args.password, args.passwordfile, args.passwordurl, args.passwordprog)

    if password:
        crypto = TardisCrypto.TardisCrypto(password, args.client)

    path = os.path.join(args.database, args.client, args.dbname)
    db = TardisDB.TardisDB(path, token=token, backup=False)

    if crypto:
        (a, b) = db.getKeys()
        crypto.setKeys(a, b)

    conn = db.conn
    dirs = conn.execute("SELECT Name as name, Inode AS inode, Device AS device, FirstSet as firstset, LastSet AS lastset FROM Files JOIN Names ON Files.NameId = Names.NameId WHERE Dir = 1")
    while True:
        batch = dirs.fetchmany(1000)
        if not batch:
            break

        for d in batch:
            name     = d['name']
            inode    = d['inode']
            device   = d['device']
            firstset = d['firstset']
            lastset  = d['lastset']

            files = db.readDirectory((inode, device), current=lastset)
            (checksum, nfiles) = Util.hashDir(crypto, files, True)

            print(("%-20s (%d, %d) [%d %d] -- %s %d") % (name, inode, device, firstset, lastset, checksum, nfiles))
            ckinfo = db.getChecksumInfo(checksum)
            if ckinfo:
                cksid = ckinfo['checksumid']
            else:
                cksid = db.insertChecksumFile(checksum, size=nfiles, isFile=False)

            db.updateDirChecksum((inode, device), cksid, current=lastset)
        conn.commit()
Code Example #4
File: encryptDB.py  Project: massimiliano76/Tardis
def generateDirHashes(db, crypto, cacheDir):
    conn = db.conn
    z = conn.cursor()
    r = conn.execute(
        "SELECT Inode, Device, LastSet, Names.name, Checksums.ChecksumId, Checksum "
        "FROM Files "
        "JOIN Names ON Names.NameId = Files.NameID "
        "JOIN Checksums ON Files.ChecksumId = Checksums.ChecksumId "
        "WHERE Dir = 1 "
        "ORDER BY Checksum")
    lastHash = None
    batch = r.fetchmany()
    while batch:
        for row in batch:
            inode = row['Inode']
            device = row['Device']
            last = row['LastSet']
            oldHash = row['Checksum']
            cksId = row['ChecksumId']
            files = db.readDirectory((inode, device), last)

            if oldHash == lastHash:
                continue
            lastHash = oldHash

            logger.debug("Rehashing directory %s (%d, %d)@%d: %s(%d)",
                         crypto.decryptFilename(row['Name']), inode, device,
                         last, oldHash, cksId)
            #logger.debug("    Directory contents: %s", str(files))
            (newHash, newSize) = Util.hashDir(crypto, files, True, True)
            logger.info("Rehashed %s => %s.  %d files", oldHash, newHash,
                        newSize)
            try:
                if newHash != oldHash:
                    z.execute(
                        "UPDATE Checksums SET Checksum = :newHash WHERE ChecksumId = :id",
                        {
                            "newHash": newHash,
                            "id": cksId
                        })
            except Exception:
                pass
        batch = r.fetchmany()
Code Example #5
File: encryptDB.py  Project: koldinger/Tardis
def generateDirHashes(db, crypto, cacheDir):
    conn = db.conn
    r = conn.execute("SELECT COUNT(*) FROM Files WHERE Dir = 1")
    nDirs = r.fetchone()[0]
    logger.info("Hashing %d directories", nDirs)
    hashes = 0
    unique = 0
    with progressbar.ProgressBar(max_value=nDirs) as bar:
        z = conn.cursor()
        r = conn.execute("SELECT Inode, Device, LastSet, Names.name, Checksums.ChecksumId, Checksum "
                         "FROM Files "
                         "JOIN Names ON Names.NameId = Files.NameID "
                         "JOIN Checksums ON Files.ChecksumId = Checksums.ChecksumId "
                         "WHERE Dir = 1 "
                         "ORDER BY Checksum")
        lastHash = None
        batch = r.fetchmany(10000)
        while batch:
            for row in batch:
                inode = row['Inode']
                device = row['Device']
                last = row['LastSet']
                oldHash = row['Checksum']
                cksId = row['ChecksumId']
                files = db.readDirectory((inode, device), last)
                hashes += 1
                if oldHash == lastHash:
                    continue
                lastHash = oldHash
                unique += 1

                #logger.debug("Rehashing directory %s (%d, %d)@%d: %s(%d)", crypto.decryptFilename(row['Name']),inode, device, last, oldHash, cksId)
                #logger.debug("    Directory contents: %s", str(files))
                (newHash, newSize) = Util.hashDir(crypto, files, True, decrypt=True)
                #logger.info("Rehashed %s => %s.  %d files", oldHash, newHash, newSize)
                bar.update(hashes)
                try:
                    if newHash != oldHash:
                        z.execute("UPDATE Checksums SET Checksum = :newHash WHERE ChecksumId = :id", {"newHash": newHash, "id": cksId})
                except Exception as e:
                    logger.error("Caught exception: %s->%s :: %s", oldHash, newHash,str(e))
            batch = r.fetchmany()
    logger.info("Hashed %d directories (%d unique)", hashes, unique)