Example #1
    def setUp(self):
        self.bucket_dir = tempfile.mkdtemp()
        self.bucket = local.Bucket(self.bucket_dir, None, None)
        self.cachedir = tempfile.mkdtemp() + "/"
        self.blocksize = 1024

        self.dbfile = tempfile.NamedTemporaryFile()
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        self.fsck = Fsck(self.cachedir, self.bucket,
                         { 'blocksize': self.blocksize }, self.db)
        self.fsck.expect_errors = True
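The temporary directories and database file created here need explicit cleanup. A minimal matching tearDown, assuming shutil is imported alongside tempfile (the method body is a sketch, not taken from the project):

    def tearDown(self):
        # Remove the temporary bucket and cache directories created in setUp
        shutil.rmtree(self.bucket_dir)
        shutil.rmtree(self.cachedir)
        # Closing the NamedTemporaryFile deletes the database file with it
        self.dbfile.close()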
Example #2
    def setUp(self):

        self.bucket_dir = tempfile.mkdtemp()
        self.bucket_pool = BucketPool(lambda: local.Bucket(self.bucket_dir, None, None))

        self.cachedir = tempfile.mkdtemp() + "/"
        self.blocksize = 1024

        self.dbfile = tempfile.NamedTemporaryFile()
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Create an inode we can work with
        self.inode = 42
        self.db.execute(
            "INSERT INTO inodes (id,mode,uid,gid,mtime,atime,ctime,refcount,size) "
            "VALUES (?,?,?,?,?,?,?,?,?)",
            (
                self.inode,
                stat.S_IFREG
                | stat.S_IRUSR
                | stat.S_IWUSR
                | stat.S_IXUSR
                | stat.S_IRGRP
                | stat.S_IXGRP
                | stat.S_IROTH
                | stat.S_IXOTH,
                os.getuid(),
                os.getgid(),
                time.time(),
                time.time(),
                time.time(),
                1,
                32,
            ),
        )

        self.cache = BlockCache(self.bucket_pool, self.db, self.cachedir, self.blocksize * 100)

        # Tested methods assume that they are called from
        # file system request handler
        llfuse.lock.acquire()
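Because setUp acquires the global llfuse lock, the matching tearDown must release it again or later tests will deadlock. A sketch of the counterpart, with the cleanup steps assumed rather than taken from the project:

    def tearDown(self):
        # Release the lock taken at the end of setUp
        llfuse.lock.release()
        shutil.rmtree(self.bucket_dir)
        shutil.rmtree(self.cachedir)
        self.dbfile.close()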
Example #3
    def setUp(self):
        self.bucket_dir = tempfile.mkdtemp()
        self.bucket_pool = BucketPool(lambda: local.Bucket(self.bucket_dir, None, None))
        self.bucket = self.bucket_pool.pop_conn()
        self.cachedir = tempfile.mkdtemp() + "/"
        self.blocksize = 1024

        self.dbfile = tempfile.NamedTemporaryFile()
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Tested methods assume that they are called from
        # file system request handler
        llfuse.lock.acquire()
        
        self.block_cache = BlockCache(self.bucket_pool, self.db, self.cachedir,
                                      self.blocksize * 5)
        self.server = fs.Operations(self.block_cache, self.db, self.blocksize)
          
        self.server.init()

        # Keep track of unused filenames
        self.name_cnt = 0
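The name_cnt counter is presumably consumed by a small helper that hands out fresh file names during the tests. A plausible sketch (the helper name is hypothetical):

    def newname(self):
        # Return a file name that has not been used in this test run yet
        self.name_cnt += 1
        return 'testfile_%d' % self.name_cnt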
Example #4
File: adm.py Project: drewlu/ossql
def restore_legacy_metadata(ifh, conn):
    unpickler = pickle.Unpickler(ifh)
    (data_start, to_dump, sizes, columns) = unpickler.load()
    ifh.seek(data_start)
    create_tables(conn)
    create_legacy_tables(conn)
    for (table, _) in to_dump:
        log.info('Loading %s', table)
        col_str = ', '.join(columns[table])
        val_str = ', '.join('?' for _ in columns[table])
        if table in ('inodes', 'blocks', 'objects', 'contents'):
            sql_str = 'INSERT INTO leg_%s (%s) VALUES(%s)' % (table, col_str, val_str)
        else:
            sql_str = 'INSERT INTO %s (%s) VALUES(%s)' % (table, col_str, val_str)
        for _ in xrange(sizes[table]):
            buf = unpickler.load()
            for row in buf:
                conn.execute(sql_str, row)

    # Create a block for each object
    conn.execute('''
         INSERT INTO blocks (id, hash, refcount, obj_id, size)
            SELECT id, hash, refcount, id, size FROM leg_objects
    ''')
    conn.execute('''
         INSERT INTO objects (id, refcount, compr_size)
            SELECT id, 1, compr_size FROM leg_objects
    ''')
    conn.execute('DROP TABLE leg_objects')
              
    # Populate the new inode_blocks table for inodes with multiple blocks
    conn.execute('''
         CREATE TEMP TABLE multi_block_inodes AS 
            SELECT inode FROM leg_blocks
            GROUP BY inode HAVING COUNT(inode) > 1
    ''')    
    conn.execute('''
         INSERT INTO inode_blocks (inode, blockno, block_id)
            SELECT inode, blockno, obj_id 
            FROM leg_blocks JOIN multi_block_inodes USING(inode)
    ''')
    
    # Copy inodes with multiple blocks into the new inodes table (their block_id stays NULL)
    conn.execute('''
        INSERT INTO inodes (id, uid, gid, mode, mtime, atime, ctime, 
                            refcount, size, rdev, locked, block_id)
               SELECT id, uid, gid, mode, mtime, atime, ctime, 
                      refcount, size, rdev, locked, NULL
               FROM leg_inodes JOIN multi_block_inodes ON inode == id 
            ''')
    
    # Add inodes with just one block or no block
    conn.execute('''
        INSERT INTO inodes (id, uid, gid, mode, mtime, atime, ctime, 
                            refcount, size, rdev, locked, block_id)
               SELECT id, uid, gid, mode, mtime, atime, ctime, 
                      refcount, size, rdev, locked, obj_id
               FROM leg_inodes LEFT JOIN leg_blocks ON leg_inodes.id == leg_blocks.inode 
               GROUP BY leg_inodes.id HAVING COUNT(leg_inodes.id) <= 1  
            ''')
    
    conn.execute('''
        INSERT INTO symlink_targets (inode, target)
        SELECT id, target FROM leg_inodes WHERE target IS NOT NULL
    ''')
    
    conn.execute('DROP TABLE leg_inodes')
    conn.execute('DROP TABLE leg_blocks')
    
    # Sort out names
    conn.execute('''
        INSERT INTO names (name, refcount) 
        SELECT name, COUNT(name) FROM leg_contents GROUP BY name
    ''')
    conn.execute('''
        INSERT INTO contents (name_id, inode, parent_inode) 
        SELECT names.id, inode, parent_inode 
        FROM leg_contents JOIN names ON leg_contents.name == names.name
    ''')
    conn.execute('DROP TABLE leg_contents')
    
    conn.execute('ANALYZE')
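For context, the function expects an open file handle positioned at the pickled legacy dump and an open database connection. Invoking it could look like this (both paths are hypothetical):

# Rebuild current-format metadata from a legacy dump (sketch)
conn = Connection('/tmp/metadata-new.db')
with open('/tmp/s3ql_metadata.dump', 'rb') as ifh:
    restore_legacy_metadata(ifh, conn)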
Example #5
File: mkfs.py Project: drewlu/ossql
def main(args=None):

    if args is None:
        args = sys.argv[1:]

    options = parse_args(args)
    setup_logging(options)
    
    plain_bucket = get_bucket(options, plain=True)
    
    if 's3ql_metadata' in plain_bucket:
        if not options.force:
            raise QuietError("Found existing file system! Use --force to overwrite")
            
        log.info('Purging existing file system data...')
        plain_bucket.clear()
        if not plain_bucket.is_get_consistent():
            log.info('Please note that the new file system may appear inconsistent\n'
                     'for a while until the removals have propagated through the backend.')
            
    if not options.plain:
        if sys.stdin.isatty():
            wrap_pw = getpass("Enter encryption password: "******"Confirm encryption password: "******"Passwords don't match.")
        else:
            wrap_pw = sys.stdin.readline().rstrip()

        # Generate data encryption passphrase
        log.info('Generating random encryption key...')
        fh = open('/dev/urandom', "rb", 0) # No buffering
        data_pw = fh.read(32)
        fh.close()
        
        bucket = BetterBucket(wrap_pw, 'bzip2', plain_bucket)
        bucket['s3ql_passphrase'] = data_pw
    else:    
        data_pw = None
        
    bucket = BetterBucket(data_pw, 'bzip2', plain_bucket)

    # Setup database
    cachepath = get_bucket_cachedir(options.storage_url, options.cachedir)

    # There can't be a corresponding bucket, so we can safely delete
    # these files.
    if os.path.exists(cachepath + '.db'):
        os.unlink(cachepath + '.db')
    if os.path.exists(cachepath + '-cache'):
        shutil.rmtree(cachepath + '-cache')

    log.info('Creating metadata tables...')
    db = Connection(cachepath + '.db')
    create_tables(db)
    init_tables(db)

    param = dict()
    param['revision'] = CURRENT_FS_REV
    param['seq_no'] = 1
    param['label'] = options.label
    param['blocksize'] = options.blocksize * 1024
    param['needs_fsck'] = False
    param['last_fsck'] = time.time() - time.timezone
    param['last-modified'] = time.time() - time.timezone
    
    # This indicates that the convert_legacy_metadata() stuff
    # in BetterBucket is not required for this file system.
    param['bucket_revision'] = 1
    
    bucket.store('s3ql_seq_no_%d' % param['seq_no'], 'Empty')

    log.info('Uploading metadata...')
    with bucket.open_write('s3ql_metadata', param) as fh:
        dump_metadata(fh, db)  
    pickle.dump(param, open(cachepath + '.params', 'wb'), 2)
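Since the parameters are written with pickle protocol 2, reading them back later is symmetric. A minimal sketch:

# Load the file system parameters that main() saved next to the database
with open(cachepath + '.params', 'rb') as fh:
    param = pickle.load(fh)
assert param['revision'] == CURRENT_FS_REV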
Example #6
    def setUp(self):
        self.dbfile = tempfile.NamedTemporaryFile()
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)
        self.cache = inode_cache.InodeCache(self.db)
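As with the other fixtures, a matching tearDown would close the temporary database file; whether InodeCache needs an explicit destroy() first depends on its API, so that call is an assumption:

    def tearDown(self):
        # destroy() is assumed to flush and drop any cached inode objects
        self.cache.destroy()
        self.dbfile.close()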