def setUp(self):
    """Create an isolated local bucket, cache dir and metadata DB, then
    build the Fsck instance under test (with error reporting expected)."""
    # Throwaway directory acting as the storage backend
    self.bucket_dir = tempfile.mkdtemp()
    self.bucket = local.Bucket(self.bucket_dir, None, None)

    # Scratch directory for the block cache; trailing slash matches
    # how the rest of the code joins cache paths
    self.cachedir = tempfile.mkdtemp() + "/"
    self.blocksize = 1024

    # Fresh metadata database backed by a temporary file
    self.dbfile = tempfile.NamedTemporaryFile()
    self.db = Connection(self.dbfile.name)
    create_tables(self.db)
    init_tables(self.db)

    fs_params = {'blocksize': self.blocksize}
    self.fsck = Fsck(self.cachedir, self.bucket, fs_params, self.db)
    # Tests deliberately create inconsistencies, so fsck errors are expected
    self.fsck.expect_errors = True
def setUp(self):
    """Set up a bucket pool, metadata DB, one regular-file inode and a
    BlockCache, then take the llfuse lock as a request handler would."""
    self.bucket_dir = tempfile.mkdtemp()
    self.bucket_pool = BucketPool(
        lambda: local.Bucket(self.bucket_dir, None, None))
    self.cachedir = tempfile.mkdtemp() + "/"
    self.blocksize = 1024

    self.dbfile = tempfile.NamedTemporaryFile()
    self.db = Connection(self.dbfile.name)
    create_tables(self.db)
    init_tables(self.db)

    # Create an inode we can work with
    self.inode = 42
    # rwxr-xr-x regular file
    mode = (stat.S_IFREG
            | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
            | stat.S_IRGRP | stat.S_IXGRP
            | stat.S_IROTH | stat.S_IXOTH)
    self.db.execute(
        "INSERT INTO inodes (id,mode,uid,gid,mtime,atime,ctime,refcount,size) "
        "VALUES (?,?,?,?,?,?,?,?,?)",
        (self.inode, mode, os.getuid(), os.getgid(),
         time.time(), time.time(), time.time(), 1, 32))

    self.cache = BlockCache(self.bucket_pool, self.db, self.cachedir,
                            self.blocksize * 100)

    # Tested methods assume that they are called from
    # file system request handler
    llfuse.lock.acquire()
def setUp(self):
    """Build the full stack under test: backend bucket, metadata DB,
    block cache and an initialized fs.Operations server."""
    self.bucket_dir = tempfile.mkdtemp()
    self.bucket_pool = BucketPool(
        lambda: local.Bucket(self.bucket_dir, None, None))
    # Keep one connection checked out for direct backend inspection
    self.bucket = self.bucket_pool.pop_conn()

    self.cachedir = tempfile.mkdtemp() + "/"
    self.blocksize = 1024

    self.dbfile = tempfile.NamedTemporaryFile()
    self.db = Connection(self.dbfile.name)
    create_tables(self.db)
    init_tables(self.db)

    # Tested methods assume that they are called from
    # file system request handler
    llfuse.lock.acquire()

    cache_size = self.blocksize * 5
    self.block_cache = BlockCache(self.bucket_pool, self.db,
                                  self.cachedir, cache_size)
    self.server = fs.Operations(self.block_cache, self.db, self.blocksize)
    self.server.init()

    # Keep track of unused filenames
    self.name_cnt = 0
def main(args=None):
    """Create a new S3QL file system in the backend given on the command line.

    Clears any pre-existing file system (requires --force), optionally sets
    up encryption, initializes the local metadata database and uploads the
    initial metadata to the backend.

    Raises QuietError on an existing file system without --force, or on a
    password confirmation mismatch.
    """
    if args is None:
        args = sys.argv[1:]

    options = parse_args(args)
    setup_logging(options)

    plain_bucket = get_bucket(options, plain=True)

    if 's3ql_metadata' in plain_bucket:
        if not options.force:
            raise QuietError("Found existing file system! Use --force to overwrite")

        log.info('Purging existing file system data..')
        plain_bucket.clear()
        if not plain_bucket.is_get_consistent():
            log.info('Please note that the new file system may appear inconsistent\n'
                     'for a while until the removals have propagated through the backend.')

    if not options.plain:
        # NOTE(review): the password-entry section was garbled in the
        # original source ("******" residue); reconstructed as
        # prompt + confirmation when interactive, single line otherwise.
        if sys.stdin.isatty():
            wrap_pw = getpass("Enter encryption password: ")
            if wrap_pw != getpass("Confirm encryption password: "):
                raise QuietError("Passwords don't match.")
        else:
            wrap_pw = sys.stdin.readline().rstrip()

        # Generate data encryption passphrase
        log.info('Generating random encryption key...')
        with open('/dev/urandom', "rb", 0) as fh:  # No buffering
            data_pw = fh.read(32)

        # Store the data passphrase wrapped with the user's password
        bucket = BetterBucket(wrap_pw, 'bzip2', plain_bucket)
        bucket['s3ql_passphrase'] = data_pw
    else:
        data_pw = None

    bucket = BetterBucket(data_pw, 'bzip2', plain_bucket)

    # Setup database
    cachepath = get_bucket_cachedir(options.storage_url, options.cachedir)

    # There can't be a corresponding bucket, so we can safely delete
    # these files.
    if os.path.exists(cachepath + '.db'):
        os.unlink(cachepath + '.db')
    if os.path.exists(cachepath + '-cache'):
        shutil.rmtree(cachepath + '-cache')

    log.info('Creating metadata tables...')
    db = Connection(cachepath + '.db')
    create_tables(db)
    init_tables(db)

    param = dict()
    param['revision'] = CURRENT_FS_REV
    param['seq_no'] = 1
    param['label'] = options.label
    param['blocksize'] = options.blocksize * 1024
    param['needs_fsck'] = False
    # Timestamps stored as local-epoch (time.timezone offset applied)
    param['last_fsck'] = time.time() - time.timezone
    param['last-modified'] = time.time() - time.timezone

    # This indicates that the convert_legacy_metadata() stuff
    # in BetterBucket is not required for this file system.
    param['bucket_revision'] = 1

    bucket.store('s3ql_seq_no_%d' % param['seq_no'], 'Empty')

    log.info('Uploading metadata...')
    with bucket.open_write('s3ql_metadata', param) as fh:
        dump_metadata(fh, db)
    with open(cachepath + '.params', 'wb') as fh:
        pickle.dump(param, fh, 2)
def setUp(self):
    """Create a fresh metadata database and an InodeCache over it."""
    # Temporary file backing the SQLite metadata DB; kept alive on self
    # so it is not deleted while the test runs
    self.dbfile = tempfile.NamedTemporaryFile()
    self.db = Connection(self.dbfile.name)
    create_tables(self.db)
    init_tables(self.db)
    # Object under test
    self.cache = inode_cache.InodeCache(self.db)