Example #1
    def test(self):
        self.mkfs()

        # Get backend instance
        plain_backend = LocalBackend(self.storage_url, None, None)

        # Save metadata
        meta = plain_backend['s3ql_metadata']

        # Mount file system
        self.mount()

        # Increase sequence number
        seq_no = get_seq_no(plain_backend)
        plain_backend['s3ql_seq_no_%d' % (seq_no + 1)] = b'Empty'

        # Create a file, so that there's metadata to flush
        fname = os.path.join(self.mnt_dir, 'file1')
        with open(fname, 'w') as fh:
            fh.write('hello, world')

        # Try to upload metadata
        s3ql.ctrl.main(['upload-meta', self.mnt_dir])

        # Try to write. We repeat a few times, since the metadata upload
        # happens asynchronously.
        with pytest.raises(PermissionError):
            for _ in range(10):
                with open(fname + 'barz', 'w') as fh:
                    fh.write('foobar')
                time.sleep(1)
        self.reg_output(
            r'^ERROR: Remote metadata is newer than local '
            r'\(\d+ vs \d+\), refusing to overwrite(?: and switching '
            r'to failsafe mode)?!$',
            count=2)
        self.reg_output(
            r'^WARNING: File system errors encountered, marking for '
            r'fsck\.$',
            count=1)
        self.reg_output(
            r'^ERROR: The locally cached metadata will be '
            r'\*lost\* the next .+$',
            count=1)
        self.umount()

        # Assert that remote metadata has not been overwritten
        assert meta == plain_backend['s3ql_metadata']

        plain_backend.close()
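
This test (and Example #4) forges a newer revision by storing an extra `s3ql_seq_no_N` object directly in the backend. A minimal sketch of what `get_seq_no` plausibly does, assuming the backend supports prefix listing the way `LocalBackend` does (the real helper lives in s3ql's common module and may differ in detail):

def get_seq_no_sketch(backend):
    # Sequence numbers are encoded in object names of the form
    # 's3ql_seq_no_<n>'; the current number is the largest one. That
    # is why storing 's3ql_seq_no_%d' % (seq_no + 1) above makes the
    # remote metadata look newer than the mounted file system's copy.
    prefix = 's3ql_seq_no_'
    seq_nos = [int(name[len(prefix):]) for name in backend.list(prefix)]
    if not seq_nos:
        raise RuntimeError('No sequence number objects found; '
                           'not an S3QL file system?')
    return max(seq_nos)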
Example #2
File: mount.py Project: drewlu/ossql
    def run(self):
        log.debug('MetadataUploadThread: start')
        
        while not self.quit:
            self.event.wait(self.interval)
            self.event.clear()
            
            if self.quit:
                break
            
            with llfuse.lock:
                if self.quit:
                    break
                new_mtime = os.stat(self.db.file).st_mtime 
                if self.db_mtime == new_mtime:
                    log.info('File system unchanged, not uploading metadata.')
                    continue
                
                # We dump to a file first, so that we don't hold the
                # lock for quite so long.
                log.info('Saving metadata...')
                fh = tempfile.TemporaryFile()
                dump_metadata(fh, self.db) 
              
            with self.bucket_pool() as bucket:
                seq_no = get_seq_no(bucket)
                if seq_no != self.param['seq_no']:
                    log.error('Remote metadata is newer than local (%d vs %d), '
                              'refusing to overwrite!', seq_no, self.param['seq_no'])
                    fh.close()
                    continue
                              
                log.info("Compressing & uploading metadata..")
                cycle_metadata(bucket)
                fh.seek(0)
                self.param['last-modified'] = time.time() - time.timezone
                
                # Temporarily decrease the sequence number; this is not the final upload
                self.param['seq_no'] -= 1
                with bucket.open_write("s3ql_metadata", self.param) as obj_fh:
                    shutil.copyfileobj(fh, obj_fh)
                self.param['seq_no'] += 1
                
                fh.close()
                self.db_mtime = new_mtime    

        log.debug('MetadataUploadThread: end')    
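
The loop above wakes up when `self.event` fires or the interval elapses. A hedged sketch of the matching `stop` method such a thread needs so that `main` (Example #7) can shut it down without waiting out a full interval; the body is illustrative, based on the `run` loop above, not copied from the project:

    def stop(self):
        # Ask the thread to terminate and wake it immediately: run()
        # re-checks self.quit right after event.wait() returns, so
        # setting the flag and firing the event together guarantees a
        # prompt exit.
        self.quit = True
        self.event.set()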
Example #3
File: mount.py Project: drewlu/ossql
    def run(self):
        log.debug('MetadataDownloadThread: start')
        
        while not self.quit:
            self.event.wait(self.interval)
            self.event.clear()
            
            if self.quit:
                break
            
            with self.bucket_pool() as bucket:
                #XXX: call bucket.is_get_consistent() to verify data consistency later
                seq_no = get_seq_no(bucket)
                if seq_no > self.param['seq_no']:
                    log.info('Remote metadata is newer than local (%d vs %d), '
                             'downloading it', seq_no, self.param['seq_no'])
                elif seq_no < self.param['seq_no']:
                    log.warn('Remote metadata is older than local (%d vs %d), '
                             'ignoring the bucket until the metadata upload '
                             'thread is done', seq_no, self.param['seq_no'])
                    continue
                else:
                    log.info('Remote seq_no equals local (%d vs %d), '
                             'skipping download', seq_no, self.param['seq_no'])
                    continue
                              
                log.info("Downloading & uncompressing metadata...")
                os.close(os.open(self.cachepath + '.db.tmp',
                                 os.O_RDWR | os.O_CREAT | os.O_TRUNC,
                                 stat.S_IRUSR | stat.S_IWUSR))

                db_conn = Connection(self.cachepath + '.db.tmp', fast_mode=True)
                with bucket.open_read("s3ql_metadata") as fh:
                    restore_metadata(fh, db_conn)
                db_conn.close()

                with llfuse.lock:
                    if self.quit:
                        break
                    os.rename(self.cachepath + '.db.tmp', self.cachepath + '.db')
                    self.db_mtime = os.stat(self.cachepath + '.db').st_mtime 
                    self.param['seq_no'] = seq_no

        log.debug('MetadataDownloadThread: end')    
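
The download-and-restore snippet in the middle of this loop reappears almost verbatim in Examples #6 and #8. A hedged consolidation sketch (a hypothetical helper, not part of the project) that captures the shared steps:

def fetch_metadata_sketch(bucket, cachepath):
    # Recreate the temporary database file with owner-only
    # permissions, restore the dumped metadata into it, and only then
    # move it into place, so readers never see a half-written
    # database.
    log.info("Downloading & uncompressing metadata...")
    os.close(os.open(cachepath + '.db.tmp',
                     os.O_RDWR | os.O_CREAT | os.O_TRUNC,
                     stat.S_IRUSR | stat.S_IWUSR))
    db = Connection(cachepath + '.db.tmp', fast_mode=True)
    with bucket.open_read("s3ql_metadata") as fh:
        restore_metadata(fh, db)
    db.close()
    os.rename(cachepath + '.db.tmp', cachepath + '.db')
    return Connection(cachepath + '.db')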
Example #4
    def test(self):
        self.mkfs()

        # Get backend instance
        plain_backend = LocalBackend(self.storage_url, None, None)

        # Save metadata
        meta = plain_backend['s3ql_metadata']

        # Mount file system
        self.mount()

        # Increase sequence number
        seq_no = get_seq_no(plain_backend)
        plain_backend['s3ql_seq_no_%d' % (seq_no+1)] = b'Empty'

        # Create a file, so that there's metadata to flush
        fname = os.path.join(self.mnt_dir, 'file1')
        with open(fname, 'w') as fh:
            fh.write('hello, world')

        # Try to upload metadata
        s3ql.ctrl.main(['upload-meta', self.mnt_dir])

        # Try to write. We repeat a few times, since the metadata upload
        # happens asynchronously.
        with pytest.raises(PermissionError):
            for _ in range(10):
                with open(fname + 'barz', 'w') as fh:
                    fh.write('foobar')
                time.sleep(1)
        self.capfd.register_output(r'^ERROR: Remote metadata is newer than local '
                                   r'\(\d+ vs \d+\), refusing to overwrite(?: and switching '
                                   r'to failsafe mode)?!$', count=2)
        self.capfd.register_output(r'^WARNING: File system errors encountered, marking for '
                                   r'fsck\.$', count=1)
        self.capfd.register_output(r'^ERROR: The locally cached metadata will be '
                                   r'\*lost\* the next .+$', count=1)
        self.umount()

        # Assert that remote metadata has not been overwritten
        assert meta == plain_backend['s3ql_metadata']

        plain_backend.close()
Example #5
    def test(self):
        self.mkfs()

        # Get backend instance
        plain_backend = LocalBackend(self.storage_url, None, None)

        # Save metadata
        meta = plain_backend['s3ql_metadata']

        # Mount file system
        self.mount(fatal_warnings=False)

        # Increase sequence number
        seq_no = get_seq_no(plain_backend)
        plain_backend['s3ql_seq_no_%d' % (seq_no+1)] = b'Empty'

        # Create a file, so that there's metadata to flush
        fname = os.path.join(self.mnt_dir, 'file1')
        with open(fname, 'w') as fh:
            fh.write('hello, world')

        # Try to upload metadata
        s3ql.ctrl.main(['upload-meta', self.mnt_dir])

        # Try to write. We repeat a few times, since the metadata upload
        # happens asynchronously.
        with pytest.raises(PermissionError):
            for _ in range(10):
                with open(fname + 'barz', 'w') as fh:
                    fh.write('foobar')
                time.sleep(1)

        self.umount()

        # Assert that remote metadata has not been overwritten
        assert meta == plain_backend['s3ql_metadata']

        plain_backend.close()
Example #6
File: fsck.py Project: drewlu/ossql
def main(args=None):

    if args is None:
        args = sys.argv[1:]

    options = parse_args(args)
    setup_logging(options)
        
    # Check if fs is mounted on this computer
    # This is not foolproof but should prevent common mistakes
    match = options.storage_url + ' /'
    with open('/proc/mounts', 'r') as fh:
        for line in fh:
            if line.startswith(match):
                raise QuietError('Cannot check mounted file system.')
    

    bucket = get_bucket(options)
    
    cachepath = get_bucket_cachedir(options.storage_url, options.cachedir)
    seq_no = get_seq_no(bucket)
    param_remote = bucket.lookup('s3ql_metadata')
    db = None
    
    if os.path.exists(cachepath + '.params'):
        assert os.path.exists(cachepath + '.db')
        param = pickle.load(open(cachepath + '.params', 'rb'))
        if param['seq_no'] < seq_no:
            log.info('Ignoring locally cached metadata (outdated).')
            param = bucket.lookup('s3ql_metadata')
        else:
            log.info('Using cached metadata.')
            db = Connection(cachepath + '.db')
            assert not os.path.exists(cachepath + '-cache') or param['needs_fsck']
    
        if param_remote['seq_no'] != param['seq_no']:
            log.warn('Remote metadata is outdated.')
            param['needs_fsck'] = True
            
    else:
        param = param_remote
        assert not os.path.exists(cachepath + '-cache')
        # .db might exist if mount.s3ql is killed at exactly the right instant
        # and should just be ignored.
       
    # Check revision
    if param['revision'] < CURRENT_FS_REV:
        raise QuietError('File system revision too old, please run `s3qladm upgrade` first.')
    elif param['revision'] > CURRENT_FS_REV:
        raise QuietError('File system revision too new, please update your '
                         'S3QL installation.')
    
    if param['seq_no'] < seq_no:
        if bucket.is_get_consistent():
            print(textwrap.fill(textwrap.dedent('''\
                  Up to date metadata is not available. Probably the file system has not
                  been properly unmounted and you should try to run fsck on the computer 
                  where the file system has been mounted most recently.
                  ''')))
        else:
            print(textwrap.fill(textwrap.dedent('''\
                  Up to date metadata is not available. Either the file system has not
                  been unmounted cleanly or the data has not yet propagated through the backend.
                  In the latter case, waiting for a while should fix the problem; in
                  the former case you should try to run fsck on the computer where
                  the file system has been mounted most recently.
                  ''')))
    
        print('Enter "continue" to use the outdated data anyway:',
              '> ', sep='\n', end='')
        if options.batch:
            raise QuietError('(in batch mode, exiting)')
        if sys.stdin.readline().strip() != 'continue':
            raise QuietError()
        
        param['seq_no'] = seq_no
        param['needs_fsck'] = True
    
    
    if (not param['needs_fsck']
        and ((time.time() - time.timezone) - param['last_fsck'])
             < 60 * 60 * 24 * 31): # last check less than 1 month ago
        if options.force:
            log.info('File system seems clean, checking anyway.')
        else:
            log.info('File system is marked as clean. Use --force to force checking.')
            return
    
    # If using local metadata, check consistency
    if db:
        log.info('Checking DB integrity...')
        try:
            # get_list may raise CorruptError itself
            res = db.get_list('PRAGMA integrity_check(20)')
            if res[0][0] != u'ok':
                log.error('\n'.join(x[0] for x in res ))
                raise apsw.CorruptError()
        except apsw.CorruptError:
            raise QuietError('Local metadata is corrupted. Remove or repair the following '
                             'files manually and re-run fsck:\n'
                             + cachepath + '.db (corrupted)\n'
                             + cachepath + '.params (intact)')
    else:
        log.info("Downloading & uncompressing metadata...")
        os.close(os.open(cachepath + '.db.tmp', os.O_RDWR | os.O_CREAT | os.O_TRUNC,
                         stat.S_IRUSR | stat.S_IWUSR)) 
        db = Connection(cachepath + '.db.tmp', fast_mode=True)
        with bucket.open_read("s3ql_metadata") as fh:
            restore_metadata(fh, db)
        db.close()
        os.rename(cachepath + '.db.tmp', cachepath + '.db')
        db = Connection(cachepath + '.db')
    
    # Increase metadata sequence no 
    param['seq_no'] += 1
    param['needs_fsck'] = True
    bucket['s3ql_seq_no_%d' % param['seq_no']] = 'Empty'
    pickle.dump(param, open(cachepath + '.params', 'wb'), 2)
    
    fsck = Fsck(cachepath + '-cache', bucket, param, db)
    fsck.check()
    
    if fsck.uncorrectable_errors:
        raise QuietError("Uncorrectable errors found, aborting.")
        
    if os.path.exists(cachepath + '-cache'):
        os.rmdir(cachepath + '-cache')
        
    log.info('Saving metadata...')
    fh = tempfile.TemporaryFile()
    dump_metadata(fh, db)  
            
    log.info("Compressing & uploading metadata..")
    cycle_metadata(bucket)
    fh.seek(0)
    param['needs_fsck'] = False
    param['last_fsck'] = time.time() - time.timezone
    param['last-modified'] = time.time() - time.timezone
    with bucket.open_write("s3ql_metadata", param) as dst:
        fh.seek(0)
        shutil.copyfileobj(fh, dst)
    fh.close()
    pickle.dump(param, open(cachepath + '.params', 'wb'), 2)
        
    db.execute('ANALYZE')
    db.execute('VACUUM')
    db.close() 
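
Both this function and the upload thread in Example #2 call `cycle_metadata(bucket)` just before overwriting `s3ql_metadata`. A hedged sketch of what that rotation might look like, written with the mapping-style access the tests above demonstrate; the real helper may instead use a server-side copy or rename and keep a different number of generations (compare the local `.params`/`.db` rotation at the end of Example #7):

def cycle_metadata_sketch(bucket):
    # Shift each existing backup generation up by one, then save the
    # current metadata object as generation 0, so the upload that
    # follows never destroys the only copy.
    for i in reversed(range(4)):
        name = 's3ql_metadata_bak_%d' % i
        if name in bucket:
            bucket['s3ql_metadata_bak_%d' % (i + 1)] = bucket[name]
    bucket['s3ql_metadata_bak_0'] = bucket['s3ql_metadata']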
Example #7
File: mount.py Project: drewlu/ossql
def main(args=None):
    '''Mount S3QL file system'''

    if args is None:
        args = sys.argv[1:]

    options = parse_args(args)
    
    # Save handler so that we can remove it when daemonizing
    stdout_log_handler = setup_logging(options)
    
    if options.threads is None:
        options.threads = determine_threads(options)

    if not os.path.exists(options.mountpoint):
        raise QuietError('Mountpoint does not exist.')
        
    if options.profile:
        import cProfile
        import pstats
        prof = cProfile.Profile()

    bucket_factory = get_bucket_factory(options)
    bucket_pool = BucketPool(bucket_factory)
    
    # Get paths
    cachepath = get_bucket_cachedir(options.storage_url, options.cachedir)

    # Retrieve metadata
    with bucket_pool() as bucket:
        (param, db) = get_metadata(bucket, cachepath, options.readonly)
            
    if options.nfs:
        log.info('Creating NFS indices...')
        # NFS may try to look up '..', so we have to speed up this kind of query
        db.execute('CREATE INDEX IF NOT EXISTS ix_contents_inode ON contents(inode)')
        
        # Since we do not support generation numbers, we have to keep the
        # likelihood of reusing a just-deleted inode low
        inode_cache.RANDOMIZE_INODES = True
    else:
        db.execute('DROP INDEX IF EXISTS ix_contents_inode')
                       
    metadata_upload_thread = MetadataUploadThread(bucket_pool, param, db,
                                                  options.metadata_upload_interval)
    metadata_download_thread = MetadataDownloadThread(bucket_pool, param, cachepath,
                                                      options.metadata_download_interval)
    block_cache = BlockCache(bucket_pool, db, cachepath + '-cache',
                             options.cachesize * 1024, options.max_cache_entries)
    commit_thread = CommitThread(block_cache)
    operations = fs.Operations(block_cache, db, blocksize=param['blocksize'],
                               upload_event=metadata_upload_thread.event)
    
    log.info('Mounting filesystem...')
    llfuse.init(operations, options.mountpoint, get_fuse_opts(options))

    if not options.fg:
        if stdout_log_handler:
            logging.getLogger().removeHandler(stdout_log_handler)
        daemonize(options.cachedir)

    exc_info = setup_exchook()

    # After we start threads, we must be sure to terminate them
    # or the process will hang 
    try:
        block_cache.init(options.threads)
        metadata_upload_thread.start()
        metadata_download_thread.start()
        commit_thread.start()
        
        if options.upstart:
            os.kill(os.getpid(), signal.SIGSTOP)
        if options.profile:
            prof.runcall(llfuse.main, options.single)
        else:
            llfuse.main(options.single)
        
        log.info("FUSE main loop terminated.")
        
    except:
        log.info("Caught exception in main loop, unmounting file system...")  
          
        # Tell finally handler that there already is an exception
        if not exc_info:
            exc_info = sys.exc_info()
        
        # We do *not* unmount on exception. Why? E.g. if someone is mirroring the
        # mountpoint, and it suddenly becomes empty, all the mirrored data will be
        # deleted. However, it's crucial to still call llfuse.close, so that
        # Operations.destroy() can flush the inode cache.
        with llfuse.lock:
            llfuse.close(unmount=False)

        raise
            
    # Terminate threads
    finally:
        log.debug("Waiting for background threads...")
        for (op, with_lock) in ((metadata_upload_thread.stop, False),
                                (commit_thread.stop, False),
                                (block_cache.destroy, True),
                                (metadata_upload_thread.join, False),
                                (metadata_download_thread.join, False),
                                (commit_thread.join, False)):
            try:
                if with_lock:
                    with llfuse.lock:
                        op()
                else:
                    op()
            except:
                # We just live with the race condition here
                if not exc_info: 
                    exc_info = sys.exc_info()
                else:
                    log.exception("Exception during cleanup:")

        log.debug("All background threads terminated.")
 
    # Re-raise if main loop terminated due to exception in other thread
    # or during cleanup
    if exc_info:
        raise exc_info[0], exc_info[1], exc_info[2]
        
    # At this point, there should be no other threads left

    # Unmount
    log.info("Unmounting file system.")
    with llfuse.lock:
        llfuse.close()
    
    # Do not update .params yet, dump_metadata() may fail if the database is
    # corrupted, in which case we want to force an fsck.
       
    if not options.readonly:
        with bucket_pool() as bucket:   
            seq_no = get_seq_no(bucket)
            if metadata_upload_thread.db_mtime == os.stat(cachepath + '.db').st_mtime:
                log.info('File system unchanged, not uploading metadata.')
                del bucket['s3ql_seq_no_%d' % param['seq_no']]         
                param['seq_no'] -= 1
                pickle.dump(param, open(cachepath + '.params', 'wb'), 2)         
            elif seq_no == param['seq_no']:
                log.info('Uploading metadata...')     
                cycle_metadata(bucket)
                param['last-modified'] = time.time() - time.timezone
                with tempfile.TemporaryFile() as tmp:
                    dump_metadata(tmp, db)
                    tmp.seek(0)
                    with bucket.open_write('s3ql_metadata', param) as fh:
                        shutil.copyfileobj(tmp, fh)
                pickle.dump(param, open(cachepath + '.params', 'wb'), 2)
            else:
                log.error('Remote metadata is newer than local (%d vs %d), '
                          'refusing to overwrite!', seq_no, param['seq_no'])
                log.error('The locally cached metadata will be *lost* the next time the file system '
                          'is mounted or checked and has therefore been backed up.')
                for name in (cachepath + '.params', cachepath + '.db'):
                    for i in reversed(range(4)):
                        if os.path.exists(name + '.%d' % i):
                            os.rename(name + '.%d' % i, name + '.%d' % (i+1))     
                    os.rename(name, name + '.0')
   
    db.execute('ANALYZE')
    db.execute('VACUUM')
    db.close() 

    if options.profile:
        tmp = tempfile.NamedTemporaryFile()
        prof.dump_stats(tmp.name)
        fh = open('s3ql_profile.txt', 'w')
        p = pstats.Stats(tmp.name, stream=fh)
        tmp.close()
        p.strip_dirs()
        p.sort_stats('cumulative')
        p.print_stats(50)
        p.sort_stats('time')
        p.print_stats(50)
        fh.close()
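
`main` re-raises whatever `setup_exchook()` captured once cleanup is done. A hedged sketch of that pattern: the helper returns a mutable container that an installed exception hook fills in, which is why the `if not exc_info` checks above work. The real helper also has to interrupt the FUSE main loop (e.g. by signalling the process), a detail omitted here:

def setup_exchook_sketch():
    # Record the first uncaught exception (e.g. from a background
    # thread) in a list the caller holds on to. An empty list is
    # falsy, so `if not exc_info` detects whether anything was
    # captured.
    exc_info = []
    old_exchook = sys.excepthook

    def exchook(type_, val, tb):
        if not exc_info:
            exc_info.extend((type_, val, tb))
        old_exchook(type_, val, tb)

    sys.excepthook = exchook
    return exc_info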
Example #8
File: mount.py Project: drewlu/ossql
def get_metadata(bucket, cachepath, readonly=False):
    '''Retrieve metadata
    
    Checks:
    - Revision
    - Unclean mounts
    
    Locally cached metadata is used if up-to-date.
    '''
                
    seq_no = get_seq_no(bucket)

    # Check for cached metadata
    db = None
    if os.path.exists(cachepath + '.params'):
        param = pickle.load(open(cachepath + '.params', 'rb'))
        if param['seq_no'] < seq_no:
            log.info('Ignoring locally cached metadata (outdated).')
            param = bucket.lookup('s3ql_metadata')
        else:
            log.info('Using cached metadata.')
            db = Connection(cachepath + '.db')
    else:
        param = bucket.lookup('s3ql_metadata')
 
    # Check for unclean shutdown
    if param['seq_no'] < seq_no:
        if bucket.is_get_consistent():
           #raise QuietError(textwrap.fill(textwrap.dedent('''\
           #    It appears that the file system is still mounted somewhere else. If this is not
           #    the case, the file system may have not been unmounted cleanly and you should try
           #    to run fsck on the computer where the file system has been mounted most recently.
           #    ''')))
            log.warn("local seqno is smaller than bucket seqno, which implies another mountpoint but local bucket is consistent, just ignore it")
        else:                
           #raise QuietError(textwrap.fill(textwrap.dedent('''\
           #    It appears that the file system is still mounted somewhere else. If this is not the
           #    case, the file system may have not been unmounted cleanly or the data from the 
           #    most-recent mount may have not yet propagated through the backend. In the later case,
           #    waiting for a while should fix the problem, in the former case you should try to run
           #    fsck on the computer where the file system has been mounted most recently.
           #    ''')))
            log.warn("local seqno is smaller than bucket seqno, which implies another mountpoint and local bucket is inconsistent, could not ignore the error")
       
    # Check revision
    if param['revision'] < CURRENT_FS_REV:
        raise QuietError('File system revision too old, please run `s3qladm upgrade` first.')
    elif param['revision'] > CURRENT_FS_REV:
        raise QuietError('File system revision too new, please update your '
                         'S3QL installation.')
        
    # Check that the fs itself is clean
    if not readonly and param['needs_fsck']:
        raise QuietError("File system damaged or not unmounted cleanly, run fsck!")        
    if (time.time() - time.timezone) - param['last_fsck'] > 60 * 60 * 24 * 31:
        log.warn('Last file system check was more than 1 month ago, '
                 'running fsck.s3ql is recommended.')
    
    # Download metadata
    if not db:
        log.info("Downloading & uncompressing metadata...")
        os.close(os.open(cachepath + '.db.tmp', os.O_RDWR | os.O_CREAT | os.O_TRUNC,
                         stat.S_IRUSR | stat.S_IWUSR)) 
        db = Connection(cachepath + '.db.tmp', fast_mode=True)
        with bucket.open_read("s3ql_metadata") as fh:
            restore_metadata(fh, db)
        db.close()
        os.rename(cachepath + '.db.tmp', cachepath + '.db')
        db = Connection(cachepath + '.db')
 
    # Increase metadata sequence no 
    param['seq_no'] += 1
    param['needs_fsck'] = True
    bucket['s3ql_seq_no_%d' % param['seq_no']] = 'Empty'
    pickle.dump(param, open(cachepath + '.params', 'wb'), 2)
    param['needs_fsck'] = False
    
    return (param, db)
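
The final block is the mount handshake: the `.params` file on disk records `needs_fsck = True` so that a crash before a clean unmount forces a check, while the in-memory copy immediately goes back to `False` for normal operation. A condensed restatement of those lines, factored into a hypothetical helper purely for illustration:

def bump_seq_no_sketch(bucket, param, cachepath):
    # Advance the sequence number and announce it in the bucket, so
    # other mounts can see that this metadata is now in flux.
    param['seq_no'] += 1
    bucket['s3ql_seq_no_%d' % param['seq_no']] = 'Empty'
    # Persist needs_fsck=True: if we crash from here on, fsck.s3ql
    # (Example #6) finds it on disk and insists on a check.
    param['needs_fsck'] = True
    pickle.dump(param, open(cachepath + '.params', 'wb'), 2)
    # In memory we are healthy; a clean unmount (Example #7) rewrites
    # .params before exiting.
    param['needs_fsck'] = False
    return param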