Example #1
def db():
    dbfile = tempfile.NamedTemporaryFile()
    db = Connection(dbfile.name)
    create_tables(db)
    init_tables(db)
    try:
        yield db
    finally:
        db.close()
        dbfile.close()
Example #2
def db():
    dbfile = tempfile.NamedTemporaryFile()
    db = Connection(dbfile.name)
    create_tables(db)
    init_tables(db)
    try:
        yield db
    finally:
        db.close()
        dbfile.close()
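
The two snippets above follow the yield-based setup/teardown pattern used by
pytest fixtures: everything before the yield is setup, and the finally block
is teardown that runs even when a test fails. Below is a minimal sketch of how
such a fixture might be declared and consumed; the @pytest.fixture decorator,
the import path for Connection, and the helper bodies are assumptions, not
part of the excerpts:

import tempfile

import pytest

from s3ql.database import Connection  # assumed import path

def create_tables(db):  # stand-ins for the helpers the excerpts rely on
    db.execute('CREATE TABLE t (id INTEGER PRIMARY KEY)')

def init_tables(db):
    db.execute('INSERT INTO t (id) VALUES (1)')

@pytest.fixture
def db():
    dbfile = tempfile.NamedTemporaryFile()
    db = Connection(dbfile.name)
    create_tables(db)
    init_tables(db)
    try:
        yield db  # the test body runs here
    finally:
        db.close()      # teardown always runs
        dbfile.close()

def test_init(db):
    # pytest calls the fixture and passes the yielded Connection in
    assert db.get_val('SELECT COUNT(id) FROM t') == 1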
Example #3
    def test(self):
        skip_without_rsync()

        ref_dir = tempfile.mkdtemp(prefix='s3ql-ref-')
        try:
            self.populate_dir(ref_dir)

            # Copy source data
            self.mkfs()

            # Force 64bit inodes
            cachepath = os.path.join(self.cache_dir, _escape(self.storage_url))
            db = Connection(cachepath + '.db')
            db.execute('UPDATE sqlite_sequence SET seq=? WHERE name=?',
                       (2**36 + 10, 'inodes'))
            db.close()

            self.mount()
            subprocess.check_call(
                ['rsync', '-aHAX', ref_dir + '/', self.mnt_dir + '/'])
            self.umount()
            self.fsck()

            # Delete cache, run fsck and compare
            shutil.rmtree(self.cache_dir)
            self.cache_dir = tempfile.mkdtemp(prefix='s3ql-cache-')
            self.fsck()
            self.mount()
            try:
                out = check_output([
                    'rsync', '-anciHAX', '--delete', '--exclude',
                    '/lost+found', ref_dir + '/', self.mnt_dir + '/'
                ],
                                   universal_newlines=True,
                                   stderr=subprocess.STDOUT)
            except CalledProcessError as exc:
                pytest.fail('rsync failed with ' + exc.output)
            if out:
                pytest.fail('Copy not equal to original, rsync says:\n' + out)

            self.umount()

            # Delete cache and mount
            shutil.rmtree(self.cache_dir)
            self.cache_dir = tempfile.mkdtemp(prefix='s3ql-cache-')
            self.mount()
            self.umount()

        finally:
            shutil.rmtree(ref_dir)
Example #4
    def run(self):
        log.debug('MetadataDownloadThread: start')
        
        while not self.quit:
            self.event.wait(self.interval)
            self.event.clear()
            
            if self.quit:
                break
            
            with self.bucket_pool() as bucket:
                #XXX: call bucket.is_get_consistent() to verify data consistency later
                seq_no = get_seq_no(bucket)
                if seq_no > self.param['seq_no']:
                    log.info('Remote metadata is newer than local (%d vs %d), '
                             'downloading it', seq_no, self.param['seq_no'])
                elif seq_no < self.param['seq_no']:
                    log.warn('Remote metadata is older than local (%d vs %d), '
                             'ignoring the bucket until the metadata upload '
                             'thread is done', seq_no, self.param['seq_no'])
                    continue
                else:
                    log.info('Remote metadata matches local (%d vs %d), '
                             'nothing to download', seq_no, self.param['seq_no'])
                    continue

                log.info("Downloading & uncompressing metadata...")
                os.close(os.open(self.cachepath + '.db.tmp',
                                 os.O_RDWR | os.O_CREAT | os.O_TRUNC,
                                 stat.S_IRUSR | stat.S_IWUSR))

                db_conn = Connection(self.cachepath + '.db.tmp', fast_mode=True)
                with bucket.open_read("s3ql_metadata") as fh:
                    restore_metadata(fh, db_conn)
                db_conn.close()

                with llfuse.lock:
                    if self.quit:
                        break
                    os.rename(self.cachepath + '.db.tmp', self.cachepath + '.db')
                    self.db_mtime = os.stat(self.cachepath + '.db').st_mtime 
                    self.param['seq_no'] = seq_no

        log.debug('MetadataDownloadThread: end')    
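
The loop above is a common stoppable-poller shape: Event.wait(interval)
doubles as both the sleep and the wake-up channel, so a stop request takes
effect immediately instead of waiting out the full interval. A self-contained
sketch of the same pattern using only the standard library (all names here
are illustrative, not taken from the original code):

import threading

class Poller(threading.Thread):
    """Call self.work() every `interval` seconds until stop() is called."""

    def __init__(self, interval):
        super().__init__()
        self.interval = interval
        self.event = threading.Event()
        self.quit = False

    def run(self):
        while not self.quit:
            # Returns early when the event is set, so stop() does not
            # have to wait for the full interval to elapse.
            self.event.wait(self.interval)
            self.event.clear()
            if self.quit:
                break
            self.work()

    def work(self):
        print('polling')

    def stop(self):
        self.quit = True
        self.event.set()  # wake the thread so it can exit promptly

poller = Poller(interval=1.0)
poller.start()
poller.stop()
poller.join()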
Example #5
    def runTest(self):
        skip_without_rsync()
        ref_dir = tempfile.mkdtemp(prefix='s3ql-ref-')
        try:
            populate_dir(ref_dir)

            # Make file system and fake high inode number
            self.mkfs()
            db = Connection(
                get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
            db.execute('UPDATE sqlite_sequence SET seq=? WHERE name=?',
                       (2**31 + 10, 'inodes'))
            db.close()

            # Copy source data
            self.mount()
            subprocess.check_call(
                ['rsync', '-aHAX', ref_dir + '/', self.mnt_dir + '/'])
            self.umount()

            # Check that inode watermark is high
            db = Connection(
                get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
            self.assertGreater(
                db.get_val('SELECT seq FROM sqlite_sequence WHERE name=?',
                           ('inodes', )), 2**31 + 10)
            self.assertGreater(db.get_val('SELECT MAX(id) FROM inodes'),
                               2**31 + 10)
            db.close()

            # Renumber inodes
            self.fsck()

            # Check if renumbering was done
            db = Connection(
                get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
            self.assertLess(
                db.get_val('SELECT seq FROM sqlite_sequence WHERE name=?',
                           ('inodes', )), 2**31)
            self.assertLess(db.get_val('SELECT MAX(id) FROM inodes'), 2**31)
            db.close()

            # Compare
            self.mount()
            try:
                out = check_output([
                    'rsync', '-anciHAX', '--delete', '--exclude',
                    '/lost+found', ref_dir + '/', self.mnt_dir + '/'
                ],
                                   universal_newlines=True,
                                   stderr=subprocess.STDOUT)
            except CalledProcessError as exc:
                self.fail('rsync failed with ' + exc.output)
            if out:
                self.fail('Copy not equal to original, rsync says:\n' + out)

            self.umount()
        finally:
            shutil.rmtree(ref_dir)
Example #6
    def runTest(self):
        try:
            subprocess.call(['rsync', '--version'],
                            stderr=subprocess.STDOUT,
                            stdout=open('/dev/null', 'wb'))
        except OSError as exc:
            if exc.errno == errno.ENOENT:
                raise unittest.SkipTest('rsync not installed')
            raise

        ref_dir = tempfile.mkdtemp()
        try:
            populate_dir(ref_dir)

            # Make file system and fake high inode number
            self.mkfs()
            db = Connection(get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
            db.execute('UPDATE sqlite_sequence SET seq=? WHERE name=?',
                       (2 ** 31 + 10, u'inodes'))
            db.close()

            # Copy source data
            self.mount()
            subprocess.check_call(['rsync', '-aHAX', ref_dir + '/',
                                   self.mnt_dir + '/'])
            self.umount()

            # Check that inode watermark is high
            db = Connection(get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
            self.assertGreater(db.get_val('SELECT seq FROM sqlite_sequence WHERE name=?', (u'inodes',)), 2 ** 31 + 10)
            self.assertGreater(db.get_val('SELECT MAX(id) FROM inodes'), 2 ** 31 + 10)
            db.close()

            # Renumber inodes
            self.fsck()

            # Check if renumbering was done
            db = Connection(get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
            self.assertLess(db.get_val('SELECT seq FROM sqlite_sequence WHERE name=?', (u'inodes',)), 2 ** 31)
            self.assertLess(db.get_val('SELECT MAX(id) FROM inodes'), 2 ** 31)
            db.close()

            # Compare
            self.mount()
            rsync = subprocess.Popen(['rsync', '-anciHAX', '--delete',
                                      '--exclude', '/lost+found',
                                      ref_dir + '/', self.mnt_dir + '/'],
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.STDOUT,
                                     universal_newlines=True)
            out = rsync.communicate()[0]
            if rsync.returncode != 0:
                self.fail('rsync failed with ' + out)
            elif out:
                self.fail('Copy not equal to original, rsync says:\n' + out)

            self.umount()
        finally:
            shutil.rmtree(ref_dir)
Example #7
    def test(self):
        skip_without_rsync()
        ref_dir = tempfile.mkdtemp(prefix='s3ql-ref-')
        try:
            populate_dir(ref_dir)

            # Make file system and fake high inode number
            self.mkfs()
            db = Connection(get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
            db.execute('UPDATE sqlite_sequence SET seq=? WHERE name=?',
                       (2 ** 31 + 10, 'inodes'))
            db.close()

            # Copy source data
            self.mount()
            subprocess.check_call(['rsync', '-aHAX', ref_dir + '/',
                                   self.mnt_dir + '/'])
            self.umount()

            # Check that inode watermark is high
            db = Connection(get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
            assert db.get_val('SELECT seq FROM sqlite_sequence WHERE name=?',
                              ('inodes',)) > 2 ** 31 + 10
            assert db.get_val('SELECT MAX(id) FROM inodes') > 2 ** 31 + 10
            db.close()

            # Renumber inodes
            self.fsck()

            # Check if renumbering was done
            db = Connection(get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
            assert db.get_val('SELECT seq FROM sqlite_sequence WHERE name=?',
                              ('inodes',)) < 2 ** 31
            assert db.get_val('SELECT MAX(id) FROM inodes') < 2 ** 31
            db.close()

            # Compare
            self.mount()
            try:
                out = check_output(['rsync', '-anciHAX', '--delete', '--exclude', '/lost+found',
                                   ref_dir + '/', self.mnt_dir + '/'], universal_newlines=True,
                                  stderr=subprocess.STDOUT)
            except CalledProcessError as exc:
                pytest.fail('rsync failed with ' + exc.output)
            if out:
                pytest.fail('Copy not equal to original, rsync says:\n' + out)

            self.umount()
        finally:
            shutil.rmtree(ref_dir)
Example #8
class DumpTests(unittest.TestCase):
    def setUp(self):
        self.tmpfh1 = tempfile.NamedTemporaryFile()
        self.tmpfh2 = tempfile.NamedTemporaryFile()
        self.src = Connection(self.tmpfh1.name)
        self.dst = Connection(self.tmpfh2.name)
        self.fh = tempfile.TemporaryFile()

        # Disable exclusive locking for all tests
        self.src.execute('PRAGMA locking_mode = NORMAL')
        self.dst.execute('PRAGMA locking_mode = NORMAL')

        self.create_table(self.src)
        self.create_table(self.dst)

    def tearDown(self):
        self.src.close()
        self.dst.close()
        self.tmpfh1.close()
        self.tmpfh2.close()
        self.fh.close()

    def test_transactions(self):
        self.fill_vals(self.src)
        dumpspec = (('id', deltadump.INTEGER, 0),)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        self.dst.execute('PRAGMA journal_mode = WAL')

        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh, trx_rows=10)
        self.compare_tables(self.src, self.dst)

    def test_1_vals_1(self):
        self.fill_vals(self.src)
        dumpspec = (('id', deltadump.INTEGER, 0),)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_1_vals_2(self):
        self.fill_vals(self.src)
        dumpspec = (('id', deltadump.INTEGER, 1),)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_1_vals_3(self):
        self.fill_vals(self.src)
        dumpspec = (('id', deltadump.INTEGER, -1),)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_2_buf_auto(self):
        self.fill_vals(self.src)
        self.fill_buf(self.src)
        dumpspec = (('id', deltadump.INTEGER),
                    ('buf', deltadump.BLOB))
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_2_buf_fixed(self):
        BUFLEN = 32
        self.fill_vals(self.src)
        self.fill_buf(self.src, BUFLEN)
        dumpspec = (('id', deltadump.INTEGER),
                    ('buf', deltadump.BLOB, BUFLEN))
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_3_deltas_1(self):
        self.fill_deltas(self.src)
        dumpspec = (('id', deltadump.INTEGER, 0),)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_3_deltas_2(self):
        self.fill_deltas(self.src)
        dumpspec = (('id', deltadump.INTEGER, 1),)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_3_deltas_3(self):
        self.fill_deltas(self.src)
        dumpspec = (('id', deltadump.INTEGER, -1),)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_4_time(self):
        self.fill_vals(self.src)

        t1 = 0.5 * time.time()
        t2 = 2 * time.time()
        for (id_,) in self.src.query('SELECT id FROM test'):
            val = random.uniform(t1, t2)
            self.src.execute('UPDATE test SET buf=? WHERE id=?', (val, id_))

        dumpspec = (('id', deltadump.INTEGER),
                    ('buf', deltadump.TIME))

        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)

        self.compare_tables(self.src, self.dst)

    def test_5_multi(self):
        self.fill_vals(self.src)
        dumpspec = (('id', deltadump.INTEGER, 0),)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.dst.execute('DELETE FROM test')
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def compare_tables(self, db1, db2):
        i1 = db1.query('SELECT id, buf FROM test ORDER BY id')
        i2 = db2.query('SELECT id, buf FROM test ORDER BY id')

        for (id1, buf1) in i1:
            (id2, buf2) = next(i2)

            self.assertEqual(id1, id2)
            if isinstance(buf1, float):
                self.assertAlmostEqual(buf1, buf2, places=9)
            else:
                self.assertEqual(buf1, buf2)

        self.assertRaises(StopIteration, i2.__next__)

    def fill_buf(self, db, len_=None):
        with open('/dev/urandom', 'rb') as rfh:
            first = True
            for (id_,) in db.query('SELECT id FROM test'):
                if len_ is None and first:
                    val = b'' # We always want to check this case
                    first = False
                elif len_ is None:
                    val = rfh.read(random.randint(0, 140))
                else:
                    val = rfh.read(len_)

                db.execute('UPDATE test SET buf=? WHERE id=?', (val, id_))

    def fill_vals(self, db):
        vals = []
        for exp in [7, 8, 9, 15, 16, 17, 31, 32, 33, 62]:
            vals += list(range(2 ** exp - 5, 2 ** exp + 6))
        vals += list(range(2 ** 63 - 5, 2 ** 63))
        vals += [ -v for v  in vals ]
        vals.append(-(2 ** 63))

        for val in vals:
            db.execute('INSERT INTO test (id) VALUES(?)', (val,))

    def fill_deltas(self, db):
        deltas = []
        for exp in [7, 8, 9, 15, 16, 17, 31, 32, 33]:
            deltas += list(range(2 ** exp - 5, 2 ** exp + 6))
        deltas += [ -v for v  in deltas ]

        last = 0
        for delta in deltas:
            val = last + delta
            last = val
            db.execute('INSERT INTO test (id) VALUES(?)', (val,))

    def create_table(self, db):
        db.execute('''CREATE TABLE test (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            buf BLOB)''')
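
The tests above all exercise the same round trip: deltadump.dump_table()
serializes a table to a file handle according to a dumpspec, and
deltadump.load_table() replays the dump into another database. Each dumpspec
entry is (column_name, type) or (column_name, type, delta), where the third
field apparently selects delta encoding of successive values (0, 1 and -1 in
the tests). A condensed sketch of the round trip outside unittest; the import
paths are assumptions based on these excerpts:

import tempfile

from s3ql.database import Connection  # assumed import paths
from s3ql import deltadump

with tempfile.NamedTemporaryFile() as f1, \
     tempfile.NamedTemporaryFile() as f2, \
     tempfile.TemporaryFile() as fh:
    src = Connection(f1.name)
    dst = Connection(f2.name)
    for db in (src, dst):
        db.execute('CREATE TABLE test (id INTEGER PRIMARY KEY, buf BLOB)')
    src.execute('INSERT INTO test (id) VALUES(?)', (42,))

    dumpspec = (('id', deltadump.INTEGER, 1),)
    deltadump.dump_table(table='test', order='id', columns=dumpspec,
                         db=src, fh=fh)
    fh.seek(0)  # load_table reads from the current position
    deltadump.load_table(table='test', columns=dumpspec, db=dst, fh=fh)

    assert dst.get_val('SELECT id FROM test') == 42
    src.close()
    dst.close()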
Example #9
def main(args=None):

    if args is None:
        args = sys.argv[1:]

    options = parse_args(args)
    setup_logging(options)
        
    # Check if fs is mounted on this computer
    # This is not foolproof but should prevent common mistakes
    match = options.storage_url + ' /'
    with open('/proc/mounts', 'r') as fh:
        for line in fh:
            if line.startswith(match):
                raise QuietError('Can not check mounted file system.')
    

    bucket = get_bucket(options)
    
    cachepath = get_bucket_cachedir(options.storage_url, options.cachedir)
    seq_no = get_seq_no(bucket)
    param_remote = bucket.lookup('s3ql_metadata')
    db = None
    
    if os.path.exists(cachepath + '.params'):
        assert os.path.exists(cachepath + '.db')
        param = pickle.load(open(cachepath + '.params', 'rb'))
        if param['seq_no'] < seq_no:
            log.info('Ignoring locally cached metadata (outdated).')
            param = bucket.lookup('s3ql_metadata')
        else:
            log.info('Using cached metadata.')
            db = Connection(cachepath + '.db')
            assert not os.path.exists(cachepath + '-cache') or param['needs_fsck']
    
        if param_remote['seq_no'] != param['seq_no']:
            log.warn('Remote metadata is outdated.')
            param['needs_fsck'] = True
            
    else:
        param = param_remote
        assert not os.path.exists(cachepath + '-cache')
        # .db might exist if mount.s3ql is killed at exactly the right instant
        # and should just be ignored.
       
    # Check revision
    if param['revision'] < CURRENT_FS_REV:
        raise QuietError('File system revision too old, please run `s3qladm upgrade` first.')
    elif param['revision'] > CURRENT_FS_REV:
        raise QuietError('File system revision too new, please update your '
                         'S3QL installation.')
    
    if param['seq_no'] < seq_no:
        if bucket.is_get_consistent():
            print(textwrap.fill(textwrap.dedent('''\
                  Up to date metadata is not available. Probably the file system has not
                  been properly unmounted and you should try to run fsck on the computer 
                  where the file system has been mounted most recently.
                  ''')))
        else:
            print(textwrap.fill(textwrap.dedent('''\
                  Up to date metadata is not available. Either the file system has not
                  been unmounted cleanly or the data has not yet propagated through the backend.
                  In the latter case, waiting for a while should fix the problem; in
                  the former case you should try to run fsck on the computer where
                  the file system has been mounted most recently.
                  ''')))
    
        print('Enter "continue" to use the outdated data anyway:',
              '> ', sep='\n', end='')
        if options.batch:
            raise QuietError('(in batch mode, exiting)')
        if sys.stdin.readline().strip() != 'continue':
            raise QuietError()
        
        param['seq_no'] = seq_no
        param['needs_fsck'] = True
    
    
    if (not param['needs_fsck'] 
        and ((time.time() - time.timezone) - param['last_fsck'])
             < 60 * 60 * 24 * 31): # last check was less than 1 month ago
        if options.force:
            log.info('File system seems clean, checking anyway.')
        else:
            log.info('File system is marked as clean. Use --force to force checking.')
            return
    
    # If using local metadata, check consistency
    if db:
        log.info('Checking DB integrity...')
        try:
            # get_list may raise CorruptError itself
            res = db.get_list('PRAGMA integrity_check(20)')
            if res[0][0] != u'ok':
                log.error('\n'.join(x[0] for x in res ))
                raise apsw.CorruptError()
        except apsw.CorruptError:
            raise QuietError('Local metadata is corrupted. Remove or repair the following '
                             'files manually and re-run fsck:\n'
                             + cachepath + '.db (corrupted)\n'
                             + cachepath + '.param (intact)')
    else:
        log.info("Downloading & uncompressing metadata...")
        os.close(os.open(cachepath + '.db.tmp', os.O_RDWR | os.O_CREAT | os.O_TRUNC,
                         stat.S_IRUSR | stat.S_IWUSR)) 
        db = Connection(cachepath + '.db.tmp', fast_mode=True)
        with bucket.open_read("s3ql_metadata") as fh:
            restore_metadata(fh, db)
        db.close()
        os.rename(cachepath + '.db.tmp', cachepath + '.db')
        db = Connection(cachepath + '.db')
    
    # Increase metadata sequence no 
    param['seq_no'] += 1
    param['needs_fsck'] = True
    bucket['s3ql_seq_no_%d' % param['seq_no']] = 'Empty'
    pickle.dump(param, open(cachepath + '.params', 'wb'), 2)
    
    fsck = Fsck(cachepath + '-cache', bucket, param, db)
    fsck.check()
    
    if fsck.uncorrectable_errors:
        raise QuietError("Uncorrectable errors found, aborting.")
        
    if os.path.exists(cachepath + '-cache'):
        os.rmdir(cachepath + '-cache')
        
    log.info('Saving metadata...')
    fh = tempfile.TemporaryFile()
    dump_metadata(fh, db)  
            
    log.info("Compressing & uploading metadata..")
    cycle_metadata(bucket)
    fh.seek(0)
    param['needs_fsck'] = False
    param['last_fsck'] = time.time() - time.timezone
    param['last-modified'] = time.time() - time.timezone
    with bucket.open_write("s3ql_metadata", param) as dst:
        fh.seek(0)
        shutil.copyfileobj(fh, dst)
    fh.close()
    pickle.dump(param, open(cachepath + '.params', 'wb'), 2)
        
    db.execute('ANALYZE')
    db.execute('VACUUM')
    db.close() 
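
Note that main() takes its argument list explicitly and only falls back to
sys.argv[1:] when called without one, which keeps the entry point easy to
drive from tests. The conventional module-level wrapper would simply be:

if __name__ == '__main__':
    main()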
Example #10
class DumpTests(unittest.TestCase):
    def setUp(self):
        self.tmpfh1 = tempfile.NamedTemporaryFile()
        self.tmpfh2 = tempfile.NamedTemporaryFile()
        self.src = Connection(self.tmpfh1.name)
        self.dst = Connection(self.tmpfh2.name)
        self.fh = tempfile.TemporaryFile()

        # Disable exclusive locking for all tests
        self.src.execute('PRAGMA locking_mode = NORMAL')
        self.dst.execute('PRAGMA locking_mode = NORMAL')

        self.create_table(self.src)
        self.create_table(self.dst)

    def tearDown(self):
        self.src.close()
        self.dst.close()
        self.tmpfh1.close()
        self.tmpfh2.close()
        self.fh.close()

    def test_transactions(self):
        self.fill_vals(self.src)
        dumpspec = (('id', deltadump.INTEGER, 0), )
        deltadump.dump_table(table='test',
                             order='id',
                             columns=dumpspec,
                             db=self.src,
                             fh=self.fh)
        self.fh.seek(0)
        self.dst.execute('PRAGMA journal_mode = WAL')

        deltadump.load_table(table='test',
                             columns=dumpspec,
                             db=self.dst,
                             fh=self.fh,
                             trx_rows=10)
        self.compare_tables(self.src, self.dst)

    def test_1_vals_1(self):
        self.fill_vals(self.src)
        dumpspec = (('id', deltadump.INTEGER, 0), )
        deltadump.dump_table(table='test',
                             order='id',
                             columns=dumpspec,
                             db=self.src,
                             fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test',
                             columns=dumpspec,
                             db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_1_vals_2(self):
        self.fill_vals(self.src)
        dumpspec = (('id', deltadump.INTEGER, 1), )
        deltadump.dump_table(table='test',
                             order='id',
                             columns=dumpspec,
                             db=self.src,
                             fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test',
                             columns=dumpspec,
                             db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_1_vals_3(self):
        self.fill_vals(self.src)
        dumpspec = (('id', deltadump.INTEGER, -1), )
        deltadump.dump_table(table='test',
                             order='id',
                             columns=dumpspec,
                             db=self.src,
                             fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test',
                             columns=dumpspec,
                             db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_2_buf_auto(self):
        self.fill_vals(self.src)
        self.fill_buf(self.src)
        dumpspec = (('id', deltadump.INTEGER), ('buf', deltadump.BLOB))
        deltadump.dump_table(table='test',
                             order='id',
                             columns=dumpspec,
                             db=self.src,
                             fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test',
                             columns=dumpspec,
                             db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_2_buf_fixed(self):
        BUFLEN = 32
        self.fill_vals(self.src)
        self.fill_buf(self.src, BUFLEN)
        dumpspec = (('id', deltadump.INTEGER), ('buf', deltadump.BLOB, BUFLEN))
        deltadump.dump_table(table='test',
                             order='id',
                             columns=dumpspec,
                             db=self.src,
                             fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test',
                             columns=dumpspec,
                             db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_3_deltas_1(self):
        self.fill_deltas(self.src)
        dumpspec = (('id', deltadump.INTEGER, 0), )
        deltadump.dump_table(table='test',
                             order='id',
                             columns=dumpspec,
                             db=self.src,
                             fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test',
                             columns=dumpspec,
                             db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_3_deltas_2(self):
        self.fill_deltas(self.src)
        dumpspec = (('id', deltadump.INTEGER, 1), )
        deltadump.dump_table(table='test',
                             order='id',
                             columns=dumpspec,
                             db=self.src,
                             fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test',
                             columns=dumpspec,
                             db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_3_deltas_3(self):
        self.fill_deltas(self.src)
        dumpspec = (('id', deltadump.INTEGER, -1), )
        deltadump.dump_table(table='test',
                             order='id',
                             columns=dumpspec,
                             db=self.src,
                             fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test',
                             columns=dumpspec,
                             db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_5_multi(self):
        self.fill_vals(self.src)
        dumpspec = (('id', deltadump.INTEGER, 0), )
        deltadump.dump_table(table='test',
                             order='id',
                             columns=dumpspec,
                             db=self.src,
                             fh=self.fh)
        deltadump.dump_table(table='test',
                             order='id',
                             columns=dumpspec,
                             db=self.src,
                             fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test',
                             columns=dumpspec,
                             db=self.dst,
                             fh=self.fh)
        self.dst.execute('DELETE FROM test')
        deltadump.load_table(table='test',
                             columns=dumpspec,
                             db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def compare_tables(self, db1, db2):
        i1 = db1.query('SELECT id, buf FROM test ORDER BY id')
        i2 = db2.query('SELECT id, buf FROM test ORDER BY id')

        for (id1, buf1) in i1:
            (id2, buf2) = next(i2)

            self.assertEqual(id1, id2)
            if isinstance(buf1, float):
                self.assertAlmostEqual(buf1, buf2, places=9)
            else:
                self.assertEqual(buf1, buf2)

        self.assertRaises(StopIteration, i2.__next__)

    def fill_buf(self, db, len_=None):
        with open('/dev/urandom', 'rb') as rfh:
            first = True
            for (id_, ) in db.query('SELECT id FROM test'):
                if len_ is None and first:
                    val = b''  # We always want to check this case
                    first = False
                elif len_ is None:
                    val = rfh.read(random.randint(0, 140))
                else:
                    val = rfh.read(len_)

                db.execute('UPDATE test SET buf=? WHERE id=?', (val, id_))

    def fill_vals(self, db):
        vals = []
        for exp in [7, 8, 9, 15, 16, 17, 31, 32, 33, 62]:
            vals += list(range(2**exp - 5, 2**exp + 6))
        vals += list(range(2**63 - 5, 2**63))
        vals += [-v for v in vals]
        vals.append(-(2**63))

        for val in vals:
            db.execute('INSERT INTO test (id) VALUES(?)', (val, ))

    def fill_deltas(self, db):
        deltas = []
        for exp in [7, 8, 9, 15, 16, 17, 31, 32, 33]:
            deltas += list(range(2**exp - 5, 2**exp + 6))
        deltas += [-v for v in deltas]

        last = 0
        for delta in deltas:
            val = last + delta
            last = val
            db.execute('INSERT INTO test (id) VALUES(?)', (val, ))

    def create_table(self, db):
        db.execute('''CREATE TABLE test (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            buf BLOB)''')
Example #11
def get_metadata(bucket, cachepath, readonly=False):
    '''Retrieve metadata
    
    Checks:
    - Revision
    - Unclean mounts
    
    Locally cached metadata is used if up-to-date.
    '''
                
    seq_no = get_seq_no(bucket)

    # Check for cached metadata
    db = None
    if os.path.exists(cachepath + '.params'):
        param = pickle.load(open(cachepath + '.params', 'rb'))
        if param['seq_no'] < seq_no:
            log.info('Ignoring locally cached metadata (outdated).')
            param = bucket.lookup('s3ql_metadata')
        else:
            log.info('Using cached metadata.')
            db = Connection(cachepath + '.db')
    else:
        param = bucket.lookup('s3ql_metadata')
 
    # Check for unclean shutdown
    if param['seq_no'] < seq_no:
        if bucket.is_get_consistent():
           #raise QuietError(textwrap.fill(textwrap.dedent('''\
           #    It appears that the file system is still mounted somewhere else. If this is not
           #    the case, the file system may have not been unmounted cleanly and you should try
           #    to run fsck on the computer where the file system has been mounted most recently.
           #    ''')))
            log.warn("local seqno is smaller than bucket seqno, which implies another mountpoint but local bucket is consistent, just ignore it")
        else:                
           #raise QuietError(textwrap.fill(textwrap.dedent('''\
           #    It appears that the file system is still mounted somewhere else. If this is not the
           #    case, the file system may have not been unmounted cleanly or the data from the 
           #    most-recent mount may have not yet propagated through the backend. In the later case,
           #    waiting for a while should fix the problem, in the former case you should try to run
           #    fsck on the computer where the file system has been mounted most recently.
           #    ''')))
            log.warn("local seqno is smaller than bucket seqno, which implies another mountpoint and local bucket is inconsistent, could not ignore the error")
       
    # Check revision
    if param['revision'] < CURRENT_FS_REV:
        raise QuietError('File system revision too old, please run `s3qladm upgrade` first.')
    elif param['revision'] > CURRENT_FS_REV:
        raise QuietError('File system revision too new, please update your '
                         'S3QL installation.')
        
    # Check that the fs itself is clean
    if not readonly and param['needs_fsck']:
        raise QuietError("File system damaged or not unmounted cleanly, run fsck!")        
    if (time.time() - time.timezone) - param['last_fsck'] > 60 * 60 * 24 * 31:
        log.warn('Last file system check was more than 1 month ago, '
                 'running fsck.s3ql is recommended.')
    
    # Download metadata
    if not db:
        log.info("Downloading & uncompressing metadata...")
        os.close(os.open(cachepath + '.db.tmp', os.O_RDWR | os.O_CREAT | os.O_TRUNC,
                         stat.S_IRUSR | stat.S_IWUSR)) 
        db = Connection(cachepath + '.db.tmp', fast_mode=True)
        with bucket.open_read("s3ql_metadata") as fh:
            restore_metadata(fh, db)
        db.close()
        os.rename(cachepath + '.db.tmp', cachepath + '.db')
        db = Connection(cachepath + '.db')
 
    # Increase metadata sequence no 
    param['seq_no'] += 1
    param['needs_fsck'] = True
    bucket['s3ql_seq_no_%d' % param['seq_no']] = 'Empty'
    pickle.dump(param, open(cachepath + '.params', 'wb'), 2)
    param['needs_fsck'] = False
    
    return (param, db)
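
A hedged sketch of how a caller might use get_metadata(); the get_bucket()
helper and the cache-path construction mirror Example #9 and are assumptions
here, not part of this excerpt:

bucket = get_bucket(options)
cachepath = get_bucket_cachedir(options.storage_url, options.cachedir)

(param, db) = get_metadata(bucket, cachepath)
try:
    # param holds the fs metadata (seq_no, revision, needs_fsck, ...)
    # and db is an open Connection to the local metadata copy.
    inode_count = db.get_val('SELECT COUNT(id) FROM inodes')
finally:
    db.close()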