Exemple #1
0
def analyze(path):
    """Scan every transaction in the FileStorage at *path* and build a Report."""
    storage = FileStorage(path, read_only=1)
    report = Report()
    for transaction_record in storage.iterator():
        analyze_trans(report, transaction_record)
    return report
Exemple #2
0
 def setUp(self):
     """Create a populated source storage plus the paths used by recovery tests."""
     ZODB.tests.util.TestCase.setUp(self)
     self.path = 'source.fs'    # storage file that tests will damage/recover
     self.storage = FileStorage(self.path)
     self.populate()            # fill the source storage with test data
     self.dest = 'dest.fs'      # target file for the recovery run
     self.recovered = None      # FileStorage opened on dest, if any
Exemple #3
0
 def setUp(self):
     """Build a ZODB holding a HistoryItem with three committed revisions."""
     # set up a zodb
     # we can't use DemoStorage here 'cos it doesn't support History
     self.dir = tempfile.mkdtemp()
     fs_path = os.path.join(self.dir, 'testHistory.fs')
     self.s = FileStorage(fs_path, create=True)
     self.connection = ZODB.DB(self.s).open()
     r = self.connection.root()
     a = Application()
     r['Application'] = a
     self.root = a
     # create a python script
     a['test'] = HistoryItem()
     self.hi = hi = a.test
     # commit some changes, one transaction per revision
     hi.title = 'First title'
     t = transaction.get()
     # undo note made by Application instantiation above.
     t.description = None
     t.note(u'Change 1')
     t.commit()
     time.sleep(0.02)  # wait at least one Windows clock tick
     hi.title = 'Second title'
     t = transaction.get()
     t.note(u'Change 2')
     t.commit()
     time.sleep(0.02)  # wait at least one Windows clock tick
     hi.title = 'Third title'
     t = transaction.get()
     t.note(u'Change 3')
     t.commit()
Exemple #4
0
class HistoryFreeFromFileStorage(
        RelStorageTestBase,
        UndoableRecoveryStorage,
):
    """Recovery tests copying from a FileStorage into a history-free storage."""

    # History-free storages keep only the latest revision of each object.
    keep_history = False

    def setUp(self):
        # The storage under test becomes the copy destination; a fresh
        # FileStorage becomes the copy source.
        self.open(create=1)
        self._storage.zap_all()
        self._dst = self._storage
        self._storage = FileStorage("Source.fs", create=True)

    def tearDown(self):
        self._storage.close()
        self._dst.close()
        self._storage.cleanup()
        self._dst.cleanup()

    def new_dest(self):
        # Hook used by the recovery tests to obtain the destination storage.
        return self._dst

    def compare(self, src, dest):
        # The dest storage has a truncated copy of dest, so
        # use compare_truncated() instead of compare_exact().
        self.compare_truncated(src, dest)
Exemple #5
0
    def testBadTransaction(self):
        """Corrupt transaction headers on disk and verify recovery reports errors.

        Two scenarios: overwriting an entire transaction header, and
        overwriting only part of one.
        """
        # Locate the file offsets of two transaction headers via the undo log.
        L = self.storage.undoLog()
        r = L[3]
        tid = base64.decodestring(r["id"] + "\n")
        pos1 = self.storage._txn_find(tid, 0)

        r = L[8]
        tid = base64.decodestring(r["id"] + "\n")
        pos2 = self.storage._txn_find(tid, 0)

        self.storage.close()

        # Overwrite the entire header.  `with` guarantees the handle is
        # closed even if seek/write fail (the original leaked it on error).
        with open(self.path, "a+b") as f:
            f.seek(pos1 - 50)
            f.write("\0" * 100)
        output = self.recover()
        self.assert_('error' in output, output)
        self.recovered = FileStorage(self.dest)
        self.recovered.close()
        # Recycle the recovered file as the source for the second scenario.
        os.remove(self.path)
        os.rename(self.dest, self.path)

        # Overwrite part of the header.
        with open(self.path, "a+b") as f:
            f.seek(pos2 + 10)
            f.write("\0" * 100)
        output = self.recover()
        self.assert_('error' in output, output)
        self.recovered = FileStorage(self.dest)
        self.recovered.close()
Exemple #6
0
class DbAdapter:
    """Small facade over a ZODB FileStorage: connect, transact, disconnect."""

    def __init__(self, path="data.db"):
        self.path = path

    def connect(self):
        """Open storage, database, and connection; return the DB root."""
        self.storage = FileStorage(self.path)
        self.db = DB(self.storage)
        self.conn = self.db.open()
        return self.conn.root()

    def begin_transaction(self):
        """Start a new transaction."""
        transaction.begin()

    def commit(self):
        """Commit the current transaction."""
        transaction.commit()

    def rollback(self):
        """Abort the current transaction."""
        transaction.abort()

    def disconnect(self):
        """Close connection, database, and storage, then drop the lock file."""
        self.conn.close()
        self.db.close()
        self.storage.close()
        lock_file = self.path + ".lock"
        if os.path.exists(lock_file):
            os.remove(lock_file)
Exemple #7
0
def do_full_backup(options):
    """Write a full backup of options.file into options.repository.

    Copies everything up to the last complete transaction, saves the
    storage index alongside it, and records (dest, start, stop, checksum)
    in a .dat file for later incremental backups.
    """
    options.full = True
    dest = os.path.join(options.repository, gen_filename(options))
    if os.path.exists(dest):
        raise WouldOverwriteFiles('Cannot overwrite existing file: %s' % dest)
    # Find the file position of the last completed transaction.
    fs = FileStorage(options.file, read_only=True)
    try:
        # The FileStorage ctor calls read_index() which scans the file and
        # returns "the position just after the last valid transaction
        # record"; getSize() returns that position, which is exactly the
        # amount we want to copy.
        pos = fs.getSize()
        # Save the storage index into the repository
        index_file = os.path.join(options.repository,
                                  gen_filename(options, '.index'))
        log('writing index')
        fs._index.save(pos, index_file)
    finally:
        # Always release the storage, even if saving the index fails.
        fs.close()
    log('writing full backup: %s bytes to %s', pos, dest)
    checksum = copyfile(options, dest, 0, pos)  # renamed: don't shadow sum()
    # Write the data file for this full backup
    datfile = os.path.splitext(dest)[0] + '.dat'
    with open(datfile, 'w') as fp:
        # Same line format as the old `print >> fp` statement.
        fp.write('%s %s %s %s\n' % (dest, 0, pos, checksum))
        fp.flush()
        os.fsync(fp.fileno())
    if options.killold:
        delete_old_backups(options)
 def tearDown(self):
     """Close every storage opened by the test, then run the base teardown."""
     self.storage.close()
     if self.recovered is not None:
         self.recovered.close()
     # Open (and immediately close) the destination file so its handle is
     # released before the base class removes temporary files.
     temp = FileStorage(self.dest)
     temp.close()
     ZODB.tests.util.TestCase.tearDown(self)
Exemple #9
0
 def tearDown(self):
     """Close every storage opened by the test, then run the base teardown."""
     self.storage.close()
     if self.recovered is not None:
         self.recovered.close()
     # Open (and immediately close) the destination file so its handle is
     # released before the base class removes temporary files.
     temp = FileStorage(self.dest)
     temp.close()
     ZODB.tests.util.TestCase.tearDown(self)
Exemple #10
0
def do_full_backup(options):
    """Write a full backup of options.file into options.repository.

    Copies everything up to the last complete transaction, saves the
    storage index alongside it, and records (dest, start, stop, checksum)
    in a .dat file for later incremental backups.
    """
    options.full = True
    dest = os.path.join(options.repository, gen_filename(options))
    if os.path.exists(dest):
        raise WouldOverwriteFiles('Cannot overwrite existing file: %s' % dest)
    # Find the file position of the last completed transaction.
    fs = FileStorage(options.file, read_only=True)
    try:
        # The FileStorage ctor calls read_index() which scans the file and
        # returns "the position just after the last valid transaction
        # record"; getSize() returns that position, which is exactly the
        # amount we want to copy.
        pos = fs.getSize()
        # Save the storage index into the repository
        index_file = os.path.join(options.repository,
                                  gen_filename(options, '.index'))
        log('writing index')
        fs._index.save(pos, index_file)
    finally:
        # Always release the storage, even if saving the index fails.
        fs.close()
    log('writing full backup: %s bytes to %s', pos, dest)
    checksum = copyfile(options, dest, 0, pos)  # renamed: don't shadow sum()
    # Write the data file for this full backup
    datfile = os.path.splitext(dest)[0] + '.dat'
    with open(datfile, 'w') as fp:
        # Same line format as the old `print >> fp` statement.
        fp.write('%s %s %s %s\n' % (dest, 0, pos, checksum))
        fp.flush()
        os.fsync(fp.fileno())
    if options.killold:
        delete_old_backups(options)
    def testBadTransaction(self):
        """Corrupt transaction headers on disk and verify recovery reports errors.

        Two scenarios: overwriting an entire transaction header, and
        overwriting only part of one.
        """
        # Locate the file offsets of two transaction headers via the undo log.
        L = self.storage.undoLog()
        r = L[3]
        tid = base64.decodestring(r["id"] + "\n")
        pos1 = self.storage._txn_find(tid, 0)

        r = L[8]
        tid = base64.decodestring(r["id"] + "\n")
        pos2 = self.storage._txn_find(tid, 0)

        self.storage.close()

        # Overwrite the entire header.  `with` guarantees the handle is
        # closed even if seek/write fail (the original leaked it on error).
        with open(self.path, "a+b") as f:
            f.seek(pos1 - 50)
            f.write("\0" * 100)
        output = self.recover()
        self.assert_('error' in output, output)
        self.recovered = FileStorage(self.dest)
        self.recovered.close()
        # Recycle the recovered file as the source for the second scenario.
        os.remove(self.path)
        os.rename(self.dest, self.path)

        # Overwrite part of the header.
        with open(self.path, "a+b") as f:
            f.seek(pos2 + 10)
            f.write("\0" * 100)
        output = self.recover()
        self.assert_('error' in output, output)
        self.recovered = FileStorage(self.dest)
        self.recovered.close()
Exemple #12
0
 def start(self):
     """Create a FileStorage, populate it, and fork a ZEO server over it."""
     self.started = 1
     # NOTE(review): tempfile.mktemp is race-prone (the name can be claimed
     # between generation and use); presumably kept because FileStorage
     # needs a path that does not exist yet — confirm before changing.
     self.path = tempfile.mktemp(suffix=".fs")
     self._storage = FileStorage(self.path)
     self.db = ZODB.DB(self._storage)
     self.do_updates()
     self.pid, self.exit = forker.start_zeo_server(self._storage, self.addr)
Exemple #13
0
def do_incremental_backup(options, reposz, repofiles):
    """Append an incremental backup covering bytes reposz..pos of options.file.

    The copied range and its checksum are appended to the .dat file that
    belongs to the last full backup (the first entry of *repofiles*).
    """
    options.full = False
    dest = os.path.join(options.repository, gen_filename(options))
    if os.path.exists(dest):
        raise WouldOverwriteFiles('Cannot overwrite existing file: %s' % dest)
    # Find the file position of the last completed transaction.
    fs = FileStorage(options.file, read_only=True)
    try:
        # The FileStorage ctor calls read_index() which scans the file and
        # returns "the position just after the last valid transaction
        # record"; getSize() returns that position, which is exactly the
        # end of the range we want to copy.
        pos = fs.getSize()
        log('writing index')
        index_file = os.path.join(options.repository,
                                  gen_filename(options, '.index'))
        fs._index.save(pos, index_file)
    finally:
        # Always release the storage, even if saving the index fails.
        fs.close()
    log('writing incremental: %s bytes to %s',  pos-reposz, dest)
    checksum = copyfile(options, dest, reposz, pos - reposz)  # don't shadow sum()
    # The first file in repofiles points to the last full backup.  Use this
    # to get the .dat file and append the information for this incremental
    # to that file.
    fullfile = repofiles[0]
    datfile = os.path.splitext(fullfile)[0] + '.dat'
    # This .dat file better exist.  Let the exception percolate if not.
    with open(datfile, 'a') as fp:
        # Same line format as the old `print >> fp` statement.
        fp.write('%s %s %s %s\n' % (dest, reposz, pos, checksum))
        fp.flush()
        os.fsync(fp.fileno())
Exemple #14
0
class HistoryFreeFromFileStorage(
        RelStorageTestBase,
        UndoableRecoveryStorage,
        ):
    """Recovery tests copying from a FileStorage into a history-free storage."""

    # History-free storages keep only the latest revision of each object.
    keep_history = False

    def setUp(self):
        # The storage under test becomes the copy destination; a fresh
        # FileStorage becomes the copy source.
        self.open(create=1)
        self._storage.zap_all()
        self._dst = self._storage
        self._storage = FileStorage("Source.fs", create=True)

    def tearDown(self):
        self._storage.close()
        self._dst.close()
        self._storage.cleanup()
        self._dst.cleanup()

    def new_dest(self):
        # Hook used by the recovery tests to obtain the destination storage.
        return self._dst

    def compare(self, src, dest):
        # The dest storage has a truncated copy of dest, so
        # use compare_truncated() instead of compare_exact().
        self.compare_truncated(src, dest)
Exemple #15
0
def do_incremental_backup(options, reposz, repofiles):
    """Append an incremental backup covering bytes reposz..pos of options.file.

    The copied range and its checksum are appended to the .dat file that
    belongs to the last full backup (the first entry of *repofiles*).
    """
    options.full = False
    dest = os.path.join(options.repository, gen_filename(options))
    if os.path.exists(dest):
        raise WouldOverwriteFiles('Cannot overwrite existing file: %s' % dest)
    # Find the file position of the last completed transaction.
    fs = FileStorage(options.file, read_only=True)
    try:
        # The FileStorage ctor calls read_index() which scans the file and
        # returns "the position just after the last valid transaction
        # record"; getSize() returns that position, which is exactly the
        # end of the range we want to copy.
        pos = fs.getSize()
        log('writing index')
        index_file = os.path.join(options.repository,
                                  gen_filename(options, '.index'))
        fs._index.save(pos, index_file)
    finally:
        # Always release the storage, even if saving the index fails.
        fs.close()
    log('writing incremental: %s bytes to %s',  pos-reposz, dest)
    checksum = copyfile(options, dest, reposz, pos - reposz)  # don't shadow sum()
    # The first file in repofiles points to the last full backup.  Use this
    # to get the .dat file and append the information for this incremental
    # to that file.
    fullfile = repofiles[0]
    datfile = os.path.splitext(fullfile)[0] + '.dat'
    # This .dat file better exist.  Let the exception percolate if not.
    with open(datfile, 'a') as fp:
        # Same line format as the old `print >> fp` statement.
        fp.write('%s %s %s %s\n' % (dest, reposz, pos, checksum))
        fp.flush()
        os.fsync(fp.fileno())
Exemple #16
0
class Base(object):
    """Holds a FileStorage-backed ZODB under *path* plus a message connection."""

    def __init__(self, path, authkey):
        # Create the directory for the database file if needed.
        if not os.path.exists(path):
            os.makedirs(path)
        self._path = path
        self.authkey = authkey

        # Note: the local `path` is rebound to the storage file here.
        path = os.path.join(path, 'graph.fs')
        self.storage = FileStorage(path)
        self.db = DB(self.storage)

    def path(self):
        # Accessor for the directory path given to __init__.
        return self._path

    def process(self, connection):
        # `connection` is a (factory, args) pair; call it to build the
        # actual connection object.
        (func, args) = connection
        self.connection = func(*args)

    def recv(self):
        """Receive one message from the connection."""
        return self.connection.recv()

    def send(self, message):
        """Send one message, then close the connection."""
        self.connection.send(message)
        self.connection.close()

    def open(self):
        """Open and return a new ZODB connection."""
        return self.db.open()

    def close(self):
        """Abort any pending transaction and shut the database down."""
        transaction.get().abort()
        self.db.close()
        self.storage.close()
class PackerTests(StorageTestBase):
    """Tests that run a ZEO server over a populated FileStorage."""

    def setUp(self):
        self.started = 0  # set to 1 once start() has launched the server

    def start(self):
        self.started = 1
        # NOTE(review): tempfile.mktemp is race-prone; presumably kept
        # because FileStorage needs a not-yet-existing path.
        self.path = tempfile.mktemp(suffix=".fs")
        self._storage = FileStorage(self.path)
        self.db = ZODB.DB(self._storage)
        self.do_updates()
        self.pid, self.exit = forker.start_zeo_server(self._storage, self.addr)

    def do_updates(self):
        # Populate the storage with a batch of object revisions.
        for i in range(100):
            self._dostore()

    def tearDown(self):
        # Nothing to clean up unless start() actually ran.
        if not self.started:
            return
        self.db.close()
        self._storage.close()
        self.exit.close()
        # Kill the forked ZEO server; ignore errors if it already exited.
        # (The py2-only `except os.error, err` syntax is replaced and the
        # unused `err` binding dropped.)
        try:
            os.kill(self.pid, 9)
        except os.error:
            pass
        try:
            os.waitpid(self.pid, 0)
        except os.error:
            pass
        removefs(self.path)
 def openDB(self):
     """Create a temporary directory holding a fresh FileStorage-backed DB."""
     from ZODB.FileStorage import FileStorage
     from ZODB.DB import DB
     self.dir = tempfile.mkdtemp()
     self.storage = FileStorage(os.path.join(self.dir,
                                             'testQCConflicts.fs'))
     self.db = DB(self.storage)
Exemple #19
0
def analyze(path):
    """Scan every transaction in the FileStorage at *path* and build a Report."""
    storage = FileStorage(path, read_only=1)
    report = Report()
    for transaction_record in storage.iterator():
        analyze_trans(report, transaction_record)
    return report
Exemple #20
0
 def setUp(self):
     """Build a ZODB holding a PythonScript with three committed revisions."""
     # set up a zodb
     # we can't use DemoStorage here 'cos it doesn't support History
     self.dir = tempfile.mkdtemp()
     self.s = FileStorage(os.path.join(self.dir, 'testHistory.fs'),
                          create=True)
     self.connection = ZODB.DB(self.s).open()
     r = self.connection.root()
     a = Application()
     r['Application'] = a
     self.root = a
     # create a python script
     manage_addPythonScript(a, 'test')
     self.ps = ps = a.test
     # commit some changes, one transaction per revision
     ps.write('return 1')
     t = transaction.get()
     # undo note made by Application instantiation above.
     t.description = None
     t.note('Change 1')
     t.commit()
     ps.write('return 2')
     t = transaction.get()
     t.note('Change 2')
     t.commit()
     ps.write('return 3')
     t = transaction.get()
     t.note('Change 3')
     t.commit()
 def tearDown(self):
     """Restore the functional-test storage and tear the fixture down."""
     fsetup = functional.FunctionalTestSetup()
     # close the filestorage files now by calling the original
     # close on our storage instance
     FileStorage.close(fsetup.base_storage)
     # put the original storage back so the fixture can manage it
     fsetup.base_storage = self.original
     fsetup.tearDown()
     fsetup.tearDownCompletely()
Exemple #22
0
 def tearDown(self):
     """Close and clean up both the source storage and the recovered copy."""
     self.storage.close()
     if self.recovered is not None:
         self.recovered.close()
     self.storage.cleanup()
     # Open dest briefly so a storage handle exists to run cleanup() on.
     temp = FileStorage(self.dest)
     temp.close()
     temp.cleanup()
Exemple #23
0
def main(path=None):
    """Scan a FileStorage for objects whose references are missing or broken.

    Every oid in the index is loaded once; failures are classified as
    "undone" (POSKeyError from an undone object creation) or "noload"
    (any other load error).  Each loadable object's references are then
    checked and problems reported via report().
    """
    verbose = 0
    if path is None:
        import sys
        import getopt

        opts, args = getopt.getopt(sys.argv[1:], "v")
        for k, v in opts:
            if k == "-v":
                verbose += 1

        path, = args  # exactly one positional argument is expected

    fs = FileStorage(path, read_only=1)

    # Set of oids in the index that failed to load due to POSKeyError.
    # This is what happens if undo is applied to the transaction creating
    # the object (the oid is still in the index, but its current data
    # record has a backpointer of 0, and POSKeyError is raised then
    # because of that backpointer).
    undone = {}

    # Set of oids that were present in the index but failed to load.
    # This does not include oids in undone.
    noload = {}

    for oid in fs._index.keys():
        try:
            data, serial = fs.load(oid, "")
        except (KeyboardInterrupt, SystemExit):
            raise
        except POSKeyError:
            undone[oid] = 1
        except Exception:
            # Narrowed from a bare except: any other load error is "noload".
            if verbose:
                traceback.print_exc()
            noload[oid] = 1

    inactive = noload.copy()
    inactive.update(undone)
    for oid in fs._index.keys():
        if oid in inactive:
            continue
        data, serial = fs.load(oid, "")
        refs = get_refs(data)
        missing = []  # contains 3-tuples of oid, klass-metadata, reason
        for ref, klass in refs:
            if klass is None:
                klass = '<unknown>'
            if ref not in fs._index:
                missing.append((ref, klass, "missing"))
            if ref in noload:
                missing.append((ref, klass, "failed to load"))
            if ref in undone:
                missing.append((ref, klass, "object creation was undone"))
        if missing:
            report(oid, data, serial, missing)
Exemple #24
0
 def testFourBlocks(self):
     """Repeatedly damage four 512-byte blocks, recover, and check for errors."""
     for i in range(self.ITERATIONS):
         self.damage(4, 512)
         output = self.recover()
         self.assertTrue('error' in output, output)
         self.recovered = FileStorage(self.dest)
         self.recovered.close()
         # Use the recovered file as the source for the next iteration.
         os.remove(self.path)
         os.rename(self.dest, self.path)
Exemple #25
0
def analyze(path, use_dbm):
    """Build a Report over all transactions in *path*, optionally dbm-backed."""
    storage = FileStorage(path, read_only=1)
    report = Report(use_dbm)
    for transaction_record in storage.iterator():
        analyze_trans(report, transaction_record)
    if use_dbm:
        # Drop the on-disk scratch space the dbm-backed report created.
        shutil.rmtree(report.temp_dir)
    return report
Exemple #26
0
def main(path=None):
    """Scan a FileStorage for objects whose references are missing or broken.

    Uses the module-level VERBOSE counter for diagnostics.
    """
    # Fix: without this declaration, `VERBOSE += 1` below created an
    # unassigned local and raised UnboundLocalError whenever -v was given.
    global VERBOSE
    if path is None:
        import sys
        import getopt

        opts, args = getopt.getopt(sys.argv[1:], "v")
        for k, v in opts:
            if k == "-v":
                VERBOSE += 1

        path, = args  # exactly one positional argument is expected

    fs = FileStorage(path, read_only=1)

    # Set of oids in the index that failed to load due to POSKeyError.
    # This is what happens if undo is applied to the transaction creating
    # the object (the oid is still in the index, but its current data
    # record has a backpointer of 0, and POSKeyError is raised then
    # because of that backpointer).
    undone = {}

    # Set of oids that were present in the index but failed to load.
    # This does not include oids in undone.
    noload = {}

    for oid in fs._index.keys():
        try:
            data, serial = fs.load(oid, "")
        except (KeyboardInterrupt, SystemExit):
            raise
        except POSKeyError:
            undone[oid] = 1
        except Exception:
            # Narrowed from a bare except: any other load error is "noload".
            if VERBOSE:
                traceback.print_exc()
            noload[oid] = 1

    inactive = noload.copy()
    inactive.update(undone)
    for oid in fs._index.keys():
        if oid in inactive:
            continue
        data, serial = fs.load(oid, "")
        refs = get_refs(data)
        missing = []  # contains 3-tuples of oid, klass-metadata, reason
        for ref, klass in refs:
            if klass is None:
                klass = '<unknown>'
            if ref not in fs._index:
                missing.append((ref, klass, "missing"))
            if ref in noload:
                missing.append((ref, klass, "failed to load"))
            if ref in undone:
                missing.append((ref, klass, "object creation was undone"))
        if missing:
            report(oid, data, serial, missing)
Exemple #27
0
 def testBigBlock(self):
     """Repeatedly damage one 32 KiB block, recover, and check for errors."""
     for i in range(self.ITERATIONS):
         self.damage(1, 32 * 1024)
         output = self.recover()
         self.assert_('error' in output, output)
         self.recovered = FileStorage(self.dest)
         self.recovered.close()
         # Use the recovered file as the source for the next iteration.
         os.remove(self.path)
         os.rename(self.dest, self.path)
Exemple #28
0
def analyze(path, use_dbm):
    """Build a Report over all transactions in *path*, optionally dbm-backed."""
    storage = FileStorage(path, read_only=1)
    report = Report(use_dbm)
    for transaction_record in storage.iterator():
        analyze_trans(report, transaction_record)
    if use_dbm:
        # Drop the on-disk scratch space the dbm-backed report created.
        shutil.rmtree(report.temp_dir)
    return report
Exemple #29
0
    def checkBackwardTimeTravelWithRevertWhenStale(self):
        """Verify revert_when_stale invalidates objects after backward time travel."""
        # If revert_when_stale is true, when the database
        # connection is stale (such as through failover to an
        # asynchronous slave that is not fully up to date), the poller
        # should notice that backward time travel has occurred and
        # invalidate all objects that have changed in the interval.
        self._storage = self.make_storage(revert_when_stale=True)

        import os
        import shutil
        import tempfile
        from ZODB.FileStorage import FileStorage

        db = DB(self._storage)
        try:
            transaction.begin()
            c = db.open()
            r = c.root()
            r["alpha"] = PersistentMapping()
            transaction.commit()

            # To simulate failover to an out of date async slave, take
            # a snapshot of the database at this point, change some
            # object, then restore the database to its earlier state.

            d = tempfile.mkdtemp()
            try:
                transaction.begin()
                # Snapshot the current transactions into a scratch FileStorage.
                fs = FileStorage(os.path.join(d, "Data.fs"))
                fs.copyTransactionsFrom(c._storage)

                r["beta"] = PersistentMapping()
                transaction.commit()
                self.assertTrue("beta" in r)

                # Roll the storage back to the snapshot (the "time travel").
                c._storage.zap_all(reset_oid=False, slow=True)
                c._storage.copyTransactionsFrom(fs)

                fs.close()
            finally:
                shutil.rmtree(d)

            # r should still be in the cache.
            self.assertTrue("beta" in r)

            # Now sync, which will call poll_invalidations().
            c.sync()

            # r should have been invalidated
            self.assertEqual(r._p_changed, None)

            # r should be reverted to its earlier state.
            self.assertFalse("beta" in r)

        finally:
            db.close()
Exemple #30
0
 def tearDown(self):
     """Restore the functional-test storage and tear the fixture down."""
     fsetup = functional.FunctionalTestSetup(self.config_file)
     # close the filestorage files now by calling the original
     # close on our storage instance
     FileStorage.close(fsetup.base_storage)
     # replace the storage with the original, so functionalsetup
     # can do what it wants with it
     fsetup.base_storage = self.original
     fsetup.tearDown()
     fsetup.tearDownCompletely()
Exemple #31
0
    def test_pack_with_1_day(self):
        """Packing with --days=1 must not remove states newer than one day."""
        from ZODB.DB import DB
        from ZODB.FileStorage import FileStorage
        import time
        import transaction
        from relstorage.zodbpack import main

        # Create a storage with two revisions of the root object.
        storage = FileStorage(self.db_fn, create=True)
        db = DB(storage)
        conn = db.open()
        conn.root()['x'] = 1
        transaction.commit()
        oid = b'\0' * 8  # the root object's oid
        state, serial = storage.load(oid, '')
        time.sleep(0.1)
        conn.root()['x'] = 2
        transaction.commit()
        conn.close()
        # The first revision must still be loadable before packing.
        self.assertEqual(state, storage.loadSerial(oid, serial))
        db.close()
        storage = None  # release the file before zodbpack reopens it

        main(['', '--days=1', self.cfg_fn])

        # packing should not have removed the old state.
        storage = FileStorage(self.db_fn)
        self.assertEqual(state, storage.loadSerial(oid, serial))
        storage.close()
    def __init__(self, path=None):
        """Open a ZODB at *path* (defaulting to self.path) and bind its root.

        Failures are logged rather than raised (best-effort connector).
        """
        if path is None:  # fix: compare to None with `is`, not `==` (PEP 8)
            path = self.path

        try:
            self.storage = FileStorage(path)
            self.db = DB(self.storage)
            self.connection = self.db.open()
            self.root = self.connection.root()
        except Exception as e:
            # Deliberate best-effort: swallow the error after logging it.
            logger.error("ZODB CONNECTOR ERROR: " + str(e))
Exemple #33
0
def main(path, search_oids):
    """Report broken references in the FileStorage at *path*.

    Additionally, any reference to an oid in *search_oids* is reported
    immediately as it is found.
    """
    fs = FileStorage(path, read_only=1)

    # Set of oids in the index that failed to load due to POSKeyError.
    # This is what happens if undo is applied to the transaction creating
    # the object (the oid is still in the index, but its current data
    # record has a backpointer of 0, and POSKeyError is raised then
    # because of that backpointer).
    undone = {}

    # Set of oids that were present in the index but failed to load.
    # This does not include oids in undone.
    noload = {}

    for oid in fs._index.keys():
        try:
            data, serial = fs.load(oid, "")
        except (KeyboardInterrupt, SystemExit):
            raise
        except POSKeyError:
            undone[oid] = 1
        except Exception:
            # Narrowed from a bare except: any other load error is "noload".
            if VERBOSE:
                traceback.print_exc()
            noload[oid] = 1

    inactive = noload.copy()
    inactive.update(undone)

    for oid in fs._index.keys():
        if oid in inactive:
            continue
        data, serial = fs.load(oid, "")
        refs = get_refs(data)
        missing = []  # contains 3-tuples of oid, klass-metadata, reason
        for info in refs:
            try:
                ref, klass = info
            except (ValueError, TypeError):
                # failed to unpack a (ref, klass) pair
                ref = info
                klass = '<unknown>'

            if ref in search_oids:
                report(oid, data, serial, [(ref, klass, "searching for")])

            if ref not in fs._index:
                missing.append((ref, klass, "missing"))
            if ref in noload:
                missing.append((ref, klass, "failed to load"))
            if ref in undone:
                missing.append((ref, klass, "object creation was undone"))
        if missing:
            report(oid, data, serial, missing)
Exemple #34
0
class MyZODB(object):
    """Convenience handle bundling a FileStorage, its DB, connection, and root."""

    def __init__(self, path):
        storage = FileStorage(path)
        db = DB(storage)
        connection = db.open()
        self.storage = storage
        self.db = db
        self.connection = connection
        self.dbroot = connection.root()

    def close(self):
        """Tear everything down in reverse order of construction."""
        for closer in (self.connection, self.db, self.storage):
            closer.close()
Exemple #35
0
class EventCollection(object):
    """
    Structure to store an ensemble of events to disk and utilities to
    iterate through the events.
    """

    # Class-level defaults; all are rebound per-instance in open().
    # (The original declared events_since_save twice; the duplicate is gone.)
    storage = None
    db = None
    connection = None
    store = None
    events_since_save = 0

    def __init__(self, filename):
        self.filename = filename
        self.open()

    def __enter__(self):
        # Fix: return self so `with EventCollection(f) as ec:` binds the
        # collection object (previously `as` bound None).
        return self

    def __exit__(self, type, value, traceback):
        self.close()

    def open(self):
        """Open the FileStorage/DB/connection chain and reset the counter."""
        self.storage = FileStorage(self.filename)
        self.db = DB(self.storage)
        self.connection = self.db.open()
        self.store = self.connection.root()
        self.events_since_save = 0
        return self

    def close(self):
        """Close the connection and the storage.

        NOTE(review): self.db is never closed here — confirm whether a
        db.close() is needed.
        """
        self.connection.close()
        self.storage.close()

    def new_key(self):
        # Next integer key: one past the current maximum, or 0 when empty.
        return max(self.store.keys())+1 if self.store.keys() else 0

    def save(self):
        """Commit pending changes."""
        transaction.commit()

    def events(self):
        """Yield every stored event."""
        for key in self.store.keys():
            yield self.store[key]

    def add_event(self, event):
        """Append *event*; auto-commit after every 10000 additions."""
        self.store[self.new_key()] = event
        self.events_since_save += 1
        if self.events_since_save > 10000:
            print("Saving...")  # same output as the old print statement
            self.events_since_save = 0
            self.save()
Exemple #36
0
    def checkBackwardTimeTravel(self):
        """Verify the poller invalidates objects after backward time travel."""
        # When a failover event causes the storage to switch to an
        # asynchronous slave that is not fully up to date, the poller
        # should notice that backward time travel has occurred and
        # handle the situation by invalidating all objects that have
        # changed in the interval. (Currently, we simply invalidate all
        # objects when backward time travel occurs.)
        import os
        import shutil
        import tempfile
        from ZODB.FileStorage import FileStorage
        db = DB(self._storage)
        try:
            c = db.open()
            r = c.root()
            r['alpha'] = PersistentMapping()
            transaction.commit()

            # To simulate failover to an out of date async slave, take
            # a snapshot of the database at this point, change some
            # object, then restore the database to its earlier state.

            d = tempfile.mkdtemp()
            try:
                # Snapshot the current transactions into a scratch FileStorage.
                fs = FileStorage(os.path.join(d, 'Data.fs'))
                fs.copyTransactionsFrom(c._storage)

                r['beta'] = PersistentMapping()
                transaction.commit()
                self.assertTrue('beta' in r)

                # Roll the storage back to the snapshot (the "time travel").
                c._storage.zap_all()
                c._storage.copyTransactionsFrom(fs)

                fs.close()
            finally:
                shutil.rmtree(d)

            # r should still be in the cache.
            self.assertTrue('beta' in r)

            # Now sync, which will call poll_invalidations().
            c.sync()

            # r should have been invalidated
            self.assertEqual(r._p_changed, None)

            # r should be reverted to its earlier state.
            self.assertFalse('beta' in r)

        finally:
            db.close()
Exemple #37
0
class TestBackend:
    """Minimal ZODB-backed key/value store used by the tests.

    mode "w" and mode "r" both open the same
    FileStorage -> DB -> connection -> root() stack; the original code
    duplicated that sequence per mode and only initialised the
    iteration counters in "r" mode, which made iteritems() raise
    AttributeError after opening in "w" mode.
    """

    def __init__(self, filename, mode):
        self.mode = mode

        # Any other mode leaves the backend unopened, matching the
        # original behavior.
        if mode in ("w", "r"):
            self.storage = FileStorage(filename)
            db = DB(self.storage)
            connection = db.open()
            self.test_db_items = connection.root()

            # Iteration state for iteritems(); now initialised for both
            # modes (previously "w" mode omitted these attributes).
            self.next_rec_num = 0
            self.num_records = len(self.test_db_items)

    def __setitem__(self, key, value):
        self.test_db_items[key] = value

    def __getitem__(self, key):
        # NOTE(review): reads coerce the key with str() while writes do
        # not — so an int key stored via __setitem__ is not reachable
        # here. Preserved as-is; confirm against callers before fixing.
        return self.test_db_items[str(key)]

    def __len__(self):
        return len(self.test_db_items)

    def first(self):
        # Returns the record stored under integer key 0.
        return self.test_db_items[0]

    def iteritems(self):
        # Yields values stored under sequential integer keys, resuming
        # from next_rec_num (state persists across calls).
        while self.next_rec_num < self.num_records:
            value = self.test_db_items[self.next_rec_num]
            self.next_rec_num += 1
            yield value

    def close(self):
        # Flush pending changes before closing the underlying storage.
        transaction.commit()
        self.storage.close()

    def getTestDBItems(self):
        return self.test_db_items.values()
Exemple #38
0
 def connectZODB(self):
     print "connnecting"
     if not os.path.exists(self.bd + '.fs'):
         self.storage = FileStorage(self.bd + '.fs')
         self.db = DB(self.storage)
         self.connection = self.db.open()
         self.root = self.connection.root()
         self.root = PersistentDict()
     else:
         self.storage = FileStorage(self.bd + '.fs')
         self.db = DB(self.storage)
         self.connection = self.db.open()
         self.root = self.connection.root()
Exemple #39
0
 def connectZODB(self):
     """Open (or create) the ZODB file at ``self.bd + '.fs'`` and bind
     storage/db/connection/root onto self."""
     print "connnecting"
     if not os.path.exists(self.bd+'.fs'):
         # New database: FileStorage() creates the file.
         self.storage = FileStorage(self.bd+'.fs')
         self.db = DB(self.storage)
         self.connection = self.db.open()
         self.root = self.connection.root()
         # NOTE(review): this rebinds self.root to a fresh
         # PersistentDict that is never attached to the connection's
         # root object, so anything written through it is not
         # persisted — confirm intent.
         self.root = PersistentDict()
     else:
         # Existing database: open it and expose its real root.
         self.storage = FileStorage(self.bd+'.fs')
         self.db = DB(self.storage)
         self.connection = self.db.open()
         self.root = self.connection.root()
Exemple #40
0
    def generate_durus_object_records():
        """Yield Durus-format object records converted from every object
        in the ZODB FileStorage at ``zodb_file_name``.

        For each oid the ZODB pickle is loaded, its (class_meta, extra)
        wrapper is unwrapped, persistent references are captured, the
        state is optionally migrated ('_container' -> 'data'), and the
        result is re-pickled and packed with pack_record.
        Prints a '.' every 10000 objects as progress.
        """
        sio = cStringIO.StringIO()
        zodb_storage = ZODBFileStorage(zodb_file_name)
        n = 0
        for oid in zodb_storage._index.keys():
            n += 1
            if n % 10000 == 0:
                # Progress indicator for large storages.
                sys.stdout.write('.')
                sys.stdout.flush()
            # p is the raw pickle, serial its transaction id.
            p, serial = zodb_storage.load(oid, '')
            refs = referencesf(p)
            # unwrap extra tuple from class meta data
            # Reuse the single StringIO buffer: rewind, overwrite,
            # truncate leftovers, rewind again for unpickling.
            sio.seek(0)
            sio.write(p)
            sio.truncate()
            sio.seek(0)
            def get_class(module_class):
                # Resolve a (module_name, class_name) pair, importing
                # the module on demand.
                module, klass = module_class
                if module not in sys.modules:
                    __import__(module)
                return getattr(sys.modules[module], klass)
            class PersistentRef:
                # Wrapper for a persistent reference: keeps (oid, class)
                # so persistent_id below can re-emit it when re-pickling.
                def __init__(self, v):
                    oid, module_class = v
                    self.oid_klass = (oid, get_class(module_class))
            unpickler = cPickle.Unpickler(sio)
            unpickler.persistent_load = lambda v: PersistentRef(v)
            class_meta = unpickler.load()
            # ZODB stores (class_meta, extra); extra is expected empty.
            class_meta, extra = class_meta
            assert extra is None
            object_state = unpickler.load()
            if type(object_state) == dict and  '_container' in object_state:
                # Migrate legacy '_container' attribute to 'data'.
                assert 'data' not in object_state
                object_state['data'] = object_state['_container']
                del object_state['_container']
            # Re-pickle: first the class, then the state, into the
            # same reused buffer.
            sio.seek(0)
            sio.truncate()
            cPickle.dump(get_class(class_meta), sio, 2)

            pickler = cPickle.Pickler(sio, 2)
            def persistent_id(v):
                # Emit our captured (oid, class) for persistent refs;
                # None means "pickle normally".
                if isinstance(v, PersistentRef):
                    return v.oid_klass
                return None
            pickler.persistent_id = persistent_id
            pickler.dump(object_state)
            record = pack_record(oid, sio.getvalue(), ''.join(refs))
            yield record
        print
        print n, 'objects written'
Exemple #41
0
    def test_pack_defaults(self):
        """zodbpack with the default config must remove non-current
        object states from the storage."""
        from ZODB.DB import DB
        from ZODB.FileStorage import FileStorage
        from ZODB.POSException import POSKeyError
        import time
        import transaction
        from relstorage.zodbpack import main

        storage = FileStorage(self.db_fn, create=True)
        db = DB(storage)
        connection = db.open()

        # Commit a first state and remember it.
        connection.root()['x'] = 1
        transaction.commit()
        root_oid = b'\0' * 8
        old_state, old_serial = storage.load(root_oid, '')

        # Commit a newer state, making the first one non-current.
        time.sleep(0.1)
        connection.root()['x'] = 2
        transaction.commit()
        connection.close()

        # Before packing, the old state is still loadable.
        self.assertEqual(old_state, storage.loadSerial(root_oid, old_serial))
        db.close()
        storage = None

        main(['', self.cfg_fn])

        # Packing should have removed the old state.
        storage = FileStorage(self.db_fn)
        self.assertRaises(POSKeyError, storage.loadSerial, root_oid, old_serial)
        storage.close()
Exemple #42
0
    def checkBackwardTimeTravelWithoutRevertWhenStale(self):
        # If revert_when_stale is false (the default), when the database
        # connection is stale (such as through failover to an
        # asynchronous slave that is not fully up to date), the poller
        # should notice that backward time travel has occurred and
        # raise a ReadConflictError.
        self._storage = self.make_storage(revert_when_stale=False)

        import os
        import shutil
        import tempfile
        from ZODB.FileStorage import FileStorage

        db = DB(self._storage)
        try:
            c = db.open()
            r = c.root()
            r["alpha"] = PersistentMapping()
            transaction.commit()

            # To simulate failover to an out of date async slave, take
            # a snapshot of the database at this point, change some
            # object, then restore the database to its earlier state.

            d = tempfile.mkdtemp()
            try:
                fs = FileStorage(os.path.join(d, "Data.fs"))
                fs.copyTransactionsFrom(c._storage)

                r["beta"] = PersistentMapping()
                transaction.commit()
                self.assertTrue("beta" in r)

                # Rewind the live storage to the snapshot taken above.
                c._storage.zap_all(reset_oid=False, slow=True)
                c._storage.copyTransactionsFrom(fs)

                fs.close()
            finally:
                shutil.rmtree(d)

            # Sync, which will call poll_invalidations().
            c.sync()

            # Force a reload so the next attribute access goes back to
            # the (now rewound) storage.
            # Try to load an object, which should cause ReadConflictError.
            r._p_deactivate()
            self.assertRaises(ReadConflictError, lambda: r["beta"])

        finally:
            db.close()
    def setUp(self):
        """Build a ZODB with an Application and a DummyObject, then
        commit five annotated transactions mutating the object and its
        __annotations__ so history-aware code has history to inspect."""
        self._dir = tempfile.mkdtemp()
        self._storage = FileStorage(
            os.path.join(self._dir, 'test_athistoryaware.fs'),
            create=True)
        self._connection = ZODB.DB(self._storage).open()
        root = self._connection.root()
        root['Application'] = OFS.Application.Application()
        self.app = root['Application']

        # Transaction 1: create the object under test.
        self.app.object = DummyObject()
        self.object = self.app.object
        txn = transaction.get()
        txn.description = None  # clear initial transaction note
        txn.note('Transaction 1')
        txn.setUser('User 1')
        txn.commit()

        annotations = self.object.__annotations__

        # Transaction 2: change an attribute and an annotation.
        self.object.foo = 'baz'
        annotations[KEY1].spam = 'python'
        txn = transaction.get()
        txn.note('Transaction 2')
        txn.setUser('User 2')
        txn.commit()

        # Transaction 3: add a new annotation.
        annotations[KEY3] = DummyAnnotation()
        txn = transaction.get()
        txn.note('Transaction 3')
        txn.setUser('User 3')
        txn.commit()

        # Transaction 4: delete one annotation, mutate another.
        del annotations[KEY3]
        annotations[KEY2].spam = 'lumberjack'
        txn = transaction.get()
        txn.note('Transaction 4')
        txn.setUser('User 4')
        txn.commit()

        # Transaction 5: final attribute and annotation changes.
        self.object.foo = 'mit'
        annotations[KEY1].spam = 'trout'
        txn = transaction.get()
        txn.note('Transaction 5')
        txn.setUser('User 5')
        txn.commit()
Exemple #44
0
def main(path):
    fs = FileStorage(path, read_only=1)
    if PACK:
        fs.pack()

    db = ZODB.DB(fs)
    rt = db.open().root()
    paths = find_paths(rt, 3)

    def total_size(oid):
        cache = {}
        cache_size = 1000

        def _total_size(oid, seen):
            v = cache.get(oid)
            if v is not None:
                return v
            data, serialno = fs.load(oid, '')
            size = len(data)
            for suboid in referencesf(data):
                if seen.has_key(suboid):
                    continue
                seen[suboid] = 1
                size += _total_size(suboid, seen)
            cache[oid] = size
            if len(cache) == cache_size:
                cache.popitem()
            return size

        return _total_size(oid, {})

    keys = fs._index.keys()
    keys.sort()
    keys.reverse()

    if not VERBOSE:
        # If not running verbosely, don't print an entry for an object
        # unless it has an entry in paths.
        keys = filter(paths.has_key, keys)

    fmt = "%8s %5d %8d %s %s.%s"

    for oid in keys:
        data, serialno = fs.load(oid, '')
        mod, klass = get_pickle_metadata(data)
        refs = referencesf(data)
        path = paths.get(oid, '-')
        print fmt % (U64(oid), len(data), total_size(oid), path, mod, klass)
Exemple #45
0
def main(path):
    """Print size/path/class information for objects in the
    FileStorage at *path*, newest oid first."""
    fs = FileStorage(path, read_only=1)
    if PACK:
        fs.pack()

    db = ZODB.DB(fs)
    rt = db.open().root()
    paths = find_paths(rt, 3)

    def total_size(oid):
        # Recursively sum the pickle sizes of oid and everything it
        # references, memoizing per-oid totals in a bounded cache.
        cache = {}
        cache_size = 1000

        def _total_size(oid, seen):
            v = cache.get(oid)
            if v is not None:
                return v
            data, serialno = fs.load(oid, "")
            size = len(data)
            for suboid in referencesf(data):
                if seen.has_key(suboid):
                    continue
                seen[suboid] = 1
                size += _total_size(suboid, seen)
            cache[oid] = size
            if len(cache) == cache_size:
                # Keep the memo cache bounded.
                cache.popitem()
            return size

        return _total_size(oid, {})

    keys = fs._index.keys()
    keys.sort()
    keys.reverse()

    if not VERBOSE:
        # If not running verbosely, don't print an entry for an object
        # unless it has an entry in paths.
        keys = filter(paths.has_key, keys)

    fmt = "%8s %5d %8d %s %s.%s"

    for oid in keys:
        data, serialno = fs.load(oid, "")
        mod, klass = get_pickle_metadata(data)
        refs = referencesf(data)
        path = paths.get(oid, "-")
        print fmt % (U64(oid), len(data), total_size(oid), path, mod, klass)
Exemple #46
0
    def open(self):
        """Open the ZODB FileStorage configured under
        'rdf.store_conf' and bind the rdflib ConjunctiveGraph stored
        at the 'rdflib' root key onto self.graph."""
        import ZODB
        from ZODB.FileStorage import FileStorage
        self.path = self.conf['rdf.store_conf']
        openstr = os.path.abspath(self.path)

        try:
            fs = FileStorage(openstr)
        except IOError:
            L.exception("Failed to create a FileStorage")
            raise ZODBSourceOpenFailError(openstr)
        # NOTE(review): a zc.lockfile LockError raised when the file is
        # already locked is not handled here and would propagate raw; a
        # later revision of this method catches it — confirm whether
        # that handling should be backported.

        self.zdb = ZODB.DB(fs, cache_size=1600)
        self.conn = self.zdb.open()
        root = self.conn.root()
        if 'rdflib' not in root:
            # First open: seed the root with an empty graph.
            root['rdflib'] = ConjunctiveGraph('ZODB')
        self.graph = root['rdflib']
        try:
            transaction.commit()
        except Exception:
            # catch commit exception and close db.
            # otherwise db would stay open and follow up tests
            # will detect the db in error state
            L.exception('Forced to abort transaction on ZODB store opening',
                        exc_info=True)
            transaction.abort()
        transaction.begin()
        self.graph.open(self.path)
Exemple #47
0
class HistoryFreeToFileStorage(RelStorageTestBase, BasicRecoveryStorage):
    """Recovery tests copying from a history-free RelStorage into a
    FileStorage destination."""
    # pylint:disable=abstract-method,too-many-ancestors
    keep_history = False

    def setUp(self):
        super(HistoryFreeToFileStorage, self).setUp()
        # Source is the RelStorage under test; destination is a fresh
        # on-disk FileStorage.
        self._storage = self.make_storage()
        self._dst = FileStorage("Dest.fs", create=True)

    def tearDown(self):
        # Close and remove the destination's files; the superclass
        # tears down the source storage.
        self._dst.close()
        self._dst.cleanup()
        super(HistoryFreeToFileStorage, self).tearDown()

    def new_dest(self):
        # Reopen the same on-disk destination.
        return FileStorage('Dest.fs')
Exemple #48
0
    def testMigrationTool(self):
        """Round-trip a FileStorage through neomigrate (FS -> NEO ->
        FS) and verify the dump is unchanged."""
        dfs_storage = self.__getDataFS()
        dfs_db = ZODB.DB(dfs_storage)
        self.__populate(dfs_db, with_undo=False)
        # Reference dump taken before any migration.
        dump = self.__dump(dfs_storage)
        fs_path = dfs_storage.__name__
        dfs_db.close()

        neo = self.neo
        neo.start()

        kw = {'cluster': neo.cluster_name, 'quiet': None}
        master_nodes = neo.master_nodes.replace('/', ' ')
        if neo.SSL:
            kw['ca'], kw['cert'], kw['key'] = neo.SSL

        # Migrate FileStorage -> NEO.
        p = NEOProcess('neomigrate', fs_path, master_nodes, **kw)
        p.start()
        p.wait()

        # Remove the original file, then migrate NEO -> FileStorage.
        os.remove(fs_path)
        p = NEOProcess('neomigrate', master_nodes, fs_path, **kw)
        p.start()
        p.wait()

        # The recreated FileStorage must dump identically.
        self.assertEqual(dump, self.__dump(FileStorage(fs_path)))
Exemple #49
0
    def test_no_overwrite(self):
        """zodbconvert must exit instead of overwriting a destination
        that already contains data."""
        from ZODB.DB import DB
        from ZODB.FileStorage import FileStorage
        from relstorage.zodbconvert import main
        from relstorage.zodbconvert import storage_has_data
        import transaction

        # Creating a DB writes the root object into each storage.
        for filename in (self.srcfile, self.destfile):
            storage = FileStorage(filename)
            DB(storage).close()

        # With data already in the destination, main must bail out.
        self.assertRaises(SystemExit, main, ['', self.cfgfile])
Exemple #50
0
 def openDB(self):
     """Open a per-process temporary FileStorage-backed database and
     return the DB object (also bound to self.db)."""
     from ZODB.FileStorage import FileStorage
     from ZODB.DB import DB
     filename = 'fs_tmp__%s' % os.getpid()
     self.storage = FileStorage(filename)
     self.db = DB(self.storage)
     return self.db
Exemple #51
0
 def setUp(self):
     """Create a ZODB holding an Application with one PythonScript,
     then commit three annotated revisions of the script so history
     tests have entries to examine."""
     # set up a zodb
     # we can't use DemoStorage here 'cos it doesn't support History
     self.dir = tempfile.mkdtemp()
     self.s = FileStorage(os.path.join(self.dir,'testHistory.fs'),create=True)
     self.connection = ZODB.DB(self.s).open()
     r = self.connection.root()
     a = Application()
     r['Application'] = a
     self.root = a
     # create a python script
     manage_addPythonScript(a,'test')
     self.ps = ps = a.test
     # commit some changes
     ps.write('return 1')
     t = transaction.get()
     # undo note made by Application instantiation above.
     t.description = None 
     t.note('Change 1')
     t.commit()
     ps.write('return 2')
     t = transaction.get()
     t.note('Change 2')
     t.commit()
     ps.write('return 3')
     t = transaction.get()
     t.note('Change 3')
     t.commit()
 def setUp(self):
     """Create a ZODB holding an Application with one HistoryItem,
     then commit three annotated title changes so history tests have
     entries to examine."""
     # set up a zodb
     # we can't use DemoStorage here 'cos it doesn't support History
     self.dir = tempfile.mkdtemp()
     fs_path = os.path.join(self.dir, 'testHistory.fs')
     self.s = FileStorage(fs_path, create=True)
     self.connection = ZODB.DB(self.s).open()
     r = self.connection.root()
     a = Application()
     r['Application'] = a
     self.root = a
     # create a python script
     a['test'] = HistoryItem()
     self.hi = hi = a.test
     # commit some changes
     hi.title = 'First title'
     t = transaction.get()
     # undo note made by Application instantiation above.
     t.description = None
     t.note('Change 1')
     t.commit()
     time.sleep(0.02) # wait at least one Windows clock tick
     hi.title = 'Second title'
     t = transaction.get()
     t.note('Change 2')
     t.commit()
     time.sleep(0.02) # wait at least one Windows clock tick
     hi.title = 'Third title'
     t = transaction.get()
     t.note('Change 3')
     t.commit()
Exemple #53
0
    def open(self):
        """Open the ZODB FileStorage configured under
        'rdf.store_conf' and bind the rdflib ConjunctiveGraph stored
        at the 'rdflib' root key onto self.graph.

        Raises ZODBSourceOpenFailError if the file cannot be opened and
        DatabaseConflict if another process holds the storage lock.
        """
        import ZODB
        from ZODB.FileStorage import FileStorage
        from zc.lockfile import LockError
        self.path = self.conf['rdf.store_conf']
        openstr = os.path.abspath(self.path)

        try:
            fs = FileStorage(openstr)
        except IOError:
            L.exception("Failed to create a FileStorage")
            raise ZODBSourceOpenFailError(openstr)
        except LockError:
            # Another process holds the FileStorage lock file.
            L.exception('Found database "{}" is locked when trying to open it. '
                    'The PID of this process: {}'.format(openstr, os.getpid()), exc_info=True)
            raise DatabaseConflict('Database ' + openstr + ' locked')

        self.zdb = ZODB.DB(fs, cache_size=1600)
        self.conn = self.zdb.open()
        root = self.conn.root()
        if 'rdflib' not in root:
            # First open: seed the root with an empty graph.
            root['rdflib'] = ConjunctiveGraph('ZODB')
        self.graph = root['rdflib']
        try:
            transaction.commit()
        except Exception:
            # catch commit exception and close db.
            # otherwise db would stay open and follow up tests
            # will detect the db in error state
            L.exception('Forced to abort transaction on ZODB store opening', exc_info=True)
            transaction.abort()
        transaction.begin()
        self.graph.open(self.path)
Exemple #54
0
class HistoryFreeToFileStorage(RelStorageTestBase, BasicRecoveryStorage):
    """Recovery tests copying from a history-free RelStorage into a
    FileStorage destination."""

    keep_history = False

    def setUp(self):
        # Source is the RelStorage under test; destination is a fresh
        # on-disk FileStorage.
        self._storage = self.make_storage()
        self._dst = FileStorage("Dest.fs", create=True)

    def tearDown(self):
        # Close both storages and remove their on-disk files.
        self._storage.close()
        self._dst.close()
        self._storage.cleanup()
        self._dst.cleanup()

    def new_dest(self):
        # Reopen the same on-disk destination.
        return FileStorage("Dest.fs")
Exemple #55
0
def main(argv=sys.argv):
    """Command-line entry point: parse -f/--zodbfile (and optional
    -D/--debug), then verify every record in the storage loads."""
    parser = argparse.ArgumentParser(
        prog="zodbverify",
        description="Verifies that all records in the database can be loaded.",
    )
    parser.add_argument(
        "-f", "--zodbfile",
        action="store",
        dest="zodbfile",
        required=True,
        help="Path to file-storage",
    )
    parser.add_argument(
        "-D", "--debug",
        action="store_true",
        dest="debug",
        help="pause to debug broken pickles",
    )
    options = parser.parse_args(argv[1:])

    logging.basicConfig(level=logging.INFO)

    # Read-only: verification must never mutate the storage.
    storage = FileStorage(options.zodbfile, read_only=True)
    verify_zodb(storage, debug=options.debug)
Exemple #56
0
def open_db(path):
    """Open the NBA roster ZODB under *path* and return every handle
    (storage, db, connection, root) in a dict."""
    storage = FileStorage(path + 'NBA_Roster_ZODB.fs')
    db = DB(storage)
    connection = db.open()
    return {
        'storage': storage,
        'db': db,
        'connection': connection,
        'root': connection.root(),
    }
Exemple #57
0
    def setUp(self):
        """Open the graph store behind self.url (FileStorage for
        file URLs, ZEO otherwise), bind the rdflib graph, and create
        the test URIRef fixtures.

        Bug fixed: the old code tested os.path.exists(self.path) but
        then unlinked hardcoded '/tmp/zodb_local3.fs*' names, and
        os.unlink raised if any companion file (.index/.tmp/.lock)
        was missing. Now the path is derived once from the URL and
        each artifact is removed only if present.
        """
        if self.url.endswith('.fs'):
            from ZODB.FileStorage import FileStorage
            openstr = os.path.abspath(os.path.expanduser(self.url[7:]))
            # Remove leftover storage artifacts from a previous run.
            for suffix in ('', '.index', '.tmp', '.lock'):
                artifact = openstr + suffix
                if os.path.exists(artifact):
                    os.unlink(artifact)
            fs = FileStorage(openstr)
        else:
            from ZEO.ClientStorage import ClientStorage
            schema, opts = _parse_rfc1738_args(self.url)
            fs = ClientStorage((opts['host'], int(opts['port'])))
        self.zdb = ZODB.DB(fs)
        self.conn = self.zdb.open()
        root = self.conn.root()
        if 'rdflib' not in root:
            # First open: seed the root with an empty graph.
            root['rdflib'] = ConjunctiveGraph(self.store_name)
        self.graph = self.g = root['rdflib']

        self.michel = URIRef('michel')
        self.tarek = URIRef('tarek')
        self.bob = URIRef('bob')
        self.likes = URIRef('likes')
        self.hates = URIRef('hates')
        self.pizza = URIRef('pizza')
        self.cheese = URIRef('cheese')
        transaction.commit()
 def setUp(self):
     """Create the source FileStorage, populate it, and record the
     destination/recovered paths used by the recovery tests."""
     ZODB.tests.util.TestCase.setUp(self)
     self.path = 'source.fs'
     self.storage = FileStorage(self.path)
     self.populate()
     self.dest = 'dest.fs'
     # Filled in by tests once a recovery has been performed.
     self.recovered = None
 def start(self):
     """Create a temporary FileStorage, apply the initial updates,
     and launch a ZEO server serving it on self.addr."""
     self.started =1
     # NOTE(review): tempfile.mktemp only returns a name and is
     # documented as race-prone; acceptable for tests, but
     # mkstemp/NamedTemporaryFile would be safer — confirm.
     self.path = tempfile.mktemp(suffix=".fs")
     self._storage = FileStorage(self.path)
     self.db = ZODB.DB(self._storage)
     self.do_updates()
     self.pid, self.exit = forker.start_zeo_server(self._storage, self.addr)
    def setUp(self):
        """
        (based on ZODB.ConflictResolution.txt): Create the database for the 
        tests Set the databases. 
        Think of `conn_A` (connection A) as one thread, and `conn_B` 
        (connection B) as a concurrent thread.
        """

        self.testdir = tempfile.mkdtemp()
        self.storage = FileStorage(os.path.join(self.testdir, 'Data.fs'))
        self.db = ZODB.DB(self.storage)

        self.tm_A = transaction.TransactionManager()
        self.conn_A = self.db.open(transaction_manager=self.tm_A)
        p_ConnA = self.conn_A.root()['p'] = PCounter()
        self.tm_A.commit()

        self.tm_B = transaction.TransactionManager()
        self.conn_B = self.db.open(transaction_manager=self.tm_B)
        p_ConnB = self.conn_B.root()['p']
        assert p_ConnA._p_oid == p_ConnB._p_oid

        self.tm_C = transaction.TransactionManager()
        self.conn_C = self.db.open(transaction_manager=self.tm_C)
        # Bug fix: read through conn_C (was conn_B), otherwise the
        # assertion below re-checked connection B instead of C.
        p_ConnC = self.conn_C.root()['p']
        assert p_ConnA._p_oid == p_ConnC._p_oid