Example #1
    def get_thread_dict(self, root):
        # This is vicious:  multiple threads are slamming changes into the
        # root object, then trying to read the root object, simultaneously
        # and without any coordination.  Conflict errors are rampant.  It
        # used to go around at most 10 times, but that fairly often failed
        # to make progress in the 7-thread tests on some test boxes.  Going
        # around (at most) 1000 times was enough so that a 100-thread test
        # reliably passed on Tim's hyperthreaded WinXP box (but at the
        # original 10 retries, the same test reliably failed with 15 threads).
        name = self.getName()
        MAXRETRIES = 1000

        for i in range(MAXRETRIES):
            try:
                root[name] = PersistentMapping()
                transaction.commit()
                break
            except ConflictError:
                root._p_jar.sync()
        else:
            raise ConflictError("Exceeded %d attempts to store" % MAXRETRIES)

        for j in range(MAXRETRIES):
            try:
                return root.get(name)
            except ConflictError:
                root._p_jar.sync()

        raise ConflictError("Exceeded %d attempts to read" % MAXRETRIES)
Example #2
    def _p_resolveConflict(self, old, committed, new):
        oldob = State(old)
        committedob = State(committed)
        newob = State(new)

        if not oldob.period == committedob.period == newob.period:
            raise ConflictError('Conflicting periods')

        if not oldob.timeout == committedob.timeout == newob.timeout:
            raise ConflictError('Conflicting timeouts')

        o_head, o_tail = _head_and_tail(oldob)
        c_head, c_tail = _head_and_tail(committedob)
        n_head, n_tail = _head_and_tail(newob)

        # We are operating on the __setstate__ representation of the
        # attached ListNodes (no clue why; we're operating on the
        # __getstate__ representation of the PersistentMapping object
        # in SessionDataObject).
        o_head_ts = o_head.ob[0]
        if o_head_ts < c_tail.ob[0]:
            raise ConflictError('Committed has obsoleted old')

        if o_head_ts < n_tail.ob[0]:
            raise ConflictError('New has obsoleted old')

        # find any slices added in 'new'
        n_added = _prefix(n_head, o_head_ts)

        # and portion of 'committed' newer than new's tail.
        c_rest = _prefix(c_head, n_tail.ob[0], include_match=True)

        head = deserialize(n_added + c_rest)
        new['head'] = head
        return new
Example #3
    def _commit(self, transaction):
        """Commit changes to an object"""

        if self._import:
            # We are importing an export file. We always do this
            # while making a savepoint so we can copy export data
            # directly to our storage, typically a TmpStore.
            self._importDuringCommit(transaction, *self._import)
            self._import = None

        # Just in case an object is added as a side-effect of storing
        # a modified object.  If, for example, a __getstate__() method
        # calls add(), the newly added objects will show up in
        # _added_during_commit.  This sounds insane, but has actually
        # happened.

        self._added_during_commit = []

        if self._invalidatedCache:
            raise ConflictError()

        for obj in self._registered_objects:
            oid = obj._p_oid
            assert oid
            if oid in self._conflicts:
                raise ReadConflictError(object=obj)

            if obj._p_jar is not self:
                raise InvalidObjectReference(obj, obj._p_jar)
            elif oid in self._added:
                assert obj._p_serial == z64
            elif obj._p_changed:
                if oid in self._invalidated:
                    resolve = getattr(obj, "_p_resolveConflict", None)
                    if resolve is None:
                        raise ConflictError(object=obj)
                self._modified.append(oid)
            else:
                # Nothing to do.  It's been said that it's legal, e.g., for
                # an object to set _p_changed to false after it's been
                # changed and registered.
                continue

            self._store_objects(ObjectWriter(obj), transaction)

        for obj in self._added_during_commit:
            self._store_objects(ObjectWriter(obj), transaction)
        self._added_during_commit = None
Example #4
 def _query(self, query, allow_reconnect=False):
     """
       Send a query to MySQL server.
       It reconnects automatically if needed and the following conditions are
       met:
        - It has not just tried to reconnect (i.e., this function will not
          attempt to connect twice per call).
        - This connection is not transactional and has not set MySQL locks,
          because locks are bound to the connection. This check can be
          overridden by passing allow_reconnect=True.
     """
     try:
         self.db.query(query)
     except OperationalError, m:
         if m[0] in query_syntax_error:
           raise OperationalError(m[0], '%s: %s' % (m[1], query))
         if m[0] in lock_error:
           raise ConflictError('%s: %s: %s' % (m[0], m[1], query))
         if m[0] in query_timeout_error:
           raise TimeoutReachedError('%s: %s: %s' % (m[0], m[1], query))
         if (allow_reconnect or not self._use_TM) and \
           m[0] in hosed_connection:
           self._forceReconnection()
           self.db.query(query)
         else:
           LOG('ZMySQLDA', ERROR, 'query failed: %s' % (query,))
           raise
Example #5
    def test_ConflictErrorDoesntImport(self):
        from ZODB.serialize import ObjectWriter
        from ZODB.POSException import ConflictError
        from ZODB.tests.MinPO import MinPO

        obj = MinPO()
        data = ObjectWriter().serialize(obj)

        # The pickle contains a GLOBAL ('c') opcode resolving to MinPO's
        # module and class.
        self.assertTrue(b'cZODB.tests.MinPO\nMinPO\n' in data)

        # Fiddle the pickle so it points to something "impossible" instead.
        data = data.replace(
            b'cZODB.tests.MinPO\nMinPO\n',
            b'cpath.that.does.not.exist\nlikewise.the.class\n')
        # Pickle can't resolve that GLOBAL opcode -- gets ImportError.
        self.assertRaises(ImportError, loads, data)

        # Verify that building ConflictError doesn't get ImportError.
        try:
            raise ConflictError(object=obj, data=data)
        except ConflictError as detail:
            # And verify that the msg names the impossible path.
            self.assertTrue(
                'path.that.does.not.exist.likewise.the.class' in str(detail))
        else:
            self.fail("expected ConflictError, but no exception raised")
Example #6
    def test_traversing(self):
        site = self.stub()
        self.expect(site.getSiteManager()).call(getGlobalSiteManager)
        self.expect(site.absolute_url()).result('http://nohost/plone')

        request = self.create_dummy(form={'ori': 'formdata'},
                                    RESPONSE=self.create_dummy(headers={}))
        self.expect(site.REQUEST).result(request)

        assertion_data = self.create_dummy()

        def view_method():
            assertion_data.form = request.form.copy()
            return 'the url %s@@view should be replaced' % (
                PORTAL_URL_PLACEHOLDER)

        with self.mocker.order():
            # test 1
            self.expect(
                site.restrictedTraverse('baz/@@view')()).call(view_method)
            # test 2
            self.expect(site.restrictedTraverse('baz/@@view')()).throw(
                Exception('failed'))
            # test 3
            self.expect(site.restrictedTraverse('baz/@@view')()).throw(
                ConflictError())

        self.replay()

        utility = getUtility(IBridgeRequest)
        setSite(site)

        # test 1
        response = utility('current-client',
                           'baz/@@view?foo=bar&baz=%s' %
                           PORTAL_URL_PLACEHOLDER,
                           data={'url': PORTAL_URL_PLACEHOLDER})
        self.assertEqual(
            assertion_data.form, {
                'foo': 'bar',
                'url': 'http://nohost/plone/',
                'baz': 'http://nohost/plone/'
            })
        self.assertEqual(request.form, {'ori': 'formdata'})
        self.assertEqual(response.code, 200)
        self.assertEqual(
            response.read(),
            'the url http://nohost/plone/@@view should be replaced')

        # test 2
        with self.assertRaises(urllib2.HTTPError) as cm:
            response = utility('current-client', 'baz/@@view?foo=bar')
        response = cm.exception
        self.assertEqual(request.form, {'ori': 'formdata'})
        self.assertEqual(response.code, 500)
        self.assertIn('failed', response.read())

        # test 3
        with self.assertRaises(ConflictError):
            utility('current-client', 'baz/@@view?foo=bar')
Example #7
 def f():
     try:
         raise ConflictError()
     except:
         raise Retry(sys.exc_info()[0],
                     sys.exc_info()[1],
                     sys.exc_info()[2])
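
Since the three positional arguments are exactly `sys.exc_info()`, the body can be written more compactly (same `Retry` exception from ZPublisher assumed, taking the exception type, value, and traceback):

    def f():
        try:
            raise ConflictError()
        except ConflictError:
            raise Retry(*sys.exc_info())
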
Example #8
def tryToResolveConflict(self,
                         oid,
                         committedSerial,
                         oldSerial,
                         newpickle,
                         committedData=''):
    # class_tuple, old, committed, newstate = ('',''), 0, 0, 0
    try:
        prfactory = PersistentReferenceFactory()
        newpickle = self._crs_untransform_record_data(newpickle)
        file = StringIO(newpickle)
        unpickler = Unpickler(file)
        unpickler.find_global = find_global
        unpickler.persistent_load = prfactory.persistent_load
        meta = unpickler.load()
        if isinstance(meta, tuple):
            klass = meta[0]
            newargs = meta[1] or ()
            if isinstance(klass, tuple):
                klass = find_global(*klass)
        else:
            klass = meta
            newargs = ()

        if klass in _unresolvable:
            raise ConflictError

        newstate = unpickler.load()
        inst = klass.__new__(klass, *newargs)

        try:
            resolve = inst._p_resolveConflict
        except AttributeError:
            _unresolvable[klass] = 1
            raise ConflictError

        old = state(self, oid, oldSerial, prfactory)
        committed = state(self, oid, committedSerial, prfactory, committedData)

        resolved = resolve(old, committed, newstate)

        file = StringIO()
        pickler = Pickler(file, 1)
        pickler.inst_persistent_id = persistent_id
        pickler.dump(meta)
        pickler.dump(resolved)
        return self._crs_transform_record_data(file.getvalue(1))
    except (ConflictError, BadClassName):
        pass
    except:
        # If anything else went wrong, catch it here and avoid passing an
        # arbitrary exception back to the client.  The error here will mask
        # the original ConflictError.  A client can recover from a
        # ConflictError, but not necessarily from other errors.  But log
        # the error so that any problems can be fixed.
        logger.error("Unexpected error", exc_info=True)

    raise ConflictError(oid=oid,
                        serials=(committedSerial, oldSerial),
                        data=newpickle)
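
The `resolve(old, committed, newstate)` call above dispatches to an application-level `_p_resolveConflict` method, which receives three states (common ancestor, committed, attempted) and must return a merged state or raise `ConflictError`. As a minimal illustration, not tied to any of the projects above, a counter can merge by replaying both deltas against the ancestor (the same arithmetic `BTrees.Length` uses):

    import persistent

    class Counter(persistent.Persistent):
        def __init__(self):
            self.value = 0

        def _p_resolveConflict(self, old, committed, new):
            # Three-way merge: apply both transactions' increments
            # to the common ancestor state.
            resolved = dict(new)
            resolved['value'] = committed['value'] + new['value'] - old['value']
            return resolved
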
Example #9
    def _store_objects(self, writer, transaction):
        for obj in writer:
            oid = obj._p_oid
            serial = getattr(obj, "_p_serial", z64)

            if ((serial == z64)
                    and ((self._savepoint_storage is None) or
                         (oid not in self._savepoint_storage.creating)
                         or self._savepoint_storage.creating[oid])):

                # obj is a new object

                # Because obj was added, it is now in _creating, so it
                # can be removed from _added.  If oid wasn't in
                # adding, then we are adding it implicitly.

                implicitly_adding = self._added.pop(oid, None) is None

                self._creating[oid] = implicitly_adding

            else:
                if (oid in self._invalidated
                        and not hasattr(obj, '_p_resolveConflict')):
                    raise ConflictError(object=obj)
                self._modified.append(oid)
            p = writer.serialize(obj)  # This calls __getstate__ of obj

            if isinstance(obj, Blob):
                if not IBlobStorage.providedBy(self._storage):
                    raise Unsupported("Storing Blobs in %s is not supported." %
                                      repr(self._storage))
                if obj.opened():
                    raise ValueError("Can't commit with opened blobs.")
                s = self._storage.storeBlob(oid, serial, p, obj._uncommitted(),
                                            self._version, transaction)
                # We invalidate the object here in order to ensure
                # that the next attribute access unghostifies it,
                # which will cause its blob data to be reattached
                # "cleanly".
                obj._p_invalidate()
            else:
                s = self._storage.store(oid, serial, p, self._version,
                                        transaction)
            self._cache.update_object_size_estimation(oid, len(p))
            obj._p_estimated_size = len(p)
            self._store_count += 1
            # Put the object in the cache before handling the
            # response, just in case the response contains the
            # serial number for a newly created object
            try:
                self._cache[oid] = obj
            except:
                # Dang, I bet it's wrapped:
                # TODO:  Deprecate, then remove, this.
                if hasattr(obj, 'aq_base'):
                    self._cache[oid] = obj.aq_base
                else:
                    raise

            self._handle_serial(s, oid)
Example #10
def never_resolve_conflict(oid,
                           committedSerial,
                           oldSerial,
                           newpickle,
                           committedData=b''):
    raise ConflictError(oid=oid,
                        serials=(committedSerial, oldSerial),
                        data=newpickle)
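
A stub like this is typically installed in tests to force the unresolvable-conflict path. A hypothetical wiring (plain attribute assignment on a storage instance works because the function takes no `self`):

    storage.tryToResolveConflict = never_resolve_conflict
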
Example #11
def retry_request_on_conflict():
    """Converts an SQLIntegrityError to a conflict error (which triggers a retry)"""
    try:
        yield
    except IntegrityError as e:
        if 'duplicate key value violates' not in str(e):
            raise
        raise ConflictError(str(e))
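
The bare `yield` implies this function is wrapped with `contextlib.contextmanager` in the original module; the decorator is just not shown in the listing. A self-contained sketch of that assumption, taking SQLAlchemy's `IntegrityError` as the exception source:

    from contextlib import contextmanager

    from sqlalchemy.exc import IntegrityError
    from ZODB.POSException import ConflictError

    @contextmanager
    def retry_request_on_conflict():
        """Convert a duplicate-key IntegrityError into a ConflictError."""
        try:
            yield
        except IntegrityError as e:
            if 'duplicate key value violates' not in str(e):
                raise
            raise ConflictError(str(e))

    # Hypothetical call site: a duplicate-key insert inside the block
    # surfaces as a ConflictError, which the publisher machinery retries.
    # with retry_request_on_conflict():
    #     session.add(record)
    #     session.flush()
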
Example #12
 def traverse(self, *unused, **unused_kw):
     action = self.action
     if action.startswith('fail'):
         raise Exception(action)
     if action == 'conflict':
         raise ConflictError()
     if action == 'succeed':
         return _succeed
     else:
         raise ValueError('unknown action: %s' % action)
Example #13
 def store(self, oid, serial, data, version, transaction):
     try:
         return super(DemoStorage, self).store(oid, serial, data, version,
                                               transaction)
     except ConflictError, e:
         old = e.serials[0]
         rdata = self.tryToResolveConflict(oid, old, serial, data)
         if rdata is None:  # BBB: Zope < 2.13
             raise ConflictError(oid=oid, serials=(old, serial), data=data)
         self.changes.store(oid, old, rdata, '', transaction)
         return ResolvedSerial
Example #14
    def query(self, query_string, max_rows=None, query_data=None):
        self._register()
        self.calls = self.calls + 1

        desc = ()
        res = []
        nselects = 0

        c = self.getcursor()

        try:
            for qs in [x for x in query_string.split('\0') if x]:
                try:
                    if query_data:
                        c.execute(qs, query_data)
                    else:
                        c.execute(qs)
                except TransactionRollbackError:
                    # Ha, here we have to look like we are the ZODB
                    # raising conflict errors; raising
                    # ZPublisher.Publish.Retry just doesn't work.
                    # logging.debug(
                    #     "Serialization Error, retrying transaction",
                    #     exc_info=True)
                    raise ConflictError(
                        "TransactionRollbackError from psycopg2")
                except psycopg2.OperationalError:
                    #logging.exception("Operational error on connection,
                    # closing it.")
                    try:
                        # Only close our connection
                        self.putconn(True)
                    except:
                        #logging.debug("Something went wrong when we tried
                        # to close the pool", exc_info=True)
                        pass
                if c.description is not None:
                    nselects += 1
                    if c.description != desc and nselects > 1:
                        raise psycopg2.ProgrammingError(
                            'multiple selects in single query not allowed')
                    if max_rows:
                        res = c.fetchmany(max_rows)
                    else:
                        res = c.fetchall()
                    desc = c.description
            self.failures = 0

        except Exception as err:
            self._abort()
            raise err

        return self.convert_description(desc), res
Example #15
    def testCustomExceptionViewConflictErrorHandling(self):
        # Make sure requests are retried as often as configured
        # even if an exception view has been registered that
        # matches ConflictError
        from zope.interface import directlyProvides
        from zope.publisher.browser import IDefaultBrowserLayer
        registerExceptionView(Exception)
        environ = self._makeEnviron()
        start_response = DummyCallable()
        _publish = DummyCallable()
        _publish._raise = ConflictError('oops')
        _request = DummyRequest()
        directlyProvides(_request, IDefaultBrowserLayer)
        _request.response = DummyResponse()
        _request.retry_count = 0
        _request.retry_max_count = 2
        _request.environ = {}

        def _close():
            pass

        _request.close = _close

        def _retry():
            _request.retry_count += 1
            return _request

        _request.retry = _retry

        def _supports_retry():
            return _request.retry_count < _request.retry_max_count

        _request.supports_retry = _supports_retry

        def _request_factory(stdin, environ, response):
            return _request

        # At first, retry_count is zero. Request has never been retried.
        self.assertEqual(_request.retry_count, 0)
        app_iter = self._callFUT(environ,
                                 start_response,
                                 _publish,
                                 _request_factory=_request_factory)

        # In the end the error view is rendered, but the request should
        # have been retried up to retry_max_count times
        self.assertTrue(
            app_iter[1].startswith('Exception View: ConflictError'))
        self.assertEqual(_request.retry_count, _request.retry_max_count)
        unregisterExceptionView(Exception)
Example #16
    def _p_resolveConflict(self, old, committed, new):
        o_m_layers, o_m_length, o_layers = old
        c_m_layers, c_m_length, c_layers = committed
        m_layers = [x[:] for x in c_layers]
        n_m_layers, n_m_length, n_layers = new
        
        if not o_m_layers == c_m_layers == n_m_layers:
            raise ConflictError('Conflicting max layers')

        if not o_m_length == c_m_length == n_m_length:
            raise ConflictError('Conflicting max length')

        o_latest_gen = o_layers[-1][0]
        o_latest_items = o_layers[-1][1]
        c_earliest_gen = c_layers[0][0]
        n_earliest_gen = n_layers[0][0]

        if o_latest_gen < c_earliest_gen:
            raise ConflictError('Committed obsoletes old')

        if o_latest_gen < n_earliest_gen:
            raise ConflictError('New obsoletes old')

        new_objects = []
        for n_generation, n_items in n_layers:
            if n_generation == o_latest_gen:
                new_objects.extend(n_items[len(o_latest_items):])
            elif n_generation > o_latest_gen:
                new_objects.extend(n_items)

        while new_objects:
            to_push, new_objects = new_objects[0], new_objects[1:]
            if len(m_layers[-1][1]) == c_m_length:
                m_layers.append((m_layers[-1][0]+1, []))
            m_layers[-1][1].append(to_push)

        return c_m_layers, c_m_length, m_layers[-c_m_layers:]
Example #17
    def testExceptionSideEffects(self):
        from zope.publisher.interfaces import IExceptionSideEffects

        class SideEffects(object):
            implements(IExceptionSideEffects)

            def __init__(self, exception):
                self.exception = exception

            def __call__(self, obj, request, exc_info):
                self.obj = obj
                self.request = request
                self.exception_type = exc_info[0]
                self.exception_from_info = exc_info[1]

        class SideEffectsFactory:
            def __call__(self, exception):
                self.adapter = SideEffects(exception)
                return self.adapter

        factory = SideEffectsFactory()
        from ZODB.POSException import ConflictError
        from zope.interface import Interface

        class IConflictError(Interface):
            pass

        classImplements(ConflictError, IConflictError)
        component.provideAdapter(factory, (IConflictError, ),
                                 IExceptionSideEffects)
        exception = ConflictError()
        try:
            raise exception
        except:
            pass
        self.publication.handleException(self.object,
                                         self.request,
                                         sys.exc_info(),
                                         retry_allowed=False)
        adapter = factory.adapter
        self.assertEqual(exception, adapter.exception)
        self.assertEqual(exception, adapter.exception_from_info)
        self.assertEqual(ConflictError, adapter.exception_type)
        self.assertEqual(self.object, adapter.obj)
        self.assertEqual(self.request, adapter.request)
Example #18
    def _p_resolveConflict(self, old, committed, new):
        # dict modifiers set '_lm'.
        if committed['_lm'] != new['_lm']:
            # we are operating against the PersistentMapping.__getstate__
            # representation, which aliases '_container' to self.data.
            c_data = committed['data']
            n_data = new['data']
            if c_data != n_data:
                msg = "Competing writes to session data: \n%s\n----\n%s" % (
                    pprint.pformat(c_data), pprint.pformat(n_data))
                raise ConflictError(msg)

        resolved = dict(new)
        invalid = committed.get('_iv') or new.get('_iv')
        if invalid:
            resolved['_iv'] = True
        resolved['_lm'] = max(committed['_lm'], new['_lm'])
        return resolved
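
For concreteness, here is how this resolver behaves on three hypothetical states (the timestamps are invented):

    old       = {'_lm': 100.0, '_iv': False, 'data': {'cart': []}}
    committed = {'_lm': 105.0, '_iv': False, 'data': {'cart': []}}
    new       = {'_lm': 107.0, '_iv': True,  'data': {'cart': []}}
    # '_lm' differs but the 'data' payloads are equal, so no
    # ConflictError is raised.  The result keeps new's contents,
    # propagates the invalidation flag, and takes the newer stamp:
    #   {'_lm': 107.0, '_iv': True, 'data': {'cart': []}}
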
Example #19
    def _store_objects(self, writer, transaction):
        for obj in writer:
            oid = obj._p_oid
            serial = getattr(obj, "_p_serial", z64)

            if serial == z64:
                # obj is a new object

                # Because obj was added, it is now in _creating, so it
                # can be removed from _added.  If oid wasn't in
                # adding, then we are adding it implicitly.

                implicitly_adding = self._added.pop(oid, None) is None

                self._creating[oid] = implicitly_adding

            else:
                if (oid in self._invalidated
                        and not hasattr(obj, '_p_resolveConflict')):
                    raise ConflictError(object=obj)
                self._modified.append(oid)
            p = writer.serialize(obj)  # This calls __getstate__ of obj
            s = self._storage.store(oid, serial, p, self._version, transaction)
            self._store_count += 1
            # Put the object in the cache before handling the
            # response, just in case the response contains the
            # serial number for a newly created object
            try:
                self._cache[oid] = obj
            except:
                # Dang, I bet it's wrapped:
                # TODO:  Deprecate, then remove, this.
                if hasattr(obj, 'aq_base'):
                    self._cache[oid] = obj.aq_base
                else:
                    raise

            self._handle_serial(s, oid)
Example #20
 def __getattr__(self, id, default=object()):
     raise ConflictError('testing')
Example #21
 def _handleConflicts(self, txn_context):
     data_dict = txn_context.data_dict
     pop_conflict = txn_context.conflict_dict.popitem
     resolved_dict = txn_context.resolved_dict
     tryToResolveConflict = txn_context.Storage.tryToResolveConflict
     while 1:
         # We iterate over conflict_dict, and clear it,
         # because new items may be added by calls to _store.
         # This is also done atomically, to avoid race conditions
         # with PrimaryNotificationsHandler.notifyDeadlock
         try:
             oid, serial = pop_conflict()
         except KeyError:
             return
         try:
             data, old_serial, _ = data_dict.pop(oid)
         except KeyError:
             assert oid is None, (oid, serial)
             # The storage refused to let us take the object lock, to avoid a
             # possible deadlock. TID is actually used for some kind of
             # "locking priority": when a higher value has the lock,
             # this means we stored objects "too late", and we would
             # otherwise cause a deadlock.
             # To recover, we must ask storages to release locks we
             # hold (to let possibly-competing transactions acquire
             # them), and requeue our already-sent store requests.
             ttid = txn_context.ttid
             logging.info('Deadlock avoidance triggered for TXN %s'
               ' with new locking TID %s', dump(ttid), dump(serial))
             txn_context.locking_tid = serial
             packet = Packets.AskRebaseTransaction(ttid, serial)
             for uuid in txn_context.conn_dict:
                 self._askStorageForWrite(txn_context, uuid, packet)
         else:
             if data is CHECKED_SERIAL:
                 raise ReadConflictError(oid=oid,
                     serials=(serial, old_serial))
             # TODO: data can be None if a conflict happens during undo
             if data:
                 txn_context.data_size -= len(data)
             if self.last_tid < serial:
                 self.sync() # possible late invalidation (very rare)
             try:
                 data = tryToResolveConflict(oid, serial, old_serial, data)
             except ConflictError:
                 logging.info(
                     'Conflict resolution failed for %s@%s with %s',
                     dump(oid), dump(old_serial), dump(serial))
                 # With recent ZODB, get_pickle_metadata (from ZODB.utils)
                 # does not support empty values, so do not pass 'data'
                 # in this case.
                 raise ConflictError(oid=oid, serials=(serial, old_serial),
                                     data=data or None)
             else:
                 logging.info(
                     'Conflict resolution succeeded for %s@%s with %s',
                     dump(oid), dump(old_serial), dump(serial))
                 # Mark this conflict as resolved
                 resolved_dict[oid] = serial
                 # Try to store again
                 self._store(txn_context, oid, serial, data)
Example #22
 def _handleConflicts(self, txn_context, tryToResolveConflict):
     result = []
     append = result.append
     # Check for conflicts
     data_dict = txn_context['data_dict']
     object_base_serial_dict = txn_context['object_base_serial_dict']
     object_serial_dict = txn_context['object_serial_dict']
     conflict_serial_dict = txn_context['conflict_serial_dict'].copy()
     txn_context['conflict_serial_dict'].clear()
     resolved_conflict_serial_dict = txn_context[
         'resolved_conflict_serial_dict']
     for oid, conflict_serial_set in conflict_serial_dict.iteritems():
         conflict_serial = max(conflict_serial_set)
         serial = object_serial_dict[oid]
         if ZERO_TID in conflict_serial_set:
             if 1:
                 # XXX: disable deadlock avoidance code until it is fixed
                 logging.info('Deadlock avoidance on %r:%r', dump(oid),
                              dump(serial))
                 # 'data' parameter of ConflictError is only used to report the
                 # class of the object. It doesn't matter if 'data' is None
                 # because the transaction is too big.
                 try:
                     data = data_dict[oid]
                 except KeyError:
                     data = txn_context['cache_dict'][oid]
             else:
                 # The storage refused to let us take the object lock, to avoid a
                 # possible deadlock. TID is actually used for some kind of
                 # "locking priority": when a higher value has the lock,
                 # this means we stored objects "too late", and we would
                 # otherwise cause a deadlock.
                 # To recover, we must ask storages to release locks we
                 # hold (to let possibly-competing transactions acquire
                 # them), and requeue our already-sent store requests.
                 # XXX: currently, brute-force is implemented: we send
                 # object data again.
                 # WARNING: not maintained code
                 logging.info('Deadlock avoidance triggered on %r:%r',
                              dump(oid), dump(serial))
                 for store_oid, store_data in data_dict.iteritems():
                     store_serial = object_serial_dict[store_oid]
                     if store_data is CHECKED_SERIAL:
                         self._checkCurrentSerialInTransaction(
                             txn_context, store_oid, store_serial)
                     else:
                         if store_data is None:
                             # Some undo
                             logging.warning(
                                 'Deadlock avoidance cannot reliably'
                                 ' work with undo, this must be implemented.'
                             )
                             conflict_serial = ZERO_TID
                             break
                         self._store(txn_context,
                                     store_oid,
                                     store_serial,
                                     store_data,
                                     unlock=True)
                 else:
                     continue
         else:
             data = data_dict.pop(oid)
             if data is CHECKED_SERIAL:
                 raise ReadConflictError(oid=oid,
                                         serials=(conflict_serial, serial))
             # TODO: data can be None if a conflict happens during undo
             if data:
                 txn_context['data_size'] -= len(data)
             resolved_serial_set = resolved_conflict_serial_dict.setdefault(
                 oid, set())
             if resolved_serial_set and conflict_serial <= max(
                     resolved_serial_set):
                 # A later serial has already been resolved, skip.
                 resolved_serial_set.update(conflict_serial_set)
                 continue
             try:
                 new_data = tryToResolveConflict(oid, conflict_serial,
                                                 serial, data)
             except ConflictError:
                 logging.info(
                     'Conflict resolution failed for '
                     '%r:%r with %r', dump(oid), dump(serial),
                     dump(conflict_serial))
             else:
                 logging.info(
                     'Conflict resolution succeeded for '
                     '%r:%r with %r', dump(oid), dump(serial),
                     dump(conflict_serial))
                 # Mark this conflict as resolved
                 resolved_serial_set.update(conflict_serial_set)
                 # Base serial changes too, as we resolved a conflict
                 object_base_serial_dict[oid] = conflict_serial
                 # Try to store again
                 self._store(txn_context, oid, conflict_serial, new_data)
                 append(oid)
                 continue
         # With recent ZODB, get_pickle_metadata (from ZODB.utils) does
         # not support empty values, so do not pass 'data' in this case.
         raise ConflictError(oid=oid,
                             serials=(conflict_serial, serial),
                             data=data or None)
     return result
Example #23
    def __check_and_resolve_conflicts(self, storage, conflicts):
        """
        Either raises a `ConflictError`, or successfully resolves
        all conflicts.

        Returns a set of int OIDs for objects modified in this transaction
        but which were then updated by conflict resolution and so must
        be invalidated.

        All the rows needed for detecting conflicts should be locked against
        concurrent changes.
        """
        # pylint:disable=too-many-locals
        cursor = self.store_connection.cursor
        adapter = self.adapter
        cache = self.cache
        tryToResolveConflict = storage.tryToResolveConflict

        # Detect conflicting changes.
        # Try to resolve the conflicts.
        invalidated_oid_ints = set()

        # In the past, we didn't load all conflicts from the DB at
        # once, just one at a time. This was because we also fetched
        # the new state data from the DB, and it could be large (if
        # lots of conflicts). But now we use the state we have in our
        # local temp cache for the new state, so we don't need to
        # fetch it, meaning this result will be small.
        #
        # The resolution process needs three pickles: the one we tried
        # to save, the one we're based off of, and the one currently
        # committed. The new one is passed as a parameter; the one
        # currently committed can optionally be passed (if not,
        # loadSerial() is used to get it), and the one we were based
        # off of is always loaded with loadSerial(). We *probably*
        # have the one we're based off of already in our storage
        # cache; the one that's currently committed is, I think, less
        # likely to be there, so there may be some benefit from
        # returning it in the conflict query. If we have a cache miss
        # and have to go to the database, that's bad: we're holding
        # object locks at this point so we're potentially blocking
        # other transactions.
        required_tids = self.required_tids
        self.count_conflicts = count_conflicts = len(conflicts)
        if count_conflicts:
            logger.debug("Attempting to resolve %d conflicts", count_conflicts)

        for conflict in conflicts:
            oid_int, committed_tid_int, tid_this_txn_saw_int, committed_state = conflict
            if tid_this_txn_saw_int is None:
                # A readCurrent entry. Did it conflict?
                expect_tid_int = required_tids[oid_int]
                if committed_tid_int != expect_tid_int:
                    raise VoteReadConflictError(
                        oid=int64_to_8bytes(oid_int),
                        serials=(int64_to_8bytes(committed_tid_int),
                                 int64_to_8bytes(expect_tid_int)))
                continue

            state_from_this_txn = cache.read_temp(oid_int)
            oid = int64_to_8bytes(oid_int)
            prev_tid = int64_to_8bytes(committed_tid_int)
            serial = int64_to_8bytes(tid_this_txn_saw_int)
            resolved_state = tryToResolveConflict(oid, prev_tid, serial,
                                                  state_from_this_txn,
                                                  committed_state)

            if resolved_state is None:
                # unresolvable; kill the whole transaction
                raise ConflictError(oid=oid,
                                    serials=(prev_tid, serial),
                                    data=state_from_this_txn)

            # resolved
            invalidated_oid_ints.add(oid_int)
            cache.store_temp(oid_int, resolved_state, committed_tid_int)

        if invalidated_oid_ints:
            # We resolved some conflicts, so we need to send them over to the database.
            adapter.mover.replace_temps(
                cursor,
                self.cache.temp_objects.iter_for_oids(invalidated_oid_ints))

        return invalidated_oid_ints
Example #24
def tryToResolveConflict(self,
                         oid,
                         committedSerial,
                         oldSerial,
                         newpickle,
                         committedData=b''):
    # class_tuple, old, committed, newstate = ('',''), 0, 0, 0
    klass = 'n/a'
    try:
        prfactory = PersistentReferenceFactory()
        newpickle = self._crs_untransform_record_data(newpickle)
        file = BytesIO(newpickle)
        unpickler = PersistentUnpickler(find_global, prfactory.persistent_load,
                                        file)
        meta = unpickler.load()
        if isinstance(meta, tuple):
            klass = meta[0]
            newargs = meta[1] or ()
            if isinstance(klass, tuple):
                klass = find_global(*klass)
        else:
            klass = meta
            newargs = ()

        if klass in _unresolvable:
            raise ConflictError

        inst = klass.__new__(klass, *newargs)

        try:
            resolve = inst._p_resolveConflict
        except AttributeError:
            _unresolvable[klass] = 1
            raise ConflictError

        oldData = self.loadSerial(oid, oldSerial)
        if not committedData:
            committedData = self.loadSerial(oid, committedSerial)

        newstate = unpickler.load()
        old = state(self, oid, oldSerial, prfactory, oldData)
        committed = state(self, oid, committedSerial, prfactory, committedData)

        resolved = resolve(old, committed, newstate)

        file = BytesIO()
        pickler = PersistentPickler(persistent_id, file, _protocol)
        pickler.dump(meta)
        pickler.dump(resolved)
        return self._crs_transform_record_data(file.getvalue())
    except (ConflictError, BadClassName) as e:
        logger.debug("Conflict resolution on %s failed with %s: %s", klass,
                     e.__class__.__name__, str(e))
    except:
        # If anything else went wrong, catch it here and avoid passing an
        # arbitrary exception back to the client.  The error here will mask
        # the original ConflictError.  A client can recover from a
        # ConflictError, but not necessarily from other errors.  But log
        # the error so that any problems can be fixed.
        logger.exception(
            "Unexpected error while trying to resolve conflict on %s", klass)

    raise ConflictError(oid=oid,
                        serials=(committedSerial, oldSerial),
                        data=newpickle)
Example #25
def badtx(self):
    raise ConflictError("boom")
Example #26
    def __check_and_resolve_conflicts(self, storage, conflicts):
        """
        Either raises a `ConflictError`, or successfully resolves
        all conflicts.

        Returns a set of int OIDs for objects modified in this transaction
        but which were then updated by conflict resolution and so must
        be invalidated.

        All the rows needed for detecting conflicts should be locked against
        concurrent changes.

        :param conflicts: A sequence of information needed for detecting
           and resolving conflicts:
           ``(oid_int, committed_tid_int, tid_this_txn_saw_int, committed_state)``.
           If ``tid_this_txn_saw_int`` is None, it was a read-current check,
           and unless the ``committed_tid_int`` matches the expected value,
           a conflict error is raised.
        """
        # pylint:disable=too-many-locals
        invalidated_oid_ints = set()
        if not conflicts:
            return invalidated_oid_ints

        self.count_conflicts = count_conflicts = len(conflicts)

        # In the past, we didn't load all conflicts from the DB at
        # once, just one at a time. This was because we also fetched
        # the new state data from the DB, and it could be large (if
        # lots of conflicts). But now we use the state we have in our
        # local temp cache for the new state, so we don't need to
        # fetch it, meaning this result will be small...
        #
        # ...almost. The resolution process needs three pickles: the
        # one we tried to save, the one we're based off of, and the
        # one currently committed. Remember we have locked objects at
        # this point, so we need to finish ASAP to not block other
        # transactions; in gevent, we also need to avoid giving up
        # control to the event loop for arbitrary periods of time,
        # as it could take a long time to get back to us.

        # - The one we tried to save (the new one) is passed as a
        # parameter. We read this from our local storage, which is
        # probably in memory and thus fast.
        #
        # - The one currently committed can optionally be passed, and
        # if not, loadSerial() is used to get it. It seems somewhat
        # unlikely that it's not in the local pickle cache, so we
        # probably benefit from returning it in the conflict query.
        #
        # - The one we were based off of is always loaded with
        # loadSerial(). We *possibly* have the one we're based off of
        # already in our storage cache, but there's no guarantee. So
        # it's best to prefetch all these things in order to limit the
        # number of database round-trips and the opportunity to block
        # for arbitrary periods of time.
        logger.debug("Attempting to resolve %d conflicts", count_conflicts)

        required_tids = self.required_tids
        old_states_to_prefetch = []
        actual_conflicts = []
        # First, go through and distinguish read-current conflicts from
        # state conflicts (if the adapter didn't do that already).
        for conflict in conflicts:
            oid_int, committed_tid_int, tid_this_txn_saw_int, _ = conflict
            if tid_this_txn_saw_int is not None:
                # An actual conflict. We need the state.
                actual_conflicts.append(conflict)
                old_states_to_prefetch.append((oid_int, tid_this_txn_saw_int))
            else:
                # A readCurrent entry. Did it conflict?
                # Note that some database adapters (MySQL) may have already raised a
                # UnableToLockRowsToReadCurrentError indicating a conflict. That's a type
                # of ReadConflictError like this.
                expect_tid_int = required_tids[oid_int]
                if committed_tid_int != expect_tid_int:
                    raise VoteReadConflictError(
                        oid=int64_to_8bytes(oid_int),
                        serials=(int64_to_8bytes(committed_tid_int),
                                 int64_to_8bytes(expect_tid_int)))

        if not actual_conflicts:
            # Nothing to prefetch or resolve. No need to go critical,
            # we have no other opportunities to switch.
            return invalidated_oid_ints

        # We're probably going to need to make a database query. Elevate our
        # priority and regain control ASAP.
        self.__enter_critical_phase_until_transaction_end()

        old_states_and_tids = self.shared_state.cache.prefetch_for_conflicts(
            self.shared_state.load_connection.cursor, old_states_to_prefetch)

        tryToResolveConflict = _CachedConflictResolver(
            storage, old_states_and_tids).tryToResolveConflict

        adapter = self.shared_state.adapter
        read_temp = self.shared_state.temp_storage.read_temp
        store_temp = self.shared_state.temp_storage.store_temp

        # The conflicts can be very large binary strings, no need to include
        # them in traceback info. (Plus they could be sensitive.)
        __traceback_info__ = count_conflicts, invalidated_oid_ints

        for conflict in actual_conflicts:
            # Match the names of the arguments used
            oid_int, committed_tid_int, tid_this_txn_saw_int, committedData = conflict

            oid = int64_to_8bytes(oid_int)
            committedSerial = int64_to_8bytes(committed_tid_int)
            oldSerial = int64_to_8bytes(tid_this_txn_saw_int)
            newpickle = read_temp(oid_int)

            # Because we're using the _CachedConflictResolver, we can only loadSerial()
            # one state: the ``oldSerial`` state. Therefore the committedData *must* be
            # given.

            resolved_state = tryToResolveConflict(oid, committedSerial,
                                                  oldSerial, newpickle,
                                                  committedData)

            if resolved_state is None:
                # unresolvable; kill the whole transaction
                raise ConflictError(
                    oid=oid,
                    serials=(oldSerial, committedSerial),
                    data=newpickle,
                )

            # resolved
            invalidated_oid_ints.add(oid_int)
            store_temp(oid_int, resolved_state, committed_tid_int)

        # We resolved some conflicts, so we need to send them over to the database.
        adapter.mover.replace_temps(
            self.shared_state.store_connection.cursor,
            self.shared_state.temp_storage.iter_for_oids(invalidated_oid_ints))

        return invalidated_oid_ints
Example #27
    def setstate(self, obj):
        oid = obj._p_oid

        self.before_load()
        try:
            p, serial = self._storage.load(oid, self._version)
            self._load_count = self._load_count + 1

            # XXX this is quite conservative!
            # We need, however, to avoid reading data from a transaction
            # that committed after the current "session" started, as
            # that might lead to mixing of cached data from earlier
            # transactions and new inconsistent data.
            #
            # Note that we (carefully) wait until after we call the
            # storage to make sure that we don't miss an invalidation
            # notification between the time we check and the time we
            # read.
            #invalid = self._invalidated.get
            invalid = self._invalidated.__contains__
            if invalid(oid) or invalid(None):
                if not hasattr(obj.__class__, '_p_independent'):
                    transaction.get().register(self)
                    raise ReadConflictError(object=obj)
                invalid = 1
            else:
                invalid = 0

            file = StringIO(p)
            unpickler = Unpickler(file)
            # SDH: external references are reassembled elsewhere.
            # unpickler.persistent_load=self._persistent_load
            classification = unpickler.load()
            state = unpickler.load()

            # SDH: Let the object mapper do the state setting.
            # if hasattr(object, '__setstate__'):
            #     object.__setstate__(state)
            # else:
            #     d=object.__dict__
            #     for k,v in state.items(): d[k]=v
            osio = self._get_osio()
            event = osio.deserialize(oid, obj, classification, state)

            if event.upos:
                self._handle_unmanaged(obj, event.upos)

            self._set_serial(obj, serial)

            if invalid:
                if obj._p_independent():
                    try:
                        self._invalidated.remove(oid)
                    except KeyError:
                        pass
                else:
                    transaction.get().register(self)
                    raise ConflictError(object=obj)

        except ConflictError:
            raise
        except:
            LOG('ZODB',
                ERROR,
                "Couldn't load state for %s" % ` oid `,
                error=sys.exc_info())
            raise
Example #28
    def commit(self, obj, transaction):
        if obj is self:
            self._may_begin(transaction)
            # We registered ourself.  Execute a commit action, if any.

            # XXX Where is the _Connection_onCommitActions ever set?
            #            if self._Connection__onCommitActions is not None:
            #                method_name, args, kw = \
            #                             self._Connection__onCommitActions.pop(0)
            #                apply(getattr(self, method_name), (transaction,) + args, kw)
            return
        oid = obj._p_oid
        assert oid != 'unmanaged', repr(obj)
        #invalid=self._invalidated.get
        invalid = self._invalidated.__contains__

        modified = getattr(self, '_modified', None)
        if modified is None:
            modified = self._invalidating

        if oid is None or obj._p_jar is not self:
            # new object
            oid = self.new_oid()
            obj._p_jar = self
            obj._p_oid = oid
            self._creating.append(oid)

        elif obj._p_changed:
            if ((invalid(oid) and not hasattr(obj, '_p_resolveConflict'))
                    or invalid(None)):
                raise ConflictError(object=obj)
            modified.append(oid)

        else:
            # Nothing to do
            return

        self._may_begin(transaction)

        stack = [obj]

        file = StringIO()
        seek = file.seek
        pickler = Pickler(file, 1)
        # SDH: external references are computed in a different way.
        # pickler.persistent_id=new_persistent_id(self, stack.append)
        dbstore = self._storage.store
        file = file.getvalue
        cache = self._cache
        get = cache.get
        dump = pickler.dump
        clear_memo = pickler.clear_memo

        version = self._version

        while stack:
            obj = stack[-1]
            del stack[-1]
            oid = obj._p_oid
            assert oid != 'unmanaged', repr(obj)
            serial = self._get_serial(obj)
            if serial == HASH0:
                # new object
                self._creating[oid] = True
            else:
                #XXX We should never get here
                # SDH: Actually it looks like we should, but only
                # for the first object on the stack.
                if ((invalid(oid) and not hasattr(obj, '_p_resolveConflict'))
                        or invalid(None)):
                    raise ConflictError(object=obj)
                modified.append(oid)

            # SDH: hook in the serializer.
            # state=obj.__getstate__()
            osio = self._get_osio()
            event, classification, state = osio.serialize(oid, obj)
            ext_refs = event.external
            if ext_refs:
                for (ext_oid, ext_ref) in ext_refs:
                    assert ext_oid
                    assert ext_ref is not None
                    if self._cache.get(ext_oid, None) is not ext_ref:
                        # New object or a bad reference
                        if ext_ref._p_jar is not None:
                            if ext_ref._p_jar is not self:
                                raise InvalidObjectReference, (
                                    "Can't refer from %s in %s to %s in %s" %
                                    (repr(obj), repr(self), repr(ext_ref),
                                     repr(ext_ref._p_jar)))
                        else:
                            ext_ref._p_jar = self
                        if ext_ref._p_oid:
                            if ext_ref._p_oid != ext_oid:
                                raise StorageError('Conflicting OIDs')
                        else:
                            ext_ref._p_oid = ext_oid
                        stack.append(ext_ref)

            if event.upos:
                self._handle_unmanaged(obj, event.upos)

            seek(0)
            clear_memo()
            dump(classification)
            dump(state)
            p = file(1)
            s = dbstore(oid, serial, p, version, transaction)
            self._store_count = self._store_count + 1

            # Put the object in the cache before handling the
            # response, just in case the response contains the
            # serial number for a newly created object
            try:
                cache[oid] = obj
            except:
                if aq_base(obj) is not obj:
                    # Yuck, someone tried to store a wrapper.  Try to
                    # cache it unwrapped.
                    cache[oid] = aq_base(obj)
                else:
                    raise

            self._handle_serial(s, oid)
Example #29
 def my_function():
     raise ConflictError()
Example #30
def raise_on_first_add(context, event):
    first = not bool(UIDS)
    UIDS.append(IUUID(context))
    if first:
        raise ConflictError()  # trigger a retry (once)