def __update_symb(self, symb_info):
    """Check whether a class or symbol has been renamed.

    If the symbol has not been renamed explicitly, it is loaded and its
    location is checked to see whether it has moved as well.
    """
    if symb_info in self.__changes:
        self.__changed = True
        logger.info(u'Change %s to %s' % (symb_info, self.__changes[symb_info]))
        return self.__changes[symb_info]
    else:
        symb = find_global(*symb_info, Broken=ZODBBroken)
        if is_broken(symb):
            logger.warning(
                u'Warning: Missing factory for %s' % u' '.join(symb_info))
            create_broken_module_for(symb)
        elif hasattr(symb, '__name__') and hasattr(symb, '__module__'):
            new_symb_info = (symb.__module__, symb.__name__)
            if new_symb_info != symb_info:
                logger.info(
                    u'New implicit rule detected %s to %s' % (
                        u' '.join(symb_info), u' '.join(new_symb_info)))
                self.__changes[symb_info] = new_symb_info
                self.__added[symb_info] = new_symb_info
                self.__changed = True
                return new_symb_info
    return symb_info
def __find_global(self, *klass_info):
    """Find a class with the given name, looking for a renaming rule first.

    Using ZODB find_global lets us manage missing classes.
    """
    return find_global(*self.__update_symb(klass_info), Broken=ZODBBroken)
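# A minimal, hypothetical sketch (not part of the snippets above) of the
# behaviour the docstring relies on: ZODB's find_global returns the real
# class when it can be imported, and otherwise a synthetic Broken subclass
# instead of raising, so missing factories can be detected and handled.
from ZODB.broken import Broken, find_global

real = find_global('persistent.mapping', 'PersistentMapping')
assert not issubclass(real, Broken)            # real class was importable

missing = find_global('no.such.module', 'Gone', Broken=Broken)
assert issubclass(missing, Broken)             # placeholder class instead
assert missing.__module__ == 'no.such.module'  # remembers its origin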
def __update_symb(self, symb_info):
    """Check whether a class or symbol has been renamed.

    If the symbol has not been renamed explicitly, it is loaded and its
    location is checked to see whether it has moved as well.
    """
    if symb_info in SKIP_SYMBS:
        self.__skipped = True
    if symb_info in self.__renames:
        self.__changed = True
        return self.__renames[symb_info]
    else:
        symb = find_global(*symb_info, Broken=ZODBBroken)
        if utils.is_broken(symb):
            logger.warning('Warning: Missing factory for {}'.format(
                ' '.join(symb_info)))
            create_broken_module_for(symb)
        elif hasattr(symb, '__name__') and hasattr(symb, '__module__'):
            new_symb_info = (symb.__module__, symb.__name__)
            if new_symb_info != symb_info:
                logger.info('New implicit rule detected {} to {}'.format(
                    ' '.join(symb_info), ' '.join(new_symb_info)))
                self.__renames[symb_info] = new_symb_info
                self.__added[symb_info] = new_symb_info
                self.__changed = True
                return new_symb_info
    return symb_info
def test23_RegistryBasesNotVersionedOrRestored(self):
    portal_repo = self.portal.portal_repository
    fol = self.portal.fol
    fol.setTitle("v1")

    # Make it a component registry with bases
    base = aq_base(self.portal.getSiteManager())
    components = PersistentComponents()
    components.__bases__ = (base,)
    fol.setSiteManager(components)
    portal_repo.applyVersionControl(fol)

    broken_iface = broken.find_global(
        'never_gonna_be_real', 'IMissing',
        Broken=ZODB.interfaces.IBroken, type=InterfaceClass)
    sys.modules[broken_iface.__module__] = module = imp.new_module(
        broken_iface.__module__)
    module.IMissing = broken_iface

    # Add a broken registration, but do a savepoint before breaking the
    # interface to simulate a broken registration from a previous commit.
    base.registerUtility(component=None, provided=broken_iface)
    transaction.savepoint(optimistic=True)
    del sys.modules[broken_iface.__module__]

    fol.setTitle("v2")
    # If an attempt was made to pickle the parent registry's broken
    # registration we would see an error here.
    portal_repo.save(fol)
    self.assertEqual(self.portal.fol.Title(), "v2")
    self.assertTrue(
        self.portal.fol.getSiteManager().__bases__[0] is base)
def classFactory(self, connection, modulename, globalname):
    modulename = re.sub(r'^IndexedCatalog\.BTrees\.', 'BTrees.', modulename)
    if globalname == 'PersistentMapping':
        modulename = 'persistent.mapping'
    elif globalname == 'PersistentList':
        modulename = 'persistent.list'
    return find_global(modulename, globalname, Broken=NotBroken)
def rebuildBrokenInterface(iface):
    if isinstance(iface, type) and issubclass(iface, Broken):
        broken_cache.pop((iface.__module__, iface.__name__), None)
        return find_global(
            iface.__module__, iface.__name__,
            Broken=IBroken, type=InterfaceClass)
    return iface
def classFactory(self, connection, modulename, globalname):
    modulename = re.sub(r'^IndexedCatalog\.BTrees\.', 'BTrees.', modulename)
    if globalname == 'PersistentMapping':
        modulename = 'persistent.mapping'
    elif globalname == 'PersistentList':
        modulename = 'persistent.list'
    elif globalname == 'LDAPGroupWrapper':
        modulename = 'indico_migrate.zodb_objects'
        globalname = 'LDAPGroupWrapper'
    return find_global(modulename, globalname, Broken=NotBroken)
def classFactory(self, connection, modulename, globalname):
    modulename = re.sub(r'^IndexedCatalog\.BTrees\.', 'BTrees.', modulename)
    if globalname == 'PersistentMapping':
        modulename = 'persistent.mapping'
    elif globalname == 'PersistentList':
        modulename = 'persistent.list'
    elif globalname == 'Avatar':
        modulename = 'indico.modules.users.legacy'
        globalname = 'AvatarUserWrapper'
    elif globalname == 'Group':
        modulename = 'indico.modules.groups.legacy'
        globalname = 'LocalGroupWrapper'
    elif globalname == 'LDAPGroup':
        modulename = 'indico.modules.groups.legacy'
        globalname = 'LDAPGroupWrapper'
    elif globalname == 'CERNGroup':
        modulename = 'indico.modules.groups.legacy'
        globalname = 'LDAPGroupWrapper'
    return find_global(modulename, globalname, Broken=NotBroken)
def classFactory(self, connection, modulename, globalname):
    modulename = re.sub(r'^IndexedCatalog\.BTrees\.', 'BTrees.', modulename)
    if globalname == 'PersistentMapping':
        modulename = 'persistent.mapping'
    elif globalname == 'PersistentList':
        modulename = 'persistent.list'
    elif globalname in ('Avatar', 'AvatarUserWrapper'):
        modulename = 'indico_zodbimport.zodb_objects'
        globalname = 'AvatarUserWrapper'
    elif globalname in ('Group', 'LocalGroupWrapper'):
        modulename = 'indico_zodbimport.zodb_objects'
        globalname = 'LocalGroupWrapper'
    elif globalname in ('LDAPGroup', 'LDAPGroupWrapper'):
        modulename = 'indico_zodbimport.zodb_objects'
        globalname = 'LDAPGroupWrapper'
    elif globalname in ('CERNGroup', 'LDAPGroupWrapper'):
        modulename = 'indico_zodbimport.zodb_objects'
        globalname = 'LDAPGroupWrapper'
    return find_global(modulename, globalname, Broken=NotBroken)
def classFactory(self, connection, modulename, globalname):
    # Zope will rebind this method to arbitrary user code at runtime.
    return find_global(modulename, globalname)
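# A hedged sketch of how the classFactory overrides above hook in: a ZODB
# Connection unpickles through self._db.classFactory, so subclassing DB and
# overriding the method is enough to redirect a renamed class.  MigrationDB
# and the old/new module paths below are illustrative, not from the snippets.
import ZODB
from ZODB.broken import find_global

class MigrationDB(ZODB.DB):
    def classFactory(self, connection, modulename, globalname):
        # Redirect a class that moved between releases before unpickling.
        if (modulename, globalname) == ('old.package', 'OldName'):
            modulename, globalname = 'new.package', 'NewName'
        return find_global(modulename, globalname)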
def classFactory(self, connection, modulename, globalname):
    modulename = re.sub(r'^IndexedCatalog\.BTrees\.', 'BTrees.', modulename)
    return find_global(modulename, globalname, Broken=NotBroken)
def classFactory(self, connection, modulename, globalname):
    return find_global(modulename, globalname, Broken=NotBroken)
def test_server_side(self):
    # First, verify default conflict resolution.
    server = StorageServer(self, DemoStorage())
    zs = server.zs
    reader = serialize.ObjectReader(
        factory=lambda conn, *args: find_global(*args))
    writer = serialize.ObjectWriter()
    ob = Length(0)
    ob._p_oid = z64

    # 2 non-conflicting transactions:
    zs.tpc_begin(1, '', '', {})
    zs.storea(ob._p_oid, z64, writer.serialize(ob), 1)
    self.assertEqual(zs.vote(1), [])
    tid1 = server.unpack_result(zs.tpc_finish(1))
    server.assert_calls(self, ('info', {'length': 1, 'size': Var()}))

    ob.change(1)
    zs.tpc_begin(2, '', '', {})
    zs.storea(ob._p_oid, tid1, writer.serialize(ob), 2)
    self.assertEqual(zs.vote(2), [])
    tid2 = server.unpack_result(zs.tpc_finish(2))
    server.assert_calls(self, ('info', {'size': Var(), 'length': 1}))

    # Now, a conflicting one:
    zs.tpc_begin(3, '', '', {})
    zs.storea(ob._p_oid, tid1, writer.serialize(ob), 3)

    # Vote returns the object id, indicating that a conflict was resolved.
    self.assertEqual(zs.vote(3), [ob._p_oid])
    tid3 = server.unpack_result(zs.tpc_finish(3))

    p, serial, next_serial = zs.loadBefore(ob._p_oid, maxtid)
    self.assertEqual((serial, next_serial), (tid3, None))
    self.assertEqual(reader.getClassName(p), 'BTrees.Length.Length')
    self.assertEqual(reader.getState(p), 2)

    # Now, we'll create a server that expects the client to
    # resolve conflicts:
    server = StorageServer(
        self, DemoStorage(), client_conflict_resolution=True)
    zs = server.zs

    # 2 non-conflicting transactions:
    zs.tpc_begin(1, '', '', {})
    zs.storea(ob._p_oid, z64, writer.serialize(ob), 1)
    self.assertEqual(zs.vote(1), [])
    tid1 = server.unpack_result(zs.tpc_finish(1))
    server.assert_calls(self, ('info', {'size': Var(), 'length': 1}))

    ob.change(1)
    zs.tpc_begin(2, '', '', {})
    zs.storea(ob._p_oid, tid1, writer.serialize(ob), 2)
    self.assertEqual(zs.vote(2), [])
    tid2 = server.unpack_result(zs.tpc_finish(2))
    server.assert_calls(self, ('info', {'length': 1, 'size': Var()}))

    # Now, a conflicting one:
    zs.tpc_begin(3, '', '', {})
    zs.storea(ob._p_oid, tid1, writer.serialize(ob), 3)

    # Vote returns an object, indicating that a conflict was not resolved.
    self.assertEqual(
        zs.vote(3),
        [dict(oid=ob._p_oid,
              serials=(tid2, tid1),
              data=writer.serialize(ob),
              )],
    )

    # Now, it's up to the client to resolve the conflict. It can
    # do this by making another store call. In this call, we use
    # tid2 as the starting tid:
    ob.change(1)
    zs.storea(ob._p_oid, tid2, writer.serialize(ob), 3)
    self.assertEqual(zs.vote(3), [])
    tid3 = server.unpack_result(zs.tpc_finish(3))
    server.assert_calls(self, ('info', {'size': Var(), 'length': 1}))

    p, serial, next_serial = zs.loadBefore(ob._p_oid, maxtid)
    self.assertEqual((serial, next_serial), (tid3, None))
    self.assertEqual(reader.getClassName(p), 'BTrees.Length.Length')
    self.assertEqual(reader.getState(p), 3)
def find_broken_global(*symb):
    result = find_global(*symb)
    if is_broken(result):
        logger.warning('Broken ZODB object for %s.', u' '.join(symb))
        create_broken_module_for(result)
    return result