def setUp(self):
    """
    Build a filesystem-backed store and a SQL store so tests can migrate
    data from the former into the latter.
    """
    # Create a brand-new file store under a temporary directory.
    self.filesPath = CachingFilePath(self.mktemp())
    self.filesPath.createDirectory()
    self.fileStore = CommonDataStore(
        self.filesPath,
        {"push": StubNotifierFactory()},
        TestStoreDirectoryService(),
        True,
        True,
    )
    fileStore = self.fileStore
    self.sqlStore = yield theStoreBuilder.buildStore(
        self, StubNotifierFactory()
    )
    self.upgrader = UpgradeToDatabaseStep(self.fileStore, self.sqlStore)

    # Merge any per-test extra requirements into the common calendar set.
    baseRequirements = CommonTests.requirements
    extras = deriveValue(self, "extraRequirements", lambda t: {})
    mergedRequirements = self.mergeRequirements(baseRequirements, extras)

    yield populateCalendarsFrom(mergedRequirements, fileStore)
    yield resetCalendarMD5s(CommonTests.md5s, fileStore)
    # Drop a stray dot-file into a calendar home; migration should cope.
    self.filesPath.child("calendars").child(
        "__uids__").child("ho").child("me").child("home1").child(
        ".some-extra-data").setContent("some extra data")

    yield populateAddressBooksFrom(ABCommonTests.requirements, fileStore)
    yield resetAddressBookMD5s(ABCommonTests.md5s, fileStore)
    # Same stray-file trick on the address book side.
    self.filesPath.child("addressbooks").child(
        "__uids__").child("ho").child("me").child("home1").child(
        ".some-extra-data").setContent("some extra data")
def setUp(self):
    """
    Build a filesystem-backed store and a SQL store to migrate between,
    then stamp some WebDAV properties onto the file store that the
    migration is expected to carry across.
    """
    # Create a brand-new file store under a temporary directory.
    self.filesPath = CachingFilePath(self.mktemp())
    self.filesPath.createDirectory()
    self.fileStore = CommonDataStore(
        self.filesPath,
        {"push": StubNotifierFactory()},
        TestStoreDirectoryService(),
        True,
        True,
    )
    fileStore = self.fileStore
    self.sqlStore = yield theStoreBuilder.buildStore(
        self, StubNotifierFactory()
    )
    self.upgrader = UpgradeToDatabaseStep(self.fileStore, self.sqlStore)

    # Merge any per-test extra requirements into the common calendar set.
    baseRequirements = CommonTests.requirements
    extras = deriveValue(self, "extraRequirements", lambda t: {})
    mergedRequirements = self.mergeRequirements(baseRequirements, extras)

    yield populateCalendarsFrom(mergedRequirements, fileStore)
    yield resetCalendarMD5s(CommonTests.md5s, fileStore)
    # Drop a stray dot-file into a calendar home; migration should cope.
    self.filesPath.child("calendars").child(
        "__uids__").child("ho").child("me").child("home1").child(
        ".some-extra-data").setContent("some extra data")

    yield populateAddressBooksFrom(ABCommonTests.requirements, fileStore)
    yield resetAddressBookMD5s(ABCommonTests.md5s, fileStore)
    # Same stray-file trick on the address book side.
    self.filesPath.child("addressbooks").child(
        "__uids__").child("ho").child("me").child("home1").child(
        ".some-extra-data").setContent("some extra data")

    # Add some properties we want to check get migrated over.
    txn = self.fileStore.newTransaction()
    home = yield txn.calendarHomeWithUID("home_defaults")

    calendar = yield home.calendarWithName("calendar_1")
    calendarProps = calendar.properties()
    calendarProps[
        PropertyName.fromElement(caldavxml.SupportedCalendarComponentSet)
    ] = caldavxml.SupportedCalendarComponentSet(
        caldavxml.CalendarComponent(name="VEVENT"),
        caldavxml.CalendarComponent(name="VTODO"),
    )
    calendarProps[
        PropertyName.fromElement(element.ResourceType)
    ] = element.ResourceType(
        element.Collection(),
        caldavxml.Calendar(),
    )
    calendarProps[
        PropertyName.fromElement(customxml.GETCTag)
    ] = customxml.GETCTag.fromString("foobar")

    inbox = yield home.calendarWithName("inbox")
    inboxProps = inbox.properties()
    inboxProps[
        PropertyName.fromElement(customxml.CalendarAvailability)
    ] = customxml.CalendarAvailability.fromString(str(self.av1))
    inboxProps[
        PropertyName.fromElement(caldavxml.ScheduleDefaultCalendarURL)
    ] = caldavxml.ScheduleDefaultCalendarURL(
        element.HRef.fromString("/calendars/__uids__/home_defaults/calendar_1"),
    )

    yield txn.commit()
def setUp(self):
    """
    Build the file and SQL stores plus a stub service hierarchy, so the
    upgrade service under test can be started and observed.
    """
    # Fresh file store in a temporary directory.
    self.filesPath = CachingFilePath(self.mktemp())
    self.filesPath.createDirectory()
    self.fileStore = CommonDataStore(
        self.filesPath, StubNotifierFactory(), True, True
    )
    fileStore = self.fileStore
    self.sqlStore = yield theStoreBuilder.buildStore(
        self, StubNotifierFactory()
    )

    # Fires once the wrapped (post-upgrade) service actually starts, or
    # errbacks on the first error logged during startup.
    subStarted = self.subStarted = Deferred()

    class StubService(Service, object):
        def startService(self):
            super(StubService, self).startService()
            if not subStarted.called:
                subStarted.callback(None)

    from twisted.python import log

    def reportFirstError(event):
        # Only the first logged error is relevant; later ones would hit
        # an already-fired deferred.
        if event.get('isError') and not hasattr(subStarted, 'result'):
            subStarted.errback(
                event.get(
                    'failure',
                    RuntimeError("error starting up (see log)")
                )
            )
    log.addObserver(reportFirstError)

    def removeErrorObserver():
        try:
            log.removeObserver(reportFirstError)
        except ValueError:
            pass  # x not in list, I don't care.
    self.addCleanup(removeErrorObserver)

    self.stubService = StubService()
    self.topService = MultiService()
    self.upgrader = self.createUpgradeService()
    self.upgrader.setServiceParent(self.topService)

    # Merge any per-test extra requirements into the common calendar set.
    baseRequirements = CommonTests.requirements
    extras = deriveValue(self, "extraRequirements", lambda t: {})
    mergedRequirements = self.mergeRequirements(baseRequirements, extras)

    yield populateCalendarsFrom(mergedRequirements, fileStore)
    yield resetCalendarMD5s(CommonTests.md5s, fileStore)
    # Drop a stray dot-file into a calendar home; migration should cope.
    self.filesPath.child("calendars").child(
        "__uids__").child("ho").child("me").child("home1").child(
        ".some-extra-data").setContent("some extra data")

    yield populateAddressBooksFrom(ABCommonTests.requirements, fileStore)
    yield resetAddressBookMD5s(ABCommonTests.md5s, fileStore)
    # Same stray-file trick on the address book side.
    self.filesPath.child("addressbooks").child(
        "__uids__").child("ho").child("me").child("home1").child(
        ".some-extra-data").setContent("some extra data")
def setUp(self):
    """
    Record the old-schema directory and the current schema version, then
    build a store with background job processing disabled.
    """
    TestCase.setUp(self)
    schemaStep = UpgradeDatabaseSchemaStep(None)
    schemaRoot = schemaStep.schemaLocation
    self.upgradePath = schemaRoot.child("old").child(POSTGRES_DIALECT)
    self.currentVersion = self._getSchemaVersion(
        schemaRoot.child("current.sql"), "VERSION"
    )
    self.store = yield theStoreBuilder.buildStore(
        self,
        {"push": StubNotifierFactory()},
        enableJobProcessing=False,
    )
def setUp(self):
    """
    Locate the historical schema files and the current schema version,
    then build a store with background job processing disabled.
    """
    TestCase.setUp(self)
    upgrader = UpgradeDatabaseSchemaStep(None)
    # Directory holding the old postgres-dialect schema snapshots.
    self.upgradePath = upgrader.schemaLocation.child("old").child(
        POSTGRES_DIALECT)
    # Version number declared by the current schema file.
    self.currentVersion = self._getSchemaVersion(
        upgrader.schemaLocation.child("current.sql"), "VERSION")
    self.store = yield theStoreBuilder.buildStore(
        self, {"push": StubNotifierFactory()}, enableJobProcessing=False)
def _initStore(self, enableManagedAttachments=True):
    """
    Build a store with certain bits cleaned out: the managed-attachments
    marker row is deleted so each test starts from a known state.
    """
    self.patch(config, "EnableManagedAttachments", enableManagedAttachments)
    store = yield theStoreBuilder.buildStore(
        self, {"push": StubNotifierFactory()}
    )
    store.enableManagedAttachments = enableManagedAttachments

    # Remove the MANAGED-ATTACHMENTS marker from the server table.
    transaction = store.newTransaction()
    serverTable = schema.CALENDARSERVER
    yield Delete(
        From=serverTable,
        Where=serverTable.NAME == "MANAGED-ATTACHMENTS",
    ).on(transaction)
    yield transaction.commit()

    returnValue(store)
def _initStore(self, enableManagedAttachments=True):
    """
    Create a test store, patching the managed-attachments config flag and
    clearing the MANAGED-ATTACHMENTS marker row first.
    """
    self.patch(config, "EnableManagedAttachments", enableManagedAttachments)
    store = yield theStoreBuilder.buildStore(
        self, {"push": StubNotifierFactory()})
    store.enableManagedAttachments = enableManagedAttachments

    cleanupTxn = store.newTransaction()
    cs = schema.CALENDARSERVER
    # Delete the marker so the upgrade logic under test sees a clean slate.
    yield Delete(From=cs, Where=cs.NAME == "MANAGED-ATTACHMENTS").on(
        cleanupTxn)
    yield cleanupTxn.commit()
    returnValue(store)
def setUp(self):
    """
    Lay out a server root on disk, render the test caldavd.plist template
    into it, load the resulting configuration, copy the fixture
    account/resource/augment XML files into place, and build the store
    and directory service.
    """
    self.serverRoot = self.mktemp()
    os.mkdir(self.serverRoot)
    self.absoluteServerRoot = os.path.abspath(self.serverRoot)

    configRoot = os.path.join(self.absoluteServerRoot, "Config")
    dataRoot = os.path.join(self.absoluteServerRoot, "Data")
    documentRoot = os.path.join(self.absoluteServerRoot, "Documents")
    logRoot = os.path.join(self.absoluteServerRoot, "Logs")
    runRoot = os.path.join(self.absoluteServerRoot, "Run")
    for root in (configRoot, dataRoot, documentRoot, logRoot, runRoot):
        if not os.path.exists(root):
            os.makedirs(root)

    config.reset()

    testRoot = os.path.join(os.path.dirname(__file__), "gateway")
    templateName = os.path.join(testRoot, "caldavd.plist")
    with open(templateName) as templateFile:
        template = templateFile.read()

    # Keep the database outside the server root so spawned scripts share it.
    databaseRoot = os.path.abspath("_spawned_scripts_db" + str(os.getpid()))
    newConfig = template % {
        "ServerRoot": self.absoluteServerRoot,
        "DataRoot": dataRoot,
        "DatabaseRoot": databaseRoot,
        "DocumentRoot": documentRoot,
        "ConfigRoot": configRoot,
        "LogRoot": logRoot,
        "RunRoot": runRoot,
        "WritablePlist": os.path.join(
            os.path.abspath(configRoot), "caldavd-writable.plist"
        ),
    }
    configFilePath = FilePath(os.path.join(configRoot, "caldavd.plist"))
    configFilePath.setContent(newConfig)
    self.configFileName = configFilePath.path
    config.load(self.configFileName)

    # Keep the test hermetic: no memcached, no directory address book,
    # packaged timezone data only.
    config.Memcached.Pools.Default.ClientEnabled = False
    config.Memcached.Pools.Default.ServerEnabled = False
    ClientFactory.allowTestCache = True
    memcacher.Memcacher.allowTestCache = True
    memcacher.Memcacher.reset()
    config.DirectoryAddressBook.Enabled = False
    config.UsePackageTimezones = True

    # Copy fixture XML files into the data root under the names the
    # server expects.
    fixtures = (
        ("users-groups.xml", "accounts.xml"),
        ("resources-locations.xml", "resources.xml"),
        ("augments.xml", "augments.xml"),
    )
    for sourceName, destinationName in fixtures:
        source = FilePath(
            os.path.join(os.path.dirname(__file__), "gateway", sourceName)
        )
        destination = FilePath(
            os.path.join(config.DataRoot, destinationName)
        )
        source.copyTo(destination)

    self.notifierFactory = StubNotifierFactory()
    self.store = yield theStoreBuilder.buildStore(self, self.notifierFactory)
    self.directory = directoryFromConfig(config, self.store)
def _dbDataUpgrades(self, version, versionKey, upgraderClass):
    """
    Full database exercise of one data upgrade path: install the current
    schema, rewind its recorded version key, run the data upgrade
    service, and verify the key advances back to the expected value.
    This checks that the upgrade_XX.py files work correctly - at least
    for postgres.

    TODO: this currently does not create any data to test with.  It
    simply runs the upgrade on an empty store.
    """
    store = yield theStoreBuilder.buildStore(
        self, {"push": StubNotifierFactory()}
    )

    @inlineCallbacks
    def _loadOldData(path, oldVersion):
        """
        Use the postgres schema mechanism to do tests under a separate
        "namespace" in postgres that we can quickly wipe clean
        afterwards.
        """
        txn = store.newTransaction("test_dbUpgrades")
        yield txn.execSQL("create schema test_dbUpgrades;")
        yield txn.execSQL("set search_path to test_dbUpgrades;")
        yield txn.execSQL(path.getContent())
        yield txn.execSQL(
            "update CALENDARSERVER set VALUE = '%s' where NAME = '%s';" % (oldVersion, versionKey,)
        )
        yield txn.commit()

    @inlineCallbacks
    def _loadVersion():
        # Read back the integer stored under versionKey.
        txn = store.newTransaction("test_dbUpgrades")
        rows = yield txn.execSQL(
            "select value from calendarserver where name = '%s';" % (versionKey,)
        )
        yield txn.commit()
        returnValue(int(rows[0][0]))

    @inlineCallbacks
    def _unloadOldData():
        # Drop the scratch schema between passes.
        txn = store.newTransaction("test_dbUpgrades")
        yield txn.execSQL("set search_path to public;")
        yield txn.execSQL("drop schema test_dbUpgrades cascade;")
        yield txn.commit()

    @inlineCallbacks
    def _cleanupOldData():
        # Final cleanup: tolerate the schema already being gone.
        txn = store.newTransaction("test_dbUpgrades")
        yield txn.execSQL("set search_path to public;")
        yield txn.execSQL("drop schema if exists test_dbUpgrades cascade;")
        yield txn.commit()

    self.addCleanup(_cleanupOldData)

    test_upgrader = UpgradeDatabaseSchemaStep(None)
    expected_version = self._getSchemaVersion(
        test_upgrader.schemaLocation.child("current.sql"), versionKey
    )

    upgrader = upgraderClass(store)
    yield _loadOldData(
        test_upgrader.schemaLocation.child("current.sql"), version
    )
    yield upgrader.databaseUpgrade()
    new_version = yield _loadVersion()
    yield _unloadOldData()

    self.assertEqual(new_version, expected_version)
def _dbSchemaUpgrades(self, child):
    """
    Full database exercise of one schema upgrade path: load the old
    schema in C{child}, run the upgrade service, and compare the result
    against the current schema.  Also verifies that the upgrade is
    refused when C{failIfUpgradeNeeded} is set.  This checks that the
    upgrade.sql files work correctly - at least for postgres.
    """
    store = yield theStoreBuilder.buildStore(
        self, {"push": StubNotifierFactory()}
    )

    @inlineCallbacks
    def _loadOldSchema(path):
        """
        Use the postgres schema mechanism to do tests under a separate
        "namespace" in postgres that we can quickly wipe clean
        afterwards.
        """
        txn = store.newTransaction("test_dbUpgrades")
        yield txn.execSQL("create schema test_dbUpgrades;")
        yield txn.execSQL("set search_path to test_dbUpgrades;")
        yield txn.execSQL(path.getContent())
        yield txn.commit()

    @inlineCallbacks
    def _loadVersion():
        # Read back the schema version currently recorded in the DB.
        txn = store.newTransaction("test_dbUpgrades")
        rows = yield txn.execSQL(
            "select value from calendarserver where name = 'VERSION';"
        )
        yield txn.commit()
        returnValue(int(rows[0][0]))

    @inlineCallbacks
    def _loadSchemaFromDatabase():
        # Dump the live schema so it can be diffed against current.sql.
        txn = store.newTransaction("test_dbUpgrades")
        dumped = yield dumpSchema(
            txn, "Upgraded from %s" % (child.basename(),), "test_dbUpgrades"
        )
        yield txn.commit()
        returnValue(dumped)

    @inlineCallbacks
    def _unloadOldSchema():
        # Drop the scratch schema between passes.
        txn = store.newTransaction("test_dbUpgrades")
        yield txn.execSQL("set search_path to public;")
        yield txn.execSQL("drop schema test_dbUpgrades cascade;")
        yield txn.commit()

    @inlineCallbacks
    def _cleanupOldSchema():
        # Final cleanup: tolerate the schema already being gone.
        txn = store.newTransaction("test_dbUpgrades")
        yield txn.execSQL("set search_path to public;")
        yield txn.execSQL("drop schema if exists test_dbUpgrades cascade;")
        yield txn.commit()

    self.addCleanup(_cleanupOldSchema)

    test_upgrader = UpgradeDatabaseSchemaStep(None)
    expected_version = self._getSchemaVersion(
        test_upgrader.schemaLocation.child("current.sql"), "VERSION"
    )

    # Upgrade allowed
    upgrader = UpgradeDatabaseSchemaStep(store)
    yield _loadOldSchema(child)
    yield upgrader.databaseUpgrade()
    new_version = yield _loadVersion()

    # Compare the upgraded schema with the expected current schema
    new_schema = yield _loadSchemaFromDatabase()
    currentSchema = schemaFromPath(
        test_upgrader.schemaLocation.child("current.sql")
    )
    mismatched = currentSchema.compare(new_schema)
    self.assertEqual(
        len(mismatched), 0,
        "Schema mismatch:\n" + "\n".join(mismatched)
    )

    yield _unloadOldSchema()
    self.assertEqual(new_version, expected_version)

    # Upgrade disallowed
    upgrader = UpgradeDatabaseSchemaStep(store, failIfUpgradeNeeded=True)
    yield _loadOldSchema(child)
    old_version = yield _loadVersion()
    try:
        yield upgrader.databaseUpgrade()
    except RuntimeError:
        pass
    except Exception:
        self.fail("RuntimeError not raised")
    else:
        self.fail("RuntimeError not raised")
    new_version = yield _loadVersion()
    yield _unloadOldSchema()

    # With upgrades forbidden the stored version must not change.
    self.assertEqual(old_version, new_version)
def buildCalendarStore(testCase, notifierFactory, directoryService=None, homes=None):
    """
    Build a calendar store for tests, creating a default directory
    service (optionally pre-seeded with C{homes}) when none is supplied.
    """
    service = (
        buildDirectory(homes=homes)
        if directoryService is None
        else directoryService
    )
    return theStoreBuilder.buildStore(testCase, notifierFactory, service)
def setUp(self):
    """
    Lay out a server root on disk, render the test caldavd.plist template
    into it, load the resulting configuration, copy the fixture
    account/resource/augment XML files into place, and build the store
    and directory service.
    """
    self.serverRoot = self.mktemp()
    os.mkdir(self.serverRoot)
    self.absoluteServerRoot = os.path.abspath(self.serverRoot)

    configRoot = os.path.join(self.absoluteServerRoot, "Config")
    if not os.path.exists(configRoot):
        os.makedirs(configRoot)

    dataRoot = os.path.join(self.absoluteServerRoot, "Data")
    if not os.path.exists(dataRoot):
        os.makedirs(dataRoot)

    documentRoot = os.path.join(self.absoluteServerRoot, "Documents")
    if not os.path.exists(documentRoot):
        os.makedirs(documentRoot)

    logRoot = os.path.join(self.absoluteServerRoot, "Logs")
    if not os.path.exists(logRoot):
        os.makedirs(logRoot)

    runRoot = os.path.join(self.absoluteServerRoot, "Run")
    if not os.path.exists(runRoot):
        os.makedirs(runRoot)

    config.reset()

    testRoot = os.path.join(os.path.dirname(__file__), "gateway")
    templateName = os.path.join(testRoot, "caldavd.plist")
    # Use a context manager so the template file is closed even if
    # read() raises (the bare open/read/close here leaked the handle
    # on error, and was inconsistent with the sibling setUp).
    with open(templateName) as templateFile:
        template = templateFile.read()

    # Keep the database outside the server root so spawned scripts share it.
    databaseRoot = os.path.abspath("_spawned_scripts_db" + str(os.getpid()))
    newConfig = template % {
        "ServerRoot": self.absoluteServerRoot,
        "DataRoot": dataRoot,
        "DatabaseRoot": databaseRoot,
        "DocumentRoot": documentRoot,
        "ConfigRoot": configRoot,
        "LogRoot": logRoot,
        "RunRoot": runRoot,
        "WritablePlist": os.path.join(
            os.path.abspath(configRoot), "caldavd-writable.plist"
        ),
    }
    configFilePath = FilePath(
        os.path.join(configRoot, "caldavd.plist")
    )
    configFilePath.setContent(newConfig)
    self.configFileName = configFilePath.path
    config.load(self.configFileName)

    # Keep the test hermetic: no memcached, no directory address book,
    # packaged timezone data only.
    config.Memcached.Pools.Default.ClientEnabled = False
    config.Memcached.Pools.Default.ServerEnabled = False
    ClientFactory.allowTestCache = True
    memcacher.Memcacher.allowTestCache = True
    memcacher.Memcacher.reset()
    config.DirectoryAddressBook.Enabled = False
    config.UsePackageTimezones = True

    # Copy fixture XML files into the data root under the names the
    # server expects.
    origUsersFile = FilePath(
        os.path.join(
            os.path.dirname(__file__), "gateway", "users-groups.xml"
        )
    )
    copyUsersFile = FilePath(
        os.path.join(config.DataRoot, "accounts.xml")
    )
    origUsersFile.copyTo(copyUsersFile)

    origResourcesFile = FilePath(
        os.path.join(
            os.path.dirname(__file__), "gateway", "resources-locations.xml"
        )
    )
    copyResourcesFile = FilePath(
        os.path.join(config.DataRoot, "resources.xml")
    )
    origResourcesFile.copyTo(copyResourcesFile)

    origAugmentFile = FilePath(
        os.path.join(
            os.path.dirname(__file__), "gateway", "augments.xml"
        )
    )
    copyAugmentFile = FilePath(os.path.join(config.DataRoot, "augments.xml"))
    origAugmentFile.copyTo(copyAugmentFile)

    self.notifierFactory = StubNotifierFactory()
    self.store = yield theStoreBuilder.buildStore(self, self.notifierFactory)
    self.directory = directoryFromConfig(config, self.store)
def _dbDataUpgrades(self, version, versionKey, upgraderClass):
    """
    Full database exercise of one data upgrade path: install the current
    schema, rewind its recorded version key, run the data upgrade
    service, and verify the key advances back to the expected value.
    This checks that the upgrade_XX.py files work correctly - at least
    for postgres.

    TODO: this currently does not create any data to test with.  It
    simply runs the upgrade on an empty store.
    """
    store = yield theStoreBuilder.buildStore(
        self, {"push": StubNotifierFactory()}, enableJobProcessing=False)

    @inlineCallbacks
    def _loadOldData(path, oldVersion):
        """
        Use the postgres schema mechanism to do tests under a separate
        "namespace" in postgres that we can quickly wipe clean
        afterwards.
        """
        txn = store.newTransaction("test_dbUpgrades")
        yield txn.execSQL("create schema test_dbUpgrades;")
        yield txn.execSQL("set search_path to test_dbUpgrades;")
        yield txn.execSQL(path.getContent())
        yield txn.execSQL(
            "update CALENDARSERVER set VALUE = '%s' where NAME = '%s';" % (
                oldVersion, versionKey,
            ))
        yield txn.commit()

    @inlineCallbacks
    def _loadVersion():
        # Read back the integer stored under versionKey.
        txn = store.newTransaction("test_dbUpgrades")
        rows = yield txn.execSQL(
            "select value from calendarserver where name = '%s';" % (
                versionKey,
            ))
        yield txn.commit()
        returnValue(int(rows[0][0]))

    @inlineCallbacks
    def _unloadOldData():
        # Drop the scratch schema between passes.
        txn = store.newTransaction("test_dbUpgrades")
        yield txn.execSQL("set search_path to public;")
        yield txn.execSQL("drop schema test_dbUpgrades cascade;")
        yield txn.commit()

    @inlineCallbacks
    def _cleanupOldData():
        # Final cleanup: tolerate the schema already being gone.
        txn = store.newTransaction("test_dbUpgrades")
        yield txn.execSQL("set search_path to public;")
        yield txn.execSQL(
            "drop schema if exists test_dbUpgrades cascade;")
        yield txn.commit()

    self.addCleanup(_cleanupOldData)

    test_upgrader = UpgradeDatabaseSchemaStep(None)
    expected_version = self._getSchemaVersion(
        test_upgrader.schemaLocation.child("current.sql"), versionKey)

    upgrader = upgraderClass(store)
    yield _loadOldData(
        test_upgrader.schemaLocation.child("current.sql"), version)
    yield upgrader.databaseUpgrade()
    new_version = yield _loadVersion()
    yield _unloadOldData()

    self.assertEqual(new_version, expected_version)
def _dbSchemaUpgrades(self, child):
    """
    Full database exercise of one schema upgrade path: load the old
    schema in C{child}, run the upgrade service, and compare the result
    against the current schema (modulo a few known default-value
    differences).  Also verifies that the upgrade is refused when
    C{failIfUpgradeNeeded} is set.  This checks that the upgrade.sql
    files work correctly - at least for postgres.
    """
    store = yield theStoreBuilder.buildStore(
        self, {"push": StubNotifierFactory()}, enableJobProcessing=False)

    @inlineCallbacks
    def _loadOldSchema(path):
        """
        Use the postgres schema mechanism to do tests under a separate
        "namespace" in postgres that we can quickly wipe clean
        afterwards.
        """
        txn = store.newTransaction("test_dbUpgrades")
        yield txn.execSQL("create schema test_dbUpgrades;")
        yield txn.execSQL("set search_path to test_dbUpgrades;")
        yield txn.execSQL(path.getContent())
        yield txn.commit()

    @inlineCallbacks
    def _loadVersion():
        # Read back the schema version currently recorded in the DB.
        txn = store.newTransaction("test_dbUpgrades")
        rows = yield txn.execSQL(
            "select value from calendarserver where name = 'VERSION';")
        yield txn.commit()
        returnValue(int(rows[0][0]))

    @inlineCallbacks
    def _loadSchemaFromDatabase():
        # Dump the live schema so it can be diffed against current.sql.
        txn = store.newTransaction("test_dbUpgrades")
        dumped = yield dumpSchema(
            txn, "Upgraded from %s" % (child.basename(),),
            "test_dbUpgrades")
        yield txn.commit()
        returnValue(dumped)

    @inlineCallbacks
    def _unloadOldSchema():
        # Drop the scratch schema between passes.
        txn = store.newTransaction("test_dbUpgrades")
        yield txn.execSQL("set search_path to public;")
        yield txn.execSQL("drop schema test_dbUpgrades cascade;")
        yield txn.commit()

    @inlineCallbacks
    def _cleanupOldSchema():
        # Final cleanup: tolerate the schema already being gone.
        txn = store.newTransaction("test_dbUpgrades")
        yield txn.execSQL("set search_path to public;")
        yield txn.execSQL(
            "drop schema if exists test_dbUpgrades cascade;")
        yield txn.commit()

    self.addCleanup(_cleanupOldSchema)

    test_upgrader = UpgradeDatabaseSchemaStep(None)
    expected_version = self._getSchemaVersion(
        test_upgrader.schemaLocation.child("current.sql"), "VERSION")

    # Upgrade allowed
    upgrader = UpgradeDatabaseSchemaStep(store)
    yield _loadOldSchema(child)
    yield upgrader.databaseUpgrade()
    new_version = yield _loadVersion()

    # Compare the upgraded schema with the expected current schema
    new_schema = yield _loadSchemaFromDatabase()
    currentSchema = schemaFromPath(
        test_upgrader.schemaLocation.child("current.sql"))
    mismatched = currentSchema.compare(new_schema)

    # These are special case exceptions
    knownMismatches = (
        "Table: CALENDAR_HOME, column name DATAVERSION default mismatch",
        "Table: ADDRESSBOOK_HOME, column name DATAVERSION default mismatch",
        "Table: PUSH_NOTIFICATION_WORK, column name PUSH_PRIORITY default mismatch",
    )
    for knownMismatch in knownMismatches:
        try:
            mismatched.remove(knownMismatch)
        except ValueError:
            pass

    self.assertEqual(
        len(mismatched), 0,
        "Schema mismatch:\n" + "\n".join(mismatched))

    yield _unloadOldSchema()
    self.assertEqual(new_version, expected_version)

    # Upgrade disallowed
    upgrader = UpgradeDatabaseSchemaStep(store, failIfUpgradeNeeded=True)
    yield _loadOldSchema(child)
    old_version = yield _loadVersion()
    try:
        yield upgrader.databaseUpgrade()
    except NotAllowedToUpgrade:
        pass
    except Exception:
        self.fail("NotAllowedToUpgrade not raised")
    else:
        self.fail("NotAllowedToUpgrade not raised")
    new_version = yield _loadVersion()
    yield _unloadOldSchema()

    # With upgrades forbidden the stored version must not change.
    self.assertEqual(old_version, new_version)
def test_dbSchemaUpgrades(self):
    """
    Full database exercise of every schema upgrade path: for each old
    schema file, load it, run the upgrade service, and check the version
    reaches the current value; then check the upgrade is refused when
    C{failIfUpgradeNeeded} is set.  This checks that the upgrade.sql
    files work correctly - at least for postgres.
    """
    store = yield theStoreBuilder.buildStore(
        self, StubNotifierFactory()
    )

    @inlineCallbacks
    def _loadOldSchema(path):
        """
        Use the postgres schema mechanism to do tests under a separate
        "namespace" in postgres that we can quickly wipe clean
        afterwards.
        """
        txn = store.newTransaction("test_dbUpgrades")
        yield txn.execSQL("create schema test_dbUpgrades;")
        yield txn.execSQL("set search_path to test_dbUpgrades;")
        yield txn.execSQL(path.getContent())
        yield txn.commit()

    @inlineCallbacks
    def _loadVersion():
        # Read back the schema version currently recorded in the DB.
        txn = store.newTransaction("test_dbUpgrades")
        rows = yield txn.execSQL(
            "select value from calendarserver where name = 'VERSION';"
        )
        yield txn.commit()
        returnValue(int(rows[0][0]))

    @inlineCallbacks
    def _unloadOldSchema():
        # Drop the scratch schema between passes.
        txn = store.newTransaction("test_dbUpgrades")
        yield txn.execSQL("set search_path to public;")
        yield txn.execSQL("drop schema test_dbUpgrades cascade;")
        yield txn.commit()

    @inlineCallbacks
    def _cleanupOldSchema():
        # Final cleanup: tolerate the schema already being gone.
        txn = store.newTransaction("test_dbUpgrades")
        yield txn.execSQL("set search_path to public;")
        yield txn.execSQL("drop schema if exists test_dbUpgrades cascade;")
        yield txn.commit()

    self.addCleanup(_cleanupOldSchema)

    test_upgrader = UpgradeDatabaseSchemaService(None, None)
    expected_version = self._getSchemaVersion(
        test_upgrader.schemaLocation.child("current.sql"), "VERSION"
    )

    for child in test_upgrader.schemaLocation.child("old").child(
            POSTGRES_DIALECT).globChildren("*.sql"):

        # Upgrade allowed
        upgrader = UpgradeDatabaseSchemaService(store, None)
        yield _loadOldSchema(child)
        yield upgrader.databaseUpgrade()
        new_version = yield _loadVersion()
        yield _unloadOldSchema()
        self.assertEqual(new_version, expected_version)

        # Upgrade disallowed
        upgrader = UpgradeDatabaseSchemaService(
            store, None, failIfUpgradeNeeded=True, stopOnFail=False
        )
        yield _loadOldSchema(child)
        old_version = yield _loadVersion()
        try:
            yield upgrader.databaseUpgrade()
        except RuntimeError:
            pass
        except Exception:
            self.fail("RuntimeError not raised")
        else:
            self.fail("RuntimeError not raised")
        new_version = yield _loadVersion()
        yield _unloadOldSchema()

        # With upgrades forbidden the stored version must not change.
        self.assertEqual(old_version, new_version)
def setUp(self):
    """
    Create a fresh SQL store with background job processing disabled.
    """
    TestCase.setUp(self)
    notifiers = {"push": StubNotifierFactory()}
    self.store = yield theStoreBuilder.buildStore(
        self,
        notifiers,
        enableJobProcessing=False,
    )