Example #1
0
 def __init__(self, methodName='runTest'):
     """
     Pick the store builder for this test run: the shared module-level
     builder on postgres, otherwise a private builder with automatic
     cleanup suppressed.
     """
     super(SQLDump, self).__init__(methodName)
     if DB_TYPE[0] != POSTGRES_DIALECT:
         # Non-postgres dialects manage their own cleanup elsewhere.
         self.testStoreBuilder = SQLStoreBuilder(dsnUser="******", noCleanup=True)
     else:
         self.testStoreBuilder = theStoreBuilder
Example #2
0
 def __init__(self, methodName='runTest'):
     """
     Ensure one store builder exists per configured store, then reset the
     per-store bookkeeping lists.
     """
     txweb2.dav.test.util.TestCase.__init__(self, methodName)
     # Grow the builder list lazily until it covers every store slot.
     while len(self.theStoreBuilders) < self.numberOfStores:
         nextIndex = len(self.theStoreBuilders)
         self.theStoreBuilders.append(SQLStoreBuilder(count=nextIndex))
     self.theStores = [None] * self.numberOfStores
     self.activeTransactions = [None] * self.numberOfStores
Example #3
0
class SQLDump(TestCase):
    """
    Tests for L{sql_dump}.
    """

    def __init__(self, methodName='runTest'):
        super(SQLDump, self).__init__(methodName)
        # Postgres runs reuse the shared module-level store builder; any other
        # dialect gets its own builder with automatic cleanup suppressed (the
        # cleanUp method below does the wiping instead).
        if DB_TYPE[0] == POSTGRES_DIALECT:
            self.testStoreBuilder = theStoreBuilder
        else:
            self.testStoreBuilder = SQLStoreBuilder(dsnUser="******", noCleanup=True)


    @inlineCallbacks
    def setUp(self):
        """
        Build the store used by each test; job processing is disabled since
        these tests only exercise schema load/dump.
        """
        TestCase.setUp(self)

        self.store = yield self.testStoreBuilder.buildStore(
            self, {"push": StubNotifierFactory()}, enableJobProcessing=False
        )


    @inlineCallbacks
    def cleanUp(self):
        """
        Undo whatever L{_loadSchema} created: drop the scratch schema on
        postgres, or wipe the database contents on other dialects.
        """
        startTxn = self.store.newTransaction("test_dbUpgrades")
        if startTxn.dialect == POSTGRES_DIALECT:
            yield startTxn.execSQL("set search_path to public")
            yield startTxn.execSQL("drop schema test_dbUpgrades cascade")
        else:
            yield cleanDatabase(startTxn)

        yield startTxn.commit()


    @inlineCallbacks
    def _loadSchema(self, schema):
        """
        Use the postgres schema mechanism to do tests under a separate "namespace"
        in postgres that we can quickly wipe clean afterwards.
        """
        startTxn = self.store.newTransaction("test_dbUpgrades")
        if startTxn.dialect == POSTGRES_DIALECT:
            yield startTxn.execSQL("create schema test_dbUpgrades")
            yield startTxn.execSQL("set search_path to test_dbUpgrades")
        yield startTxn.execSQLBlock(schema)
        yield startTxn.commit()

        # Registered here (not in setUp) so cleanup only runs once a schema
        # has actually been loaded.
        self.addCleanup(self.cleanUp)


    @inlineCallbacks
    def _schemaCheck(self, schema, schema_bad):
        """
        Round-trip check: load C{schema} into the DB, dump it back out, and
        verify the dump compares clean against C{schema} but NOT against
        C{schema_bad}.
        """

        # Load old schema and populate with data
        yield self._loadSchema(schema)

        txn = self.store.newTransaction("loadData")
        dumped = yield dumpSchema(txn, "test", schemaname="test_dbUpgrades")
        yield txn.commit()

        parsed = schemaFromString(schema)
        self.assertEqual(parsed.compare(dumped), [])

        parsed_bad = schemaFromString(schema_bad)
        self.assertNotEqual(parsed_bad.compare(dumped), [])


    @inlineCallbacks
    def test_pkey_column(self):

        schema = """
CREATE TABLE FOO (
    ID1 integer primary key,
    ID2 integer not null
);
"""

        schema_bad = """
CREATE TABLE FOO (
    ID1 integer primary key,
    ID2 integer
);
"""

        yield self._schemaCheck(schema, schema_bad)


    @inlineCallbacks
    def test_pkey_table(self):

        schema = """
CREATE TABLE FOO (
    ID1 integer not null,
    ID2 integer not null,

    primary key (ID1)
);
"""

        schema_bad = """
CREATE TABLE FOO (
    ID1 integer,
    ID2 integer,

    primary key (ID1)
);
"""

        yield self._schemaCheck(schema, schema_bad)


    @inlineCallbacks
    def test_multiple_pkey_table(self):

        schema = """
CREATE TABLE FOO (
    ID1 integer not null,
    ID2 integer not null,
    ID3 integer not null,

    primary key (ID1, ID2)
);
"""

        schema_bad = """
CREATE TABLE FOO (
    ID1 integer,
    ID2 integer,
    ID3 integer,

    primary key (ID1, ID2)
);
"""

        yield self._schemaCheck(schema, schema_bad)


    @inlineCallbacks
    def test_unique_column(self):

        schema = """
CREATE TABLE FOO (
    ID1 integer unique,
    ID2 integer not null
);
"""

        schema_bad = """
CREATE TABLE FOO (
    ID1 integer unique,
    ID2 integer
);
"""

        yield self._schemaCheck(schema, schema_bad)


    @inlineCallbacks
    def test_unique_table(self):

        schema = """
CREATE TABLE FOO (
    ID1 integer,
    ID2 integer not null,

    unique (ID1)
);
"""

        schema_bad = """
CREATE TABLE FOO (
    ID1 integer,
    ID2 integer,

    unique (ID1)
);
"""

        yield self._schemaCheck(schema, schema_bad)


    @inlineCallbacks
    def test_multiple_unique_table(self):

        schema = """
CREATE TABLE FOO (
    ID1 integer,
    ID2 integer,
    ID3 integer not null,

    unique (ID1, ID2)
);
"""

        schema_bad = """
CREATE TABLE FOO (
    ID1 integer,
    ID2 integer,
    ID3 integer,

    unique (ID1, ID2)
);
"""

        yield self._schemaCheck(schema, schema_bad)


    @inlineCallbacks
    def test_timestamp_table(self):

        # Dialect-specific default expression for a UTC timestamp.
        schema = """
CREATE TABLE FOO (
    ID1 integer primary key,
    ID2 timestamp default timezone('UTC', CURRENT_TIMESTAMP)
);
""" if DB_TYPE[0] == POSTGRES_DIALECT else """
CREATE TABLE FOO (
    ID1 integer primary key,
    ID2 timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
);
"""

        schema_bad = """
CREATE TABLE FOO (
    ID1 integer primary key default 0,
    ID2 timestamp
);
"""

        yield self._schemaCheck(schema, schema_bad)


    @inlineCallbacks
    def test_references_table(self):

        # Dialect-specific text column type (text vs nclob).
        schema = """
CREATE TABLE FOO (
    ID1 integer primary key,
    ID2 text default null
);
CREATE TABLE BAR (
    ID1 integer references FOO on delete cascade,
    ID2 integer
);
CREATE TABLE BAZ (
    ID1 integer references FOO,
    ID2 integer
);
""" if DB_TYPE[0] == POSTGRES_DIALECT else """
CREATE TABLE FOO (
    ID1 integer primary key,
    ID2 nclob default null
);
CREATE TABLE BAR (
    ID1 integer references FOO on delete cascade,
    ID2 integer
);
CREATE TABLE BAZ (
    ID1 integer references FOO,
    ID2 integer
);
"""
        schema_bad = """
CREATE TABLE FOO (
    ID1 integer primary key default 0,
    ID2 timestamp
);
CREATE TABLE BAR (
    ID1 integer references FOO,
    ID2 integer
);
CREATE TABLE BAZ (
    ID1 integer references FOO on delete cascade,
    ID2 integer
);
"""

        yield self._schemaCheck(schema, schema_bad)


    @inlineCallbacks
    def test_index_table(self):

        schema = """
CREATE TABLE FOO (
    ID1 integer not null,
    ID2 integer not null,

    primary key (ID1)
);

create index FOOINDEX on FOO (ID1, ID2);
"""

        schema_bad = """
CREATE TABLE FOO (
    ID1 integer,
    ID2 integer,

    primary key (ID1)
);
create index FOOINDEX on FOO (ID2, ID1);
"""

        yield self._schemaCheck(schema, schema_bad)


    @inlineCallbacks
    def test_unique_index_table(self):

        schema = """
CREATE TABLE FOO (
    ID1 integer not null,
    ID2 integer not null,

    primary key (ID1)
);

create unique index FOOINDEX on FOO(ID1, ID2);
"""

        schema_bad = """
CREATE TABLE FOO (
    ID1 integer,
    ID2 integer,

    primary key (ID1)
);
create index FOOINDEX on FOO(ID1, ID2);
"""

        yield self._schemaCheck(schema, schema_bad)
Example #4
0
 def createStore(self, delegateTo):
     """
     Build the child store and hand it to the delegate class named by
     C{delegateTo}, swapping this AMP protocol out for the delegate.
     """
     delegateClass = namedAny(delegateTo)
     childStore = SQLStoreBuilder.childStore()
     swapAMP(self, delegateClass(childStore))
     return {}
Example #5
0
 def __init__(self, methodName='runTest'):
     """
     Pick the store builder for this test run: the shared module-level
     builder on postgres, otherwise a private builder with automatic
     cleanup suppressed.
     """
     super(SchemaUpgradeTests, self).__init__(methodName)
     if DB_TYPE[0] != POSTGRES_DIALECT:
         # Non-postgres dialects manage their own cleanup elsewhere.
         self.testStoreBuilder = SQLStoreBuilder(dsnUser="******", noCleanup=True)
     else:
         self.testStoreBuilder = theStoreBuilder
Example #6
0
class SchemaUpgradeTests(TestCase):
    """
    Tests for L{UpgradeDatabaseSchemaStep}.
    """

    def __init__(self, methodName='runTest'):
        super(SchemaUpgradeTests, self).__init__(methodName)
        # Postgres runs reuse the shared module-level store builder; any other
        # dialect gets its own builder with automatic cleanup suppressed (the
        # cleanup callbacks registered below drop the scratch objects).
        if DB_TYPE[0] == POSTGRES_DIALECT:
            self.testStoreBuilder = theStoreBuilder
        else:
            self.testStoreBuilder = SQLStoreBuilder(dsnUser="******", noCleanup=True)


    @staticmethod
    def _getRawSchemaVersion(fp, versionKey):
        """
        Scan the schema file at C{fp} for the row inserted into the
        CALENDARSERVER table under C{versionKey} and return its integer value,
        or C{None} when the file contains no such insert.
        """
        schema = fp.getContent()
        # Raw string literal: the pattern uses \( and \d, which are invalid
        # escape sequences in a plain string literal on Python 3.
        found = re.search(r"insert into CALENDARSERVER (\(NAME, VALUE\) )?values \('%s', '(\d+)'\);" % (versionKey,), schema)
        return int(found.group(2)) if found else None


    def _getSchemaVersion(self, fp, versionKey):
        """
        Like L{_getRawSchemaVersion}, but fail the test when the mandatory
        "VERSION" key cannot be found, and default any other missing key to 1.
        """
        found = SchemaUpgradeTests._getRawSchemaVersion(fp, versionKey)
        if found is None:
            if versionKey == "VERSION":
                self.fail("Could not determine schema version for: %s" % (fp,))
            else:
                return 1
        return found


    def test_scanUpgradeFiles(self):
        """
        Upgrade files in the fake schema directories are discovered with the
        correct (from, to, path) triples.
        """

        upgrader = UpgradeDatabaseSchemaStep(None)

        upgrader.schemaLocation = getModule(__name__).filePath.sibling("fake_schema1")
        files = upgrader.scanForUpgradeFiles("fake_dialect")
        self.assertEqual(
            files,
            [(3, 4, upgrader.schemaLocation.child("upgrades").child("fake_dialect").child("upgrade_from_3_to_4.sql"))],
        )

        upgrader.schemaLocation = getModule(__name__).filePath.sibling("fake_schema2")
        files = upgrader.scanForUpgradeFiles("fake_dialect")
        self.assertEqual(
            files,
            [
                (3, 4, upgrader.schemaLocation.child("upgrades").child("fake_dialect").child("upgrade_from_3_to_4.sql")),
                (3, 5, upgrader.schemaLocation.child("upgrades").child("fake_dialect").child("upgrade_from_3_to_5.sql")),
                (4, 5, upgrader.schemaLocation.child("upgrades").child("fake_dialect").child("upgrade_from_4_to_5.sql")),
            ]
        )


    def test_determineUpgradeSequence(self):
        """
        The shortest valid upgrade path is chosen, and an impossible path
        raises L{RuntimeError}.
        """

        upgrader = UpgradeDatabaseSchemaStep(None)

        upgrader.schemaLocation = getModule(__name__).filePath.sibling("fake_schema1")
        files = upgrader.scanForUpgradeFiles("fake_dialect")
        upgrades = upgrader.determineUpgradeSequence(3, 4, files, "fake_dialect")
        self.assertEqual(
            upgrades,
            [upgrader.schemaLocation.child("upgrades").child("fake_dialect").child("upgrade_from_3_to_4.sql")],
        )
        self.assertRaises(RuntimeError, upgrader.determineUpgradeSequence, 3, 5, files, "fake_dialect")

        upgrader.schemaLocation = getModule(__name__).filePath.sibling("fake_schema2")
        files = upgrader.scanForUpgradeFiles("fake_dialect")
        upgrades = upgrader.determineUpgradeSequence(3, 5, files, "fake_dialect")
        self.assertEqual(
            upgrades,
            [upgrader.schemaLocation.child("upgrades").child("fake_dialect").child("upgrade_from_3_to_5.sql")]
        )
        upgrades = upgrader.determineUpgradeSequence(4, 5, files, "fake_dialect")
        self.assertEqual(
            upgrades,
            [upgrader.schemaLocation.child("upgrades").child("fake_dialect").child("upgrade_from_4_to_5.sql")]
        )

        upgrader.schemaLocation = getModule(__name__).filePath.sibling("fake_schema3")
        files = upgrader.scanForUpgradeFiles("fake_dialect")
        upgrades = upgrader.determineUpgradeSequence(3, 5, files, "fake_dialect")
        self.assertEqual(
            upgrades,
            [
                upgrader.schemaLocation.child("upgrades").child("fake_dialect").child("upgrade_from_3_to_4.sql"),
                upgrader.schemaLocation.child("upgrades").child("fake_dialect").child("upgrade_from_4_to_5.sql"),
            ]
        )


    def test_upgradeAvailability(self):
        """
        Make sure that each old schema has a valid upgrade path to the current one.
        """

        for dialect in (POSTGRES_DIALECT, ORACLE_DIALECT,):
            upgrader = UpgradeDatabaseSchemaStep(None)
            files = upgrader.scanForUpgradeFiles(dialect)

            current_version = self._getSchemaVersion(upgrader.schemaLocation.child(DB_TYPE[2]), "VERSION")

            for child in upgrader.schemaLocation.child("old").child(dialect).globChildren("*.sql"):
                old_version = self._getSchemaVersion(child, "VERSION")
                upgrades = upgrader.determineUpgradeSequence(old_version, current_version, files, dialect)
                self.assertNotEqual(len(upgrades), 0)

#    def test_upgradeDataAvailability(self):
#        """
#        Make sure that each upgrade file has a valid data upgrade file or None.
#        """
#
#        for dialect in (POSTGRES_DIALECT, ORACLE_DIALECT,):
#            upgrader = UpgradeDatabaseSchemaStep(None)
#            files = upgrader.scanForUpgradeFiles(dialect)
#            for _ignore_from, _ignore_to, fp in files:
#                result = upgrader.getDataUpgrade(fp)
#                if result is not None:
#                    self.assertIsInstance(result, types.FunctionType)


    @inlineCallbacks
    def _dbSchemaUpgrades(self, child):
        """
        This does a full DB test of all possible upgrade paths. For each old schema, it loads it into the DB
        then runs the upgrade service. This ensures all the upgrade.sql files work correctly - at least for
        postgres.
        """

        store = yield self.testStoreBuilder.buildStore(
            self, {"push": StubNotifierFactory()}, enableJobProcessing=False
        )

        @inlineCallbacks
        def _loadOldSchema(path):
            """
            Use the postgres schema mechanism to do tests under a separate "namespace"
            in postgres that we can quickly wipe clean afterwards.
            """
            startTxn = store.newTransaction("test_dbUpgrades")
            if startTxn.dialect == POSTGRES_DIALECT:
                yield startTxn.execSQL("create schema test_dbUpgrades")
                yield startTxn.execSQL("set search_path to test_dbUpgrades")
            yield startTxn.execSQLBlock(path.getContent())
            yield startTxn.commit()

        @inlineCallbacks
        def _loadVersion():
            # Read back the current schema VERSION value from the DB.
            startTxn = store.newTransaction("test_dbUpgrades")
            new_version = yield startTxn.execSQL("select value from calendarserver where name = 'VERSION'")
            yield startTxn.commit()
            returnValue(int(new_version[0][0]))

        @inlineCallbacks
        def _loadSchemaFromDatabase():
            # Dump the live (upgraded) schema for comparison with the source one.
            startTxn = store.newTransaction("test_dbUpgrades")
            schema = yield dumpSchema(startTxn, "Upgraded from %s" % (child.basename(),), "test_dbUpgrades")
            yield startTxn.commit()
            returnValue(schema)

        @inlineCallbacks
        def _unloadOldSchema():
            # Drop the scratch schema between the two sub-tests.
            startTxn = store.newTransaction("test_dbUpgrades")
            if startTxn.dialect == POSTGRES_DIALECT:
                yield startTxn.execSQL("set search_path to public")
                yield startTxn.execSQL("drop schema test_dbUpgrades cascade")
            elif startTxn.dialect == ORACLE_DIALECT:
                yield cleanDatabase(startTxn)
            yield startTxn.commit()

        @inlineCallbacks
        def _cleanupOldSchema():
            # Final cleanup: tolerant of the schema already having been dropped.
            startTxn = store.newTransaction("test_dbUpgrades")
            if startTxn.dialect == POSTGRES_DIALECT:
                yield startTxn.execSQL("set search_path to public")
                yield startTxn.execSQL("drop schema if exists test_dbUpgrades cascade")
            elif startTxn.dialect == ORACLE_DIALECT:
                yield cleanDatabase(startTxn)
            yield startTxn.commit()

        self.addCleanup(_cleanupOldSchema)

        test_upgrader = UpgradeDatabaseSchemaStep(None)
        expected_version = self._getSchemaVersion(test_upgrader.schemaLocation.child(DB_TYPE[2]), "VERSION")

        # Upgrade allowed
        upgrader = UpgradeDatabaseSchemaStep(store)
        yield _loadOldSchema(child)
        yield upgrader.databaseUpgrade()
        new_version = yield _loadVersion()

        # Compare the upgraded schema with the expected current schema
        new_schema = yield _loadSchemaFromDatabase()
        currentSchema = schemaFromPath(test_upgrader.schemaLocation.child(DB_TYPE[2]))
        mismatched = currentSchema.compare(new_schema)
        # These are special case exceptions
        for i in (
            "Table: CALENDAR_HOME, column name DATAVERSION default mismatch",
            "Table: CALENDAR_HOME, mismatched constraints: set([<Constraint: (NOT NULL ('DATAVERSION',) None)>])",
            "Table: ADDRESSBOOK_HOME, column name DATAVERSION default mismatch",
            "Table: ADDRESSBOOK_HOME, mismatched constraints: set([<Constraint: (NOT NULL ('DATAVERSION',) None)>])",
            "Table: PUSH_NOTIFICATION_WORK, column name PUSH_PRIORITY default mismatch",
        ):
            try:
                mismatched.remove(i)
            except ValueError:
                pass
        self.assertEqual(len(mismatched), 0, "Schema mismatch:\n" + "\n".join(mismatched))

        yield _unloadOldSchema()

        self.assertEqual(new_version, expected_version)

        # Upgrade disallowed
        upgrader = UpgradeDatabaseSchemaStep(store, failIfUpgradeNeeded=True)
        yield _loadOldSchema(child)
        old_version = yield _loadVersion()
        try:
            yield upgrader.databaseUpgrade()
        except NotAllowedToUpgrade:
            pass
        except Exception as e:
            # A different exception was raised: say so, instead of the
            # misleading "not raised" message.
            self.fail("Unexpected exception raised instead of NotAllowedToUpgrade: %s" % (e,))
        else:
            self.fail("NotAllowedToUpgrade not raised")
        new_version = yield _loadVersion()
        yield _unloadOldSchema()

        self.assertEqual(old_version, new_version)


    @inlineCallbacks
    def _dbDataUpgrades(self, version, versionKey, upgraderClass):
        """
        This does a full DB test of all possible data upgrade paths. For each old schema, it loads it into the DB
        then runs the data upgrade service. This ensures all the upgrade_XX.py files work correctly - at least for
        postgres.

        TODO: this currently does not create any data to test with. It simply runs the upgrade on an empty
        store.
        """

        store = yield self.testStoreBuilder.buildStore(
            self, {"push": StubNotifierFactory()}, enableJobProcessing=False
        )

        @inlineCallbacks
        def _loadOldData(path, oldVersion):
            """
            Use the postgres schema mechanism to do tests under a separate "namespace"
            in postgres that we can quickly wipe clean afterwards.
            """
            startTxn = store.newTransaction("test_dbUpgrades")
            if startTxn.dialect == POSTGRES_DIALECT:
                yield startTxn.execSQL("create schema test_dbUpgrades")
                yield startTxn.execSQL("set search_path to test_dbUpgrades")
            yield startTxn.execSQLBlock(path.getContent())
            yield startTxn.execSQL("update CALENDARSERVER set VALUE = '%s' where NAME = '%s'" % (oldVersion, versionKey,))
            yield startTxn.commit()

        @inlineCallbacks
        def _loadVersion():
            # Read back the current value stored under versionKey.
            startTxn = store.newTransaction("test_dbUpgrades")
            new_version = yield startTxn.execSQL("select value from calendarserver where name = '%s'" % (versionKey,))
            yield startTxn.commit()
            returnValue(int(new_version[0][0]))

        @inlineCallbacks
        def _unloadOldData():
            # Drop the scratch schema created by _loadOldData.
            startTxn = store.newTransaction("test_dbUpgrades")
            if startTxn.dialect == POSTGRES_DIALECT:
                yield startTxn.execSQL("set search_path to public")
                yield startTxn.execSQL("drop schema test_dbUpgrades cascade")
            elif startTxn.dialect == ORACLE_DIALECT:
                yield cleanDatabase(startTxn)
            yield startTxn.commit()

        @inlineCallbacks
        def _cleanupOldData():
            # Final cleanup: tolerant of the schema already having been dropped.
            startTxn = store.newTransaction("test_dbUpgrades")
            if startTxn.dialect == POSTGRES_DIALECT:
                yield startTxn.execSQL("set search_path to public")
                yield startTxn.execSQL("drop schema if exists test_dbUpgrades cascade")
            elif startTxn.dialect == ORACLE_DIALECT:
                yield cleanDatabase(startTxn)
            yield startTxn.commit()

        self.addCleanup(_cleanupOldData)

        test_upgrader = UpgradeDatabaseSchemaStep(None)
        expected_version = self._getSchemaVersion(test_upgrader.schemaLocation.child(DB_TYPE[2]), versionKey)

        oldVersion = version
        upgrader = upgraderClass(store)
        yield _loadOldData(test_upgrader.schemaLocation.child(DB_TYPE[2]), oldVersion)
        yield upgrader.databaseUpgrade()
        new_version = yield _loadVersion()
        yield _unloadOldData()

        self.assertEqual(new_version, expected_version)
class SQLDump(TestCase):
    """
    Tests for L{sql_dump}.
    """

    def __init__(self, methodName="runTest"):
        super(SQLDump, self).__init__(methodName)
        # Postgres runs reuse the shared module-level store builder; any other
        # dialect gets its own builder with automatic cleanup suppressed (the
        # cleanUp method below does the wiping instead).
        if DB_TYPE[0] == POSTGRES_DIALECT:
            self.testStoreBuilder = theStoreBuilder
        else:
            self.testStoreBuilder = SQLStoreBuilder(dsnUser="******", noCleanup=True)

    @inlineCallbacks
    def setUp(self):
        """
        Build the store used by each test; job processing is disabled since
        these tests only exercise schema load/dump.
        """
        TestCase.setUp(self)

        self.store = yield self.testStoreBuilder.buildStore(
            self, {"push": StubNotifierFactory()}, enableJobProcessing=False
        )

    @inlineCallbacks
    def cleanUp(self):
        """
        Undo whatever L{_loadSchema} created: drop the scratch schema on
        postgres, or wipe the database contents on other dialects.
        """
        startTxn = self.store.newTransaction("test_dbUpgrades")
        if startTxn.dialect == POSTGRES_DIALECT:
            yield startTxn.execSQL("set search_path to public")
            yield startTxn.execSQL("drop schema test_dbUpgrades cascade")
        else:
            yield cleanDatabase(startTxn)

        yield startTxn.commit()

    @inlineCallbacks
    def _loadSchema(self, schema):
        """
        Use the postgres schema mechanism to do tests under a separate "namespace"
        in postgres that we can quickly wipe clean afterwards.
        """
        startTxn = self.store.newTransaction("test_dbUpgrades")
        if startTxn.dialect == POSTGRES_DIALECT:
            yield startTxn.execSQL("create schema test_dbUpgrades")
            yield startTxn.execSQL("set search_path to test_dbUpgrades")
        yield startTxn.execSQLBlock(schema)
        yield startTxn.commit()

        # Registered here (not in setUp) so cleanup only runs once a schema
        # has actually been loaded.
        self.addCleanup(self.cleanUp)

    @inlineCallbacks
    def _schemaCheck(self, schema, schema_bad):
        """
        Round-trip check: load C{schema} into the DB, dump it back out, and
        verify the dump compares clean against C{schema} but NOT against
        C{schema_bad}.
        """

        # Load old schema and populate with data
        yield self._loadSchema(schema)

        txn = self.store.newTransaction("loadData")
        dumped = yield dumpSchema(txn, "test", schemaname="test_dbUpgrades")
        yield txn.commit()

        parsed = schemaFromString(schema)
        self.assertEqual(parsed.compare(dumped), [])

        parsed_bad = schemaFromString(schema_bad)
        self.assertNotEqual(parsed_bad.compare(dumped), [])

    @inlineCallbacks
    def test_pkey_column(self):

        schema = """
CREATE TABLE FOO (
    ID1 integer primary key,
    ID2 integer not null
);
"""

        schema_bad = """
CREATE TABLE FOO (
    ID1 integer primary key,
    ID2 integer
);
"""

        yield self._schemaCheck(schema, schema_bad)

    @inlineCallbacks
    def test_pkey_table(self):

        schema = """
CREATE TABLE FOO (
    ID1 integer not null,
    ID2 integer not null,

    primary key (ID1)
);
"""

        schema_bad = """
CREATE TABLE FOO (
    ID1 integer,
    ID2 integer,

    primary key (ID1)
);
"""

        yield self._schemaCheck(schema, schema_bad)

    @inlineCallbacks
    def test_multiple_pkey_table(self):

        schema = """
CREATE TABLE FOO (
    ID1 integer not null,
    ID2 integer not null,
    ID3 integer not null,

    primary key (ID1, ID2)
);
"""

        schema_bad = """
CREATE TABLE FOO (
    ID1 integer,
    ID2 integer,
    ID3 integer,

    primary key (ID1, ID2)
);
"""

        yield self._schemaCheck(schema, schema_bad)

    @inlineCallbacks
    def test_unique_column(self):

        schema = """
CREATE TABLE FOO (
    ID1 integer unique,
    ID2 integer not null
);
"""

        schema_bad = """
CREATE TABLE FOO (
    ID1 integer unique,
    ID2 integer
);
"""

        yield self._schemaCheck(schema, schema_bad)

    @inlineCallbacks
    def test_unique_table(self):

        schema = """
CREATE TABLE FOO (
    ID1 integer,
    ID2 integer not null,

    unique (ID1)
);
"""

        schema_bad = """
CREATE TABLE FOO (
    ID1 integer,
    ID2 integer,

    unique (ID1)
);
"""

        yield self._schemaCheck(schema, schema_bad)

    @inlineCallbacks
    def test_multiple_unique_table(self):

        schema = """
CREATE TABLE FOO (
    ID1 integer,
    ID2 integer,
    ID3 integer not null,

    unique (ID1, ID2)
);
"""

        schema_bad = """
CREATE TABLE FOO (
    ID1 integer,
    ID2 integer,
    ID3 integer,

    unique (ID1, ID2)
);
"""

        yield self._schemaCheck(schema, schema_bad)

    @inlineCallbacks
    def test_timestamp_table(self):

        # Dialect-specific default expression for a UTC timestamp.
        schema = (
            """
CREATE TABLE FOO (
    ID1 integer primary key,
    ID2 timestamp default timezone('UTC', CURRENT_TIMESTAMP)
);
"""
            if DB_TYPE[0] == POSTGRES_DIALECT
            else """
CREATE TABLE FOO (
    ID1 integer primary key,
    ID2 timestamp default CURRENT_TIMESTAMP at time zone 'UTC'
);
"""
        )

        schema_bad = """
CREATE TABLE FOO (
    ID1 integer primary key default 0,
    ID2 timestamp
);
"""

        yield self._schemaCheck(schema, schema_bad)

    @inlineCallbacks
    def test_references_table(self):

        # Dialect-specific text column type (text vs nclob).
        schema = (
            """
CREATE TABLE FOO (
    ID1 integer primary key,
    ID2 text default null
);
CREATE TABLE BAR (
    ID1 integer references FOO on delete cascade,
    ID2 integer
);
CREATE TABLE BAZ (
    ID1 integer references FOO,
    ID2 integer
);
"""
            if DB_TYPE[0] == POSTGRES_DIALECT
            else """
CREATE TABLE FOO (
    ID1 integer primary key,
    ID2 nclob default null
);
CREATE TABLE BAR (
    ID1 integer references FOO on delete cascade,
    ID2 integer
);
CREATE TABLE BAZ (
    ID1 integer references FOO,
    ID2 integer
);
"""
        )
        schema_bad = """
CREATE TABLE FOO (
    ID1 integer primary key default 0,
    ID2 timestamp
);
CREATE TABLE BAR (
    ID1 integer references FOO,
    ID2 integer
);
CREATE TABLE BAZ (
    ID1 integer references FOO on delete cascade,
    ID2 integer
);
"""

        yield self._schemaCheck(schema, schema_bad)

    @inlineCallbacks
    def test_index_table(self):

        schema = """
CREATE TABLE FOO (
    ID1 integer not null,
    ID2 integer not null,

    primary key (ID1)
);

create index FOOINDEX on FOO (ID1, ID2);
"""

        schema_bad = """
CREATE TABLE FOO (
    ID1 integer,
    ID2 integer,

    primary key (ID1)
);
create index FOOINDEX on FOO (ID2, ID1);
"""

        yield self._schemaCheck(schema, schema_bad)

    @inlineCallbacks
    def test_unique_index_table(self):

        schema = """
CREATE TABLE FOO (
    ID1 integer not null,
    ID2 integer not null,

    primary key (ID1)
);

create unique index FOOINDEX on FOO(ID1, ID2);
"""

        schema_bad = """
CREATE TABLE FOO (
    ID1 integer,
    ID2 integer,

    primary key (ID1)
);
create index FOOINDEX on FOO(ID1, ID2);
"""

        yield self._schemaCheck(schema, schema_bad)
Example #8
0
 def createStore(self, delegateTo):
     """
     Instantiate the class named by C{delegateTo} around a fresh child
     store, and swap this AMP protocol out for that delegate.
     """
     swapAMP(
         self,
         namedAny(delegateTo)(SQLStoreBuilder.childStore()),
     )
     return {}
Example #9
0
class SchemaUpgradeTests(TestCase):
    """
    Tests for L{UpgradeDatabaseSchemaStep}.
    """
    def __init__(self, methodName='runTest'):
        super(SchemaUpgradeTests, self).__init__(methodName)
        # Postgres can reuse the shared store builder; other dialects need a
        # private builder whose database is not torn down between tests.
        if DB_TYPE[0] == POSTGRES_DIALECT:
            self.testStoreBuilder = theStoreBuilder
        else:
            self.testStoreBuilder = SQLStoreBuilder(dsnUser="******",
                                                    noCleanup=True)

    @staticmethod
    def _getRawSchemaVersion(fp, versionKey):
        """
        Scan the schema file at C{fp} for the insert that records
        C{versionKey} in the CALENDARSERVER table.

        @return: the recorded version as an C{int}, or C{None} if no such
            insert is present.
        """
        schema = fp.getContent()
        # Raw string: \(, \) and \d are regex escapes; in a plain string
        # literal they are invalid escape sequences on modern Python.
        found = re.search(
            r"insert into CALENDARSERVER (\(NAME, VALUE\) )?values \('%s', '(\d+)'\);"
            % (versionKey, ), schema)
        return int(found.group(2)) if found else None

    def _getSchemaVersion(self, fp, versionKey):
        """
        Like L{_getRawSchemaVersion}, but fail the test when the primary
        "VERSION" key is missing, and default any other missing key to 1
        (secondary keys predate explicit versioning).
        """
        found = SchemaUpgradeTests._getRawSchemaVersion(fp, versionKey)
        if found is None:
            if versionKey == "VERSION":
                self.fail("Could not determine schema version for: %s" %
                          (fp, ))
            else:
                return 1
        return found

    def test_scanUpgradeFiles(self):
        """
        Upgrade files on disk are discovered and returned as sorted
        (from, to, path) tuples.
        """
        upgrader = UpgradeDatabaseSchemaStep(None)

        upgrader.schemaLocation = getModule(__name__).filePath.sibling(
            "fake_schema1")
        files = upgrader.scanForUpgradeFiles("fake_dialect")
        self.assertEqual(
            files,
            [(3, 4, upgrader.schemaLocation.child("upgrades").child(
                "fake_dialect").child("upgrade_from_3_to_4.sql"))],
        )

        upgrader.schemaLocation = getModule(__name__).filePath.sibling(
            "fake_schema2")
        files = upgrader.scanForUpgradeFiles("fake_dialect")
        self.assertEqual(files, [
            (3, 4, upgrader.schemaLocation.child("upgrades").child(
                "fake_dialect").child("upgrade_from_3_to_4.sql")),
            (3, 5, upgrader.schemaLocation.child("upgrades").child(
                "fake_dialect").child("upgrade_from_3_to_5.sql")),
            (4, 5, upgrader.schemaLocation.child("upgrades").child(
                "fake_dialect").child("upgrade_from_4_to_5.sql")),
        ])

    def test_determineUpgradeSequence(self):
        """
        A valid sequence of upgrade files between two schema versions is
        chosen, and L{RuntimeError} is raised when no path exists.
        """
        upgrader = UpgradeDatabaseSchemaStep(None)

        upgrader.schemaLocation = getModule(__name__).filePath.sibling(
            "fake_schema1")
        files = upgrader.scanForUpgradeFiles("fake_dialect")
        upgrades = upgrader.determineUpgradeSequence(3, 4, files,
                                                     "fake_dialect")
        self.assertEqual(
            upgrades,
            [
                upgrader.schemaLocation.child("upgrades").child(
                    "fake_dialect").child("upgrade_from_3_to_4.sql")
            ],
        )
        # fake_schema1 has no upgrade file reaching version 5.
        self.assertRaises(RuntimeError, upgrader.determineUpgradeSequence, 3,
                          5, files, "fake_dialect")

        upgrader.schemaLocation = getModule(__name__).filePath.sibling(
            "fake_schema2")
        files = upgrader.scanForUpgradeFiles("fake_dialect")
        upgrades = upgrader.determineUpgradeSequence(3, 5, files,
                                                     "fake_dialect")
        # The direct 3->5 upgrade is preferred over the 3->4->5 path.
        self.assertEqual(upgrades, [
            upgrader.schemaLocation.child("upgrades").child(
                "fake_dialect").child("upgrade_from_3_to_5.sql")
        ])
        upgrades = upgrader.determineUpgradeSequence(4, 5, files,
                                                     "fake_dialect")
        self.assertEqual(upgrades, [
            upgrader.schemaLocation.child("upgrades").child(
                "fake_dialect").child("upgrade_from_4_to_5.sql")
        ])

        upgrader.schemaLocation = getModule(__name__).filePath.sibling(
            "fake_schema3")
        files = upgrader.scanForUpgradeFiles("fake_dialect")
        upgrades = upgrader.determineUpgradeSequence(3, 5, files,
                                                     "fake_dialect")
        # fake_schema3 has no direct 3->5 file, so the two-step path is used.
        self.assertEqual(upgrades, [
            upgrader.schemaLocation.child("upgrades").child(
                "fake_dialect").child("upgrade_from_3_to_4.sql"),
            upgrader.schemaLocation.child("upgrades").child(
                "fake_dialect").child("upgrade_from_4_to_5.sql"),
        ])

    def test_upgradeAvailability(self):
        """
        Make sure that each old schema has a valid upgrade path to the current one.
        """

        for dialect in (
                POSTGRES_DIALECT,
                ORACLE_DIALECT,
        ):
            upgrader = UpgradeDatabaseSchemaStep(None)
            files = upgrader.scanForUpgradeFiles(dialect)

            current_version = self._getSchemaVersion(
                upgrader.schemaLocation.child(DB_TYPE[2]), "VERSION")

            for child in upgrader.schemaLocation.child("old").child(
                    dialect).globChildren("*.sql"):
                old_version = self._getSchemaVersion(child, "VERSION")
                upgrades = upgrader.determineUpgradeSequence(
                    old_version, current_version, files, dialect)
                self.assertNotEqual(len(upgrades), 0)


#    def test_upgradeDataAvailability(self):
#        """
#        Make sure that each upgrade file has a valid data upgrade file or None.
#        """
#
#        for dialect in (POSTGRES_DIALECT, ORACLE_DIALECT,):
#            upgrader = UpgradeDatabaseSchemaStep(None)
#            files = upgrader.scanForUpgradeFiles(dialect)
#            for _ignore_from, _ignore_to, fp in files:
#                result = upgrader.getDataUpgrade(fp)
#                if result is not None:
#                    self.assertIsInstance(result, types.FunctionType)

    @inlineCallbacks
    def _dbSchemaUpgrades(self, child):
        """
        This does a full DB test of all possible upgrade paths. For each old schema, it loads it into the DB
        then runs the upgrade service. This ensures all the upgrade.sql files work correctly - at least for
        postgres.
        """

        store = yield self.testStoreBuilder.buildStore(
            self, {"push": StubNotifierFactory()}, enableJobProcessing=False)

        @inlineCallbacks
        def _loadOldSchema(path):
            """
            Use the postgres schema mechanism to do tests under a separate "namespace"
            in postgres that we can quickly wipe clean afterwards.
            """
            startTxn = store.newTransaction("test_dbUpgrades")
            if startTxn.dbtype.dialect == POSTGRES_DIALECT:
                yield startTxn.execSQL("create schema test_dbUpgrades")
                yield startTxn.execSQL("set search_path to test_dbUpgrades")
            yield startTxn.execSQLBlock(path.getContent())
            yield startTxn.commit()

        @inlineCallbacks
        def _loadVersion():
            # Read back the schema version currently recorded in the DB.
            startTxn = store.newTransaction("test_dbUpgrades")
            new_version = yield startTxn.execSQL(
                "select value from calendarserver where name = 'VERSION'")
            yield startTxn.commit()
            returnValue(int(new_version[0][0]))

        @inlineCallbacks
        def _loadSchemaFromDatabase():
            # Dump the live, upgraded schema so it can be compared against
            # the canonical current schema file.
            startTxn = store.newTransaction("test_dbUpgrades")
            schema = yield dumpSchema(
                startTxn, "Upgraded from %s" % (child.basename(), ),
                "test_dbUpgrades")
            yield startTxn.commit()
            returnValue(schema)

        @inlineCallbacks
        def _unloadOldSchema():
            # Drop the scratch schema (postgres) or wipe the DB (oracle).
            startTxn = store.newTransaction("test_dbUpgrades")
            if startTxn.dbtype.dialect == POSTGRES_DIALECT:
                yield startTxn.execSQL("set search_path to public")
                yield startTxn.execSQL("drop schema test_dbUpgrades cascade")
            elif startTxn.dbtype.dialect == ORACLE_DIALECT:
                yield cleanDatabase(startTxn)
            yield startTxn.commit()

        @inlineCallbacks
        def _cleanupOldSchema():
            # Like _unloadOldSchema but tolerant of the schema being absent,
            # because cleanup also runs when the test failed part way.
            startTxn = store.newTransaction("test_dbUpgrades")
            if startTxn.dbtype.dialect == POSTGRES_DIALECT:
                yield startTxn.execSQL("set search_path to public")
                yield startTxn.execSQL(
                    "drop schema if exists test_dbUpgrades cascade")
            elif startTxn.dbtype.dialect == ORACLE_DIALECT:
                yield cleanDatabase(startTxn)
            yield startTxn.commit()

        self.addCleanup(_cleanupOldSchema)

        test_upgrader = UpgradeDatabaseSchemaStep(None)
        expected_version = self._getSchemaVersion(
            test_upgrader.schemaLocation.child(DB_TYPE[2]), "VERSION")

        # Upgrade allowed
        upgrader = UpgradeDatabaseSchemaStep(store)
        yield _loadOldSchema(child)
        yield upgrader.databaseUpgrade()
        new_version = yield _loadVersion()

        # Compare the upgraded schema with the expected current schema
        new_schema = yield _loadSchemaFromDatabase()
        currentSchema = schemaFromPath(
            test_upgrader.schemaLocation.child(DB_TYPE[2]))
        mismatched = currentSchema.compare(new_schema)
        # These are special case exceptions
        for i in (
                "Table: CALENDAR_HOME, column name DATAVERSION default mismatch",
                "Table: CALENDAR_HOME, mismatched constraints: set([<Constraint: (NOT NULL ('DATAVERSION',) None)>])",
                "Table: ADDRESSBOOK_HOME, column name DATAVERSION default mismatch",
                "Table: ADDRESSBOOK_HOME, mismatched constraints: set([<Constraint: (NOT NULL ('DATAVERSION',) None)>])",
                "Table: PUSH_NOTIFICATION_WORK, column name PUSH_PRIORITY default mismatch",
        ):
            try:
                mismatched.remove(i)
            except ValueError:
                pass
        # The leading "Comparing schema" line is informational only.
        if mismatched and mismatched[0].startswith(
                "Comparing schema: current.sql to Upgraded from"):
            del mismatched[0]
        self.assertEqual(len(mismatched), 0,
                         "Schema mismatch:\n" + "\n".join(mismatched))

        yield _unloadOldSchema()

        self.assertEqual(new_version, expected_version)

        # Upgrade disallowed: the step must refuse to touch the old schema.
        # Unexpected exceptions are deliberately NOT caught here, so a real
        # failure surfaces with its own traceback instead of being masked by
        # a misleading "NotAllowedToUpgrade not raised" message.
        upgrader = UpgradeDatabaseSchemaStep(store, failIfUpgradeNeeded=True)
        yield _loadOldSchema(child)
        old_version = yield _loadVersion()
        try:
            yield upgrader.databaseUpgrade()
        except NotAllowedToUpgrade:
            # Expected refusal.
            pass
        else:
            self.fail("NotAllowedToUpgrade not raised")
        new_version = yield _loadVersion()
        yield _unloadOldSchema()

        self.assertEqual(old_version, new_version)

    @inlineCallbacks
    def _dbDataUpgrades(self, version, versionKey, upgraderClass):
        """
        This does a full DB test of all possible data upgrade paths. For each old schema, it loads it into the DB
        then runs the data upgrade service. This ensures all the upgrade_XX.py files work correctly - at least for
        postgres.

        TODO: this currently does not create any data to test with. It simply runs the upgrade on an empty
        store.
        """

        store = yield self.testStoreBuilder.buildStore(
            self, {"push": StubNotifierFactory()}, enableJobProcessing=False)

        @inlineCallbacks
        def _loadOldData(path, oldVersion):
            """
            Use the postgres schema mechanism to do tests under a separate "namespace"
            in postgres that we can quickly wipe clean afterwards.
            """
            startTxn = store.newTransaction("test_dbUpgrades")
            if startTxn.dbtype.dialect == POSTGRES_DIALECT:
                yield startTxn.execSQL("create schema test_dbUpgrades")
                yield startTxn.execSQL("set search_path to test_dbUpgrades")
            yield startTxn.execSQLBlock(path.getContent())
            # Force the recorded version back to the old value under test.
            yield startTxn.execSQL(
                "update CALENDARSERVER set VALUE = '%s' where NAME = '%s'" % (
                    oldVersion,
                    versionKey,
                ))
            yield startTxn.commit()

        @inlineCallbacks
        def _loadVersion():
            # Read back the data version recorded under versionKey.
            startTxn = store.newTransaction("test_dbUpgrades")
            new_version = yield startTxn.execSQL(
                "select value from calendarserver where name = '%s'" %
                (versionKey, ))
            yield startTxn.commit()
            returnValue(int(new_version[0][0]))

        @inlineCallbacks
        def _unloadOldData():
            # Drop the scratch schema (postgres) or wipe the DB (oracle).
            startTxn = store.newTransaction("test_dbUpgrades")
            if startTxn.dbtype.dialect == POSTGRES_DIALECT:
                yield startTxn.execSQL("set search_path to public")
                yield startTxn.execSQL("drop schema test_dbUpgrades cascade")
            elif startTxn.dbtype.dialect == ORACLE_DIALECT:
                yield cleanDatabase(startTxn)
            yield startTxn.commit()

        @inlineCallbacks
        def _cleanupOldData():
            # Tolerant cleanup, safe even if the schema never loaded.
            startTxn = store.newTransaction("test_dbUpgrades")
            if startTxn.dbtype.dialect == POSTGRES_DIALECT:
                yield startTxn.execSQL("set search_path to public")
                yield startTxn.execSQL(
                    "drop schema if exists test_dbUpgrades cascade")
            elif startTxn.dbtype.dialect == ORACLE_DIALECT:
                yield cleanDatabase(startTxn)
            yield startTxn.commit()

        self.addCleanup(_cleanupOldData)

        test_upgrader = UpgradeDatabaseSchemaStep(None)
        expected_version = self._getSchemaVersion(
            test_upgrader.schemaLocation.child(DB_TYPE[2]), versionKey)

        oldVersion = version
        upgrader = upgraderClass(store)
        yield _loadOldData(test_upgrader.schemaLocation.child(DB_TYPE[2]),
                           oldVersion)
        yield upgrader.databaseUpgrade()
        new_version = yield _loadVersion()
        yield _unloadOldData()

        self.assertEqual(new_version, expected_version)
Example #10
0
class MultiStoreConduitTest(CommonCommonTests, txweb2.dav.test.util.TestCase):

    theStoreBuilder2 = SQLStoreBuilder(secondary=True)
    otherTransaction = None

    @inlineCallbacks
    def setUp(self):
        """
        Build two stores, each with a two-entry servers DB ("A" and "B"),
        wire both to fake podding conduits, and register each "this server"
        endpoint with L{FakeConduitRequest}.
        """
        yield super(MultiStoreConduitTest, self).setUp()

        # First store: server "A" is marked as the local ("this") server.
        serversA = ServersDB()
        localA = Server("A", "http://127.0.0.1:8008", "A", True)
        serversA.addServer(localA)
        remoteB = Server("B", "http://127.0.0.1:8108", "B", False)
        serversA.addServer(remoteB)
        yield self.buildStoreAndDirectory(serversDB=serversA)
        self.store.queryCacher = None  # Cannot use query caching
        self.store.conduit = self.makeConduit(self.store)

        # Second store: mirror image, with server "B" as the local one.
        serversB = ServersDB()
        remoteA = Server("A", "http://127.0.0.1:8008", "A", False)
        serversB.addServer(remoteA)
        localB = Server("B", "http://127.0.0.1:8108", "B", True)
        serversB.addServer(localB)

        self.store2 = yield self.buildStore(self.theStoreBuilder2)
        directory2 = buildTestDirectory(self.store2,
                                        self.mktemp(),
                                        serversDB=serversB)

        self.store2.setDirectoryService(directory2)
        self.store2.queryCacher = None  # Cannot use query caching
        self.store2.conduit = self.makeConduit(self.store2)

        # Route fake podding requests for each local server to its store.
        FakeConduitRequest.addServerStore(localA, self.store)
        FakeConduitRequest.addServerStore(localB, self.store2)

    def configure(self):
        # Enable multi-server (podding) support on top of the base test
        # configuration so the two stores can talk to each other.
        super(MultiStoreConduitTest, self).configure()
        self.config.Servers.Enabled = True

    def otherStoreUnderTest(self):
        """
        Return the second store (built in setUp) for testing.
        """
        return self.store2

    def newOtherTransaction(self):
        """
        Start a transaction against the second store, registering a cleanup
        that commits it if the test leaves it open.
        """
        assert self.otherTransaction is None
        newTxn = self.otherStoreUnderTest().newTransaction()

        @inlineCallbacks
        def maybeCommitThis():
            # Ignore AlreadyFinishedError: the test may have already
            # committed or aborted this transaction itself.
            try:
                yield newTxn.commit()
            except AlreadyFinishedError:
                pass

        self.addCleanup(maybeCommitThis)
        self.otherTransaction = newTxn
        return self.otherTransaction

    def otherTransactionUnderTest(self):
        """
        Return the active second-store transaction, starting one first if
        none is currently open.
        """
        if self.otherTransaction is not None:
            return self.otherTransaction
        return self.newOtherTransaction()

    @inlineCallbacks
    def otherCommit(self):
        """
        Commit the active second-store transaction and clear it.
        """
        activeTxn = self.otherTransaction
        assert activeTxn is not None
        yield activeTxn.commit()
        self.otherTransaction = None

    @inlineCallbacks
    def otherAbort(self):
        """
        Abort the active second-store transaction and clear it.
        """
        activeTxn = self.otherTransaction
        assert activeTxn is not None
        yield activeTxn.abort()
        self.otherTransaction = None

    def makeConduit(self, store):
        """
        Create a podding conduit for C{store} whose requests go through
        L{FakeConduitRequest} instead of real HTTP.
        """
        newConduit = PoddingConduit(store)
        newConduit.conduitRequestClass = FakeConduitRequest
        return newConduit

    @inlineCallbacks
    def createShare(self,
                    ownerGUID="user01",
                    shareeGUID="puser02",
                    name="calendar"):
        """
        Invite the sharee to the owner's calendar, then accept the invite on
        the sharee's side via a transaction against the second store.

        @return: the sharee-side resource name of the shared calendar.
        """
        ownerHome = yield self.homeUnderTest(name=ownerGUID, create=True)
        ownerCalendar = yield ownerHome.calendarWithName(name)
        yield ownerCalendar.inviteUserToShare(shareeGUID,
                                              _BIND_MODE_WRITE,
                                              "shared",
                                              shareName="shared-calendar")
        yield self.commit()

        # Accept the share on the other store (sharee is a "podded" user).
        shareeHome = yield self.homeUnderTest(txn=self.newOtherTransaction(),
                                              name=shareeGUID)
        yield shareeHome.acceptShare("shared-calendar")
        yield self.otherCommit()

        returnValue("shared-calendar")

    @inlineCallbacks
    def removeShare(self,
                    ownerGUID="user01",
                    shareeGUID="puser02",
                    name="calendar"):

        home = yield self.homeUnderTest(name=ownerGUID)
        calendar = yield home.calendarWithName(name)
        yield calendar.uninviteUserFromShare(shareeGUID)
        yield self.commit()