Code Example #1
    def test_cooperativeLogin(self):
        """
        Verify that the mailbox will be loaded without hanging the server for
        an inordinate period of time.
        """
        qc = QueryCounter(self.store)
        n = []

        def m():
            n.append(self.mailbox._realize())

        self.assertEquals(qc.measure(m), 0)
        [actual] = n
        n[:] = []
        actual.coiterate = lambda x: n.append(x) or Deferred()
        actual.pagesize = 1
        da = self.store.findUnique(DeliveryAgent)
        location = u'extra'

        # this next line initializes the table for pop3, which accounts for a
        # fairly steep startup cost.  TODO: optimize axiom so this isn't as
        # steep.
        self.store.query(MessageInfo).deleteFromStore()
        # Spooky action-at-a-distance stuff: initialize transactional
        # bookkeeping for Message table, I think?  Otherwise there are a few
        # hundred extra bytecodes on the 'bootstrap' which we never see again
        # in subsequent runs of the exact same query.  This actually works
        # whether or not the query is in a transaction: all that is necessary
        # is that a transaction take place in the store, and that the Message
        # table be queried in some way.  (As you can see, no results need be
        # gathered.)
        self.store.transact(list, self.store.query(Message, limit=0))
        # self.store.transact(lambda : None)

        self.assertEquals(qc.measure(actual.kickoff), 0)
        [tickit] = n
        n[:] = []
        bootstrapBaseline = qc.measure(tickit.next)
        baseline = qc.measure(tickit.next)
        for x in range(2):
            self.store.query(MessageInfo).deleteFromStore()
            # Eliminate all the previously-created message information
            self.assertEquals(qc.measure(actual.kickoff), 0)
            [tickit] = n
            n[:] = []
            self.assertEquals(qc.measure(tickit.next), bootstrapBaseline)
            self.store.transact(makeSomeMessages, self, da, location)
            self.assertEquals(qc.measure(tickit.next), baseline)
            # exhaust it so we can start again
            while True:
                try:
                    # "<=" because the _last_ iteration will be 1 less than all
                    # the previous, due to the successful comparison/exit
                    # instruction
                    self.failUnless(qc.measure(tickit.next) <= baseline)
                except StopIteration:
                    break
Code Example #2
    def setUp(self):
        self.store = Store()
        privApp = PrivateApplication(store=self.store)
        installOn(privApp, self.store)
        self.model = TestableInequalityModel(self.store, DataThunkWithIndex,
                                             None, [DataThunkWithIndex.a],
                                             DataThunkWithIndex.a, True,
                                             privApp)
        self.counter = QueryCounter(self.store)
        self.data = []
        for i in range(4):
            self.data.append(DataThunkWithIndex(store=self.store, a=i))
Code Example #3
File: test_popout.py  Project: pombredanne/quotient
    def test_cooperativeLogin(self):
        """
        Verify that the mailbox will be loaded without hanging the server for
        an inordinate period of time.
        """
        qc = QueryCounter(self.store)
        n = []
        def m():
            n.append(self.mailbox._realize())
        self.assertEquals(qc.measure(m), 0)
        [actual] = n
        n[:] = []
        actual.coiterate = lambda x: n.append(x) or Deferred()
        actual.pagesize = 1
        da = self.store.findUnique(DeliveryAgent)
        location = u'extra'

        # this next line initializes the table for pop3, which accounts for a
        # fairly steep startup cost.  TODO: optimize axiom so this isn't as
        # steep.
        self.store.query(MessageInfo).deleteFromStore()
        # Spooky action-at-a-distance stuff: initialize transactional
        # bookkeeping for Message table, I think?  Otherwise there are a few
        # hundred extra bytecodes on the 'bootstrap' which we never see again
        # in subsequent runs of the exact same query.  This actually works
        # whether or not the query is in a transaction: all that is necessary
        # is that a transaction take place in the store, and that the Message
        # table be queried in some way.  (As you can see, no results need be
        # gathered.)
        self.store.transact(list, self.store.query(Message, limit=0))
        # self.store.transact(lambda : None)

        self.assertEquals(qc.measure(actual.kickoff), 0)
        [tickit] = n
        n[:] = []
        bootstrapBaseline = qc.measure(tickit.next)
        baseline = qc.measure(tickit.next)
        for x in range(2):
            self.store.query(MessageInfo).deleteFromStore()
            # Eliminate all the previously-created message information
            self.assertEquals(qc.measure(actual.kickoff), 0)
            [tickit] = n
            n[:] = []
            self.assertEquals(qc.measure(tickit.next), bootstrapBaseline)
            self.store.transact(makeSomeMessages, self, da, location)
            self.assertEquals(qc.measure(tickit.next), baseline)
            # exhaust it so we can start again
            while True:
                try:
                    # "<=" because the _last_ iteration will be 1 less than all
                    # the previous, due to the successful comparison/exit
                    # instruction
                    self.failUnless(qc.measure(tickit.next) <= baseline)
                except StopIteration:
                    break
Code Example #4
    def test_performManyProportionalDatabaseWork(self):
        """
        Test that the cost of the SQL executed by L{Inbox._performManyAct} in
        a single step is independent of the number of messages in the batch.
        """
        qc = QueryCounter(self.store)

        measure = lambda: qc.measure(lambda: self.inbox._performManyAct(
            lambda m: None, {},
            self.store.query(Message).paginate(pagesize=2), defer.Deferred()).
                                     next())

        first = measure()

        Message(store=self.store)

        self.assertEquals(first, measure())
Code Example #5
    def test_unreadCountComplexityLimit(self):
        """
        Verify that unread counts on arbitrarily large mailboxes only perform
        counting work up to a specified limit.
        """
        # Now make sure the DB's work is limited too, not just the result
        # count.
        halfCount = 5
        countLimit = 2 * halfCount
        self.inboxScreen.countLimit = countLimit

        self.makeMessages(3 * halfCount, read=False, spam=False)
        qc = QueryCounter(self.store)
        m1 = qc.measure(self.unreadCount)

        self.makeMessages(halfCount, read=False, spam=False)
        m2 = qc.measure(self.unreadCount)
        self.assertEqual(m1, m2)
Code Example #6
File: test_inbox.py  Project: pombredanne/quotient
    def test_performManyProportionalDatabaseWork(self):
        """
        Test that the cost of the SQL executed by L{Inbox._performManyAct} in
        a single step is independent of the number of messages in the batch.
        """
        qc = QueryCounter(self.store)

        measure = lambda: qc.measure(
            lambda: self.inbox._performManyAct(
                lambda m: None,
                {},
                self.store.query(Message).paginate(pagesize=2),
                defer.Deferred()).next())

        first = measure()

        Message(store=self.store)

        self.assertEquals(first, measure())
Code Example #7
File: test_inbox.py  Project: pombredanne/quotient
    def test_unreadCountComplexityLimit(self):
        """
        Verify that unread counts on arbitrarily large mailboxes only perform
        counting work up to a specified limit.
        """
        # Now make sure the DB's work is limited too, not just the result
        # count.
        halfCount = 5
        countLimit = 2 * halfCount
        self.inboxScreen.countLimit = countLimit

        self.makeMessages(3 * halfCount, read=False, spam=False)
        qc = QueryCounter(self.store)
        m1 = qc.measure(self.unreadCount)


        self.makeMessages(halfCount, read=False, spam=False)
        m2 = qc.measure(self.unreadCount)
        self.assertEqual(m1, m2)
Code Example #8
File: test_sharing.py  Project: jonathanj/mantissa
    def test_limitEfficiency(self):
        """
        Verify that querying a limited number of shared items does not become
        slower as more items are shared.
        """
        zomg = QueryCounter(self.store)

        for i in range(10):
            self.addSomeThings()

        query = self.store.query(
            PrivateThing, limit=3, sort=PrivateThing.publicData.ascending)
        checkit = lambda : list(sharing.asAccessibleTo(self.bob, query))
        before = zomg.measure(checkit)

        for i in range(10):
            self.addSomeThings()

        after = zomg.measure(checkit)
        self.assertEquals(before, after)
Code Example #9
    def setUp(self):
        self.store = Store()
        privApp = PrivateApplication(store=self.store)
        installOn(privApp, self.store)
        self.model = TestableInequalityModel(
            self.store,
            DataThunkWithIndex,
            None,
            [DataThunkWithIndex.a],
            DataThunkWithIndex.a,
            True,
            privApp)
        self.counter = QueryCounter(self.store)
        self.data = []
        for i in range(4):
            self.data.append(DataThunkWithIndex(store=self.store, a=i))
Code Example #10
File: test_paginate.py  Project: DalavanCloud/axiom-1
    def _checkEfficiency(self, qry):
        s = qry.store
        mnum = [0]
        def more():
            mnum[0] += 1
            SingleColumnSortHelper(store=s, mainColumn=mnum[0], other=6)
        for i in range(5):
            more()

        qc = QueryCounter(s)
        # Sanity check: calling paginate() shouldn't do _any_ DB work.
        L = []
        m = qc.measure(
            # Let's also keep the page-size to 1, forcing the implementation to
            # get exactly 1 item each time.  (Otherwise the first N items will
            # take a fixed amount of work, the next 10, and so on, but each
            # subsequent item will take 0, breaking our attempt to measure
            # below)
            lambda : L.append(qry.paginate(pagesize=1)))
        self.assertEquals(m, 0)
        y = L.pop()
        g = iter(y)
        # startup costs a little more, so ignore that
        # s.debug = True
        what = qc.measure(g.next)                # 1
        oneunit = qc.measure(g.next)                   # 2
        otherunit = qc.measure(g.next)
        self.assertEquals(otherunit, oneunit) # 3
        # Now, make some more data

        for i in range(3):
            more()
        # and make sure that doesn't increase the amount of work
        self.assertEquals(qc.measure(g.next), oneunit) # 4
        self.assertEquals(qc.measure(g.next), oneunit) # 5
        self.assertEquals(qc.measure(g.next), oneunit) # 6

        # one more sanity check - we're at the end.
        self.assertEquals(g.next().mainColumn, 7)
        self.assertEquals(g.next().mainColumn, 8)
        self.assertEquals(list(g), [])
Code Example #11
File: test_paginate.py  Project: perkinslr/axiom-py3
    def _checkEfficiency(self, qry):
        s = qry.store
        mnum = [0]
        def more():
            mnum[0] += 1
            SingleColumnSortHelper(store=s, mainColumn=mnum[0], other=6)
        for i in range(5):
            more()

        qc = QueryCounter(s)
        # Sanity check: calling paginate() shouldn't do _any_ DB work.
        L = []
        m = qc.measure(
            # Let's also keep the page-size to 1, forcing the implementation to
            # get exactly 1 item each time.  (Otherwise the first N items will
            # take a fixed amount of work, the next 10, and so on, but each
            # subsequent item will take 0, breaking our attempt to measure
            # below)
            lambda : L.append(qry.paginate(pagesize=1)))
        self.assertEqual(m, 0)
        y = L.pop()
        g = iter(y)
        # startup costs a little more, so ignore that
        # s.debug = True
        what = qc.measure(g.__next__)                # 1
        oneunit = qc.measure(g.__next__)                   # 2
        otherunit = qc.measure(g.__next__)
        self.assertEqual(otherunit, oneunit) # 3
        # Now, make some more data

        for i in range(3):
            more()
        # and make sure that doesn't increase the amount of work
        self.assertEqual(qc.measure(g.__next__), oneunit) # 4
        self.assertEqual(qc.measure(g.__next__), oneunit) # 5
        self.assertEqual(qc.measure(g.__next__), oneunit) # 6

        # one more sanity check - we're at the end.
        self.assertEqual(next(g).mainColumn, 7)
        self.assertEqual(next(g).mainColumn, 8)
        self.assertEqual(list(g), [])
Code Example #12
class InequalityPerformanceTests(unittest.TestCase):
    """
    Tests for the complexity and runtime costs of the methods of
    L{InequalityModel}.
    """
    def setUp(self):
        self.store = Store()
        privApp = PrivateApplication(store=self.store)
        installOn(privApp, self.store)
        self.model = TestableInequalityModel(self.store, DataThunkWithIndex,
                                             None, [DataThunkWithIndex.a],
                                             DataThunkWithIndex.a, True,
                                             privApp)
        self.counter = QueryCounter(self.store)
        self.data = []
        for i in range(4):
            self.data.append(DataThunkWithIndex(store=self.store, a=i))

    def rowsAfterValue(self, value, count):
        return self.counter.measure(self.model.rowsAfterValue, value, count)

    def rowsAfterItem(self, item, count):
        return self.counter.measure(self.model.rowsAfterItem, item, count)

    def rowsBeforeValue(self, value, count):
        return self.counter.measure(self.model.rowsBeforeValue, value, count)

    def rowsBeforeItem(self, item, count):
        return self.counter.measure(self.model.rowsBeforeItem, item, count)

    def test_rowsAfterValue(self):
        """
        Verify that the cost of L{InequalityModel.rowsAfterValue} is
        independent of the total number of rows in the table being queried, as
        long as that number is greater than the number of rows requested.
        """
        first = self.rowsAfterValue(1, 2)
        DataThunkWithIndex(store=self.store, a=4)
        second = self.rowsAfterValue(1, 2)
        self.assertEqual(first, second)

    def test_rowsAfterValueWithDuplicatesBeforeStart(self):
        """
        Like L{test_rowsAfterValue}, but verify the behavior in the face of
        duplicate rows before the start value.
        """
        first = self.rowsAfterValue(1, 2)
        DataThunkWithIndex(store=self.store, a=0)
        second = self.rowsAfterValue(1, 2)
        self.assertEqual(first, second)

    def test_rowsAfterValueWithDuplicatesAtStart(self):
        """
        Like L{test_rowsAfterValue}, but verify the behavior in the face of
        duplicate rows exactly at the start value.
        """
        first = self.rowsAfterValue(1, 2)
        DataThunkWithIndex(store=self.store, a=1)
        second = self.rowsAfterValue(1, 2)
        self.assertEqual(first, second)

    def test_rowsAfterValueWithDuplicatesInResult(self):
        """
        Like L{test_rowsAfterValue}, but verify the behavior in the face of
        duplicate rows in the result set.
        """
        first = self.rowsAfterValue(1, 2)
        DataThunkWithIndex(store=self.store, a=2)
        second = self.rowsAfterValue(1, 2)
        self.assertEqual(first, second)

    def test_rowsAfterValueWithDuplicatesAfter(self):
        """
        Like L{test_rowsAfterValue}, but verify the behavior in the face of
        duplicate rows past the end of the result set.
        """
        first = self.rowsAfterValue(1, 2)
        DataThunkWithIndex(store=self.store, a=4)
        second = self.rowsAfterValue(1, 2)
        self.assertEqual(first, second)

    def test_rowsAfterItem(self):
        """
        Like L{test_rowsAfterValue}, but for L{InequalityModel.rowsAfterItem}.
        """
        first = self.rowsAfterItem(self.data[0], 2)
        DataThunkWithIndex(store=self.store, a=4)
        second = self.rowsAfterItem(self.data[0], 2)
        self.assertEqual(first, second)

    def test_rowsAfterItemWithDuplicatesBeforeStart(self):
        """
        Like L{test_rowsAfterValueWithDuplicatesBeforeStart}, but for
        L{InequalityModel.rowsAfterItem}.
        """
        DataThunkWithIndex(store=self.store, a=-1)
        first = self.rowsAfterItem(self.data[0], 2)
        DataThunkWithIndex(store=self.store, a=-1)
        second = self.rowsAfterItem(self.data[0], 2)
        self.assertEqual(first, second)

    def test_rowsAfterItemWithDuplicatesAtStart(self):
        """
        Like L{test_rowsAfterValueWithDuplicatesAtStart}, but for
        L{InequalityModel.rowsAfterItem}.
        """
        first = self.rowsAfterItem(self.data[0], 2)
        DataThunkWithIndex(store=self.store, a=0)
        second = self.rowsAfterItem(self.data[0], 2)
        self.assertEqual(first, second)

    test_rowsAfterItemWithDuplicatesAtStart.todo = (
        "Index scan to find appropriate storeID starting point once the "
        "value index has been used to seek to /near/ the correct starting "
        "place causes this to be O(N) on the number of rows with duplicate "
        "values.")

    def test_rowsAfterItemWithDuplicatesInResult(self):
        """
        Like L{test_rowsAfterValueWithDuplicatesInResult}, but for
        L{InequalityModel.rowsAfterItem}.
        """
        first = self.rowsAfterItem(self.data[0], 2)
        DataThunkWithIndex(store=self.store, a=1)
        second = self.rowsAfterItem(self.data[0], 2)
        self.assertEqual(first, second)

    def test_rowsAfterItemWithDuplicatesAfter(self):
        """
        Like L{test_rowsAfterValueWithDuplicatesAfter}, but for
        L{InequalityModel.rowsAfterItem}.
        """
        first = self.rowsAfterItem(self.data[0], 2)
        DataThunkWithIndex(store=self.store, a=3)
        second = self.rowsAfterItem(self.data[0], 2)
        self.assertEqual(first, second)

    def test_rowsBeforeValue(self):
        """
        Like L{test_rowsAfterValue}, but for
        L{InequalityModel.rowsBeforeValue}.
        """
        first = self.rowsBeforeValue(2, 2)
        DataThunkWithIndex(store=self.store, a=-1)
        second = self.rowsBeforeValue(2, 2)
        self.assertEqual(first, second)

    def test_rowsBeforeValueWithDuplicatesBeforeStart(self):
        """
        Like L{test_rowsAfterValueWithDuplicatesBeforeStart}, but for
        L{InequalityModel.rowsBeforeValue}.
        """
        first = self.rowsBeforeValue(2, 2)
        DataThunkWithIndex(store=self.store, a=3)
        second = self.rowsBeforeValue(2, 2)
        self.assertEqual(first, second)

    def test_rowsBeforeValueWithDuplicatesAtStart(self):
        """
        Like L{test_rowsAfterValueWithDuplicatesAtStart}, but for
        L{InequalityModel.rowsBeforeValue}.
        """
        first = self.rowsBeforeValue(2, 2)
        DataThunkWithIndex(store=self.store, a=2)
        second = self.rowsBeforeValue(2, 2)
        self.assertEqual(first, second)

    def test_rowsBeforeValueWithDuplicatesInResult(self):
        """
        Like L{test_rowsAfterValueWithDuplicatesInResult}, but for
        L{InequalityModel.rowsBeforeValue}.
        """
        first = self.rowsBeforeValue(2, 2)
        DataThunkWithIndex(store=self.store, a=1)
        second = self.rowsBeforeValue(2, 2)
        self.assertEqual(first, second)

    def test_rowsBeforeValueWithDuplicatesAfter(self):
        """
        Like L{test_rowsAfterValueWithDuplicatesAfter}, but for
        L{InequalityModel.rowsBeforeValue}.
        """
        first = self.rowsBeforeValue(2, 2)
        DataThunkWithIndex(store=self.store, a=0)
        second = self.rowsBeforeValue(2, 2)
        self.assertEqual(first, second)

    def test_rowsBeforeItem(self):
        """
        Like L{test_rowsAfterItem}, but for L{InequalityModel.rowsBeforeItem}.
        """
        first = self.rowsBeforeItem(self.data[3], 2)
        DataThunkWithIndex(store=self.store, a=-1)
        second = self.rowsBeforeItem(self.data[3], 2)
        self.assertEqual(first, second)

    def test_rowsBeforeItemWithDuplicatesBeforeStart(self):
        """
        Like L{test_rowsAfterItemWithDuplicatesBeforeStart}, but for
        L{InequalityModel.rowsBeforeItem}.
        """
        DataThunkWithIndex(store=self.store, a=4)
        first = self.rowsBeforeItem(self.data[3], 2)
        DataThunkWithIndex(store=self.store, a=4)
        second = self.rowsBeforeItem(self.data[3], 2)
        self.assertEqual(first, second)

    def test_rowsBeforeItemWithDuplicatesAtStart(self):
        """
        Like L{test_rowsAfterItemWithDuplicatesAtStart}, but for
        L{InequalityModel.rowsBeforeItem}.
        """
        first = self.rowsBeforeItem(self.data[3], 2)
        DataThunkWithIndex(store=self.store, a=3)
        second = self.rowsBeforeItem(self.data[3], 2)
        self.assertEqual(first, second)

    test_rowsBeforeItemWithDuplicatesAtStart.todo = (
        "Index scan to find appropriate storeID starting point once the "
        "value index has been used to seek to /near/ the correct starting "
        "place causes this to be O(N) on the number of rows with duplicate "
        "values.")

    def test_rowsBeforeItemWithDuplicatesInResult(self):
        """
        Like L{test_rowsAfterItemWithDuplicatesInResult}, but for
        L{InequalityModel.rowsBeforeItem}.
        """
        first = self.rowsBeforeItem(self.data[3], 2)
        DataThunkWithIndex(store=self.store, a=2)
        second = self.rowsBeforeItem(self.data[3], 2)
        self.assertEqual(first, second)

    def test_rowsBeforeItemWithDuplicatesAfter(self):
        """
        Like L{test_rowsAfterItemWithDuplicatesAfter}, but for
        L{InequalityModel.rowsBeforeItem}.
        """
        first = self.rowsBeforeItem(self.data[3], 2)
        DataThunkWithIndex(store=self.store, a=0)
        second = self.rowsBeforeItem(self.data[3], 2)
        self.assertEqual(first, second)
Code Example #13
class InequalityPerformanceTests(unittest.TestCase):
    """
    Tests for the complexity and runtime costs of the methods of
    L{InequalityModel}.
    """
    def setUp(self):
        self.store = Store()
        privApp = PrivateApplication(store=self.store)
        installOn(privApp, self.store)
        self.model = TestableInequalityModel(
            self.store,
            DataThunkWithIndex,
            None,
            [DataThunkWithIndex.a],
            DataThunkWithIndex.a,
            True,
            privApp)
        self.counter = QueryCounter(self.store)
        self.data = []
        for i in range(4):
            self.data.append(DataThunkWithIndex(store=self.store, a=i))


    def rowsAfterValue(self, value, count):
        return self.counter.measure(self.model.rowsAfterValue, value, count)


    def rowsAfterItem(self, item, count):
        return self.counter.measure(self.model.rowsAfterItem, item, count)


    def rowsBeforeValue(self, value, count):
        return self.counter.measure(self.model.rowsBeforeValue, value, count)


    def rowsBeforeItem(self, item, count):
        return self.counter.measure(self.model.rowsBeforeItem, item, count)


    def test_rowsAfterValue(self):
        """
        Verify that the cost of L{InequalityModel.rowsAfterValue} is
        independent of the total number of rows in the table being queried, as
        long as that number is greater than the number of rows requested.
        """
        first = self.rowsAfterValue(1, 2)
        DataThunkWithIndex(store=self.store, a=4)
        second = self.rowsAfterValue(1, 2)
        self.assertEqual(first, second)


    def test_rowsAfterValueWithDuplicatesBeforeStart(self):
        """
        Like L{test_rowsAfterValue}, but verify the behavior in the face of
        duplicate rows before the start value.
        """
        first = self.rowsAfterValue(1, 2)
        DataThunkWithIndex(store=self.store, a=0)
        second = self.rowsAfterValue(1, 2)
        self.assertEqual(first, second)


    def test_rowsAfterValueWithDuplicatesAtStart(self):
        """
        Like L{test_rowsAfterValue}, but verify the behavior in the face of
        duplicate rows exactly at the start value.
        """
        first = self.rowsAfterValue(1, 2)
        DataThunkWithIndex(store=self.store, a=1)
        second = self.rowsAfterValue(1, 2)
        self.assertEqual(first, second)


    def test_rowsAfterValueWithDuplicatesInResult(self):
        """
        Like L{test_rowsAfterValue}, but verify the behavior in the face of
        duplicate rows in the result set.
        """
        first = self.rowsAfterValue(1, 2)
        DataThunkWithIndex(store=self.store, a=2)
        second = self.rowsAfterValue(1, 2)
        self.assertEqual(first, second)


    def test_rowsAfterValueWithDuplicatesAfter(self):
        """
        Like L{test_rowsAfterValue}, but verify the behavior in the face of
        duplicate rows past the end of the result set.
        """
        first = self.rowsAfterValue(1, 2)
        DataThunkWithIndex(store=self.store, a=4)
        second = self.rowsAfterValue(1, 2)
        self.assertEqual(first, second)


    def test_rowsAfterItem(self):
        """
        Like L{test_rowsAfterValue}, but for L{InequalityModel.rowsAfterItem}.
        """
        first = self.rowsAfterItem(self.data[0], 2)
        DataThunkWithIndex(store=self.store, a=4)
        second = self.rowsAfterItem(self.data[0], 2)
        self.assertEqual(first, second)


    def test_rowsAfterItemWithDuplicatesBeforeStart(self):
        """
        Like L{test_rowsAfterValueWithDuplicatesBeforeStart}, but for
        L{InequalityModel.rowsAfterItem}.
        """
        DataThunkWithIndex(store=self.store, a=-1)
        first = self.rowsAfterItem(self.data[0], 2)
        DataThunkWithIndex(store=self.store, a=-1)
        second = self.rowsAfterItem(self.data[0], 2)
        self.assertEqual(first, second)


    def test_rowsAfterItemWithDuplicatesAtStart(self):
        """
        Like L{test_rowsAfterValueWithDuplicatesAtStart}, but for
        L{InequalityModel.rowsAfterItem}.
        """
        first = self.rowsAfterItem(self.data[0], 2)
        DataThunkWithIndex(store=self.store, a=0)
        second = self.rowsAfterItem(self.data[0], 2)
        self.assertEqual(first, second)
    test_rowsAfterItemWithDuplicatesAtStart.todo = (
        "Index scan to find appropriate storeID starting point once the "
        "value index has been used to seek to /near/ the correct starting "
        "place causes this to be O(N) on the number of rows with duplicate "
        "values.")


    def test_rowsAfterItemWithDuplicatesInResult(self):
        """
        Like L{test_rowsAfterValueWithDuplicatesInResult}, but for
        L{InequalityModel.rowsAfterItem}.
        """
        first = self.rowsAfterItem(self.data[0], 2)
        DataThunkWithIndex(store=self.store, a=1)
        second = self.rowsAfterItem(self.data[0], 2)
        self.assertEqual(first, second)


    def test_rowsAfterItemWithDuplicatesAfter(self):
        """
        Like L{test_rowsAfterValueWithDuplicatesAfter}, but for
        L{InequalityModel.rowsAfterItem}.
        """
        first = self.rowsAfterItem(self.data[0], 2)
        DataThunkWithIndex(store=self.store, a=3)
        second = self.rowsAfterItem(self.data[0], 2)
        self.assertEqual(first, second)


    def test_rowsBeforeValue(self):
        """
        Like L{test_rowsAfterValue}, but for
        L{InequalityModel.rowsBeforeValue}.
        """
        first = self.rowsBeforeValue(2, 2)
        DataThunkWithIndex(store=self.store, a=-1)
        second = self.rowsBeforeValue(2, 2)
        self.assertEqual(first, second)


    def test_rowsBeforeValueWithDuplicatesBeforeStart(self):
        """
        Like L{test_rowsAfterValueWithDuplicatesBeforeStart}, but for
        L{InequalityModel.rowsBeforeValue}.
        """
        first = self.rowsBeforeValue(2, 2)
        DataThunkWithIndex(store=self.store, a=3)
        second = self.rowsBeforeValue(2, 2)
        self.assertEqual(first, second)


    def test_rowsBeforeValueWithDuplicatesAtStart(self):
        """
        Like L{test_rowsAfterValueWithDuplicatesAtStart}, but for
        L{InequalityModel.rowsBeforeValue}.
        """
        first = self.rowsBeforeValue(2, 2)
        DataThunkWithIndex(store=self.store, a=2)
        second = self.rowsBeforeValue(2, 2)
        self.assertEqual(first, second)


    def test_rowsBeforeValueWithDuplicatesInResult(self):
        """
        Like L{test_rowsAfterValueWithDuplicatesInResult}, but for
        L{InequalityModel.rowsBeforeValue}.
        """
        first = self.rowsBeforeValue(2, 2)
        DataThunkWithIndex(store=self.store, a=1)
        second = self.rowsBeforeValue(2, 2)
        self.assertEqual(first, second)


    def test_rowsBeforeValueWithDuplicatesAfter(self):
        """
        Like L{test_rowsAfterValueWithDuplicatesAfter}, but for
        L{InequalityModel.rowsBeforeValue}.
        """
        first = self.rowsBeforeValue(2, 2)
        DataThunkWithIndex(store=self.store, a=0)
        second = self.rowsBeforeValue(2, 2)
        self.assertEqual(first, second)


    def test_rowsBeforeItem(self):
        """
        Like L{test_rowsAfterItem}, but for L{InequalityModel.rowsBeforeItem}.
        """
        first = self.rowsBeforeItem(self.data[3], 2)
        DataThunkWithIndex(store=self.store, a=-1)
        second = self.rowsBeforeItem(self.data[3], 2)
        self.assertEqual(first, second)


    def test_rowsBeforeItemWithDuplicatesBeforeStart(self):
        """
        Like L{test_rowsAfterItemWithDuplicatesBeforeStart}, but for
        L{InequalityModel.rowsBeforeItem}.
        """
        DataThunkWithIndex(store=self.store, a=4)
        first = self.rowsBeforeItem(self.data[3], 2)
        DataThunkWithIndex(store=self.store, a=4)
        second = self.rowsBeforeItem(self.data[3], 2)
        self.assertEqual(first, second)


    def test_rowsBeforeItemWithDuplicatesAtStart(self):
        """
        Like L{test_rowsAfterItemWithDuplicatesAtStart}, but for
        L{InequalityModel.rowsBeforeItem}.
        """
        first = self.rowsBeforeItem(self.data[3], 2)
        DataThunkWithIndex(store=self.store, a=3)
        second = self.rowsBeforeItem(self.data[3], 2)
        self.assertEqual(first, second)
    test_rowsBeforeItemWithDuplicatesAtStart.todo = (
        "Index scan to find appropriate storeID starting point once the "
        "value index has been used to seek to /near/ the correct starting "
        "place causes this to be O(N) on the number of rows with duplicate "
        "values.")


    def test_rowsBeforeItemWithDuplicatesInResult(self):
        """
        Like L{test_rowsAfterItemWithDuplicatesInResult}, but for
        L{InequalityModel.rowsBeforeItem}.
        """
        first = self.rowsBeforeItem(self.data[3], 2)
        DataThunkWithIndex(store=self.store, a=2)
        second = self.rowsBeforeItem(self.data[3], 2)
        self.assertEqual(first, second)


    def test_rowsBeforeItemWithDuplicatesAfter(self):
        """
        Like L{test_rowsAfterItemWithDuplicatesAfter}, but for
        L{InequalityModel.rowsBeforeItem}.
        """
        first = self.rowsBeforeItem(self.data[3], 2)
        DataThunkWithIndex(store=self.store, a=0)
        second = self.rowsBeforeItem(self.data[3], 2)
        self.assertEqual(first, second)
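
All of the examples above follow the same pattern: take a baseline measurement of a query's database work with QueryCounter.measure, create more rows, and assert that the measured work does not grow. The sketch below is a minimal, self-contained illustration of that pattern, not taken from any of the projects listed; it assumes QueryCounter is importable from axiom.test.util (where Axiom's own test suite defines it), and the WorkItem item type is hypothetical, defined here only for illustration.

# Minimal sketch of the constant-cost assertion pattern used in the examples
# above.  Assumptions: QueryCounter lives in axiom.test.util; WorkItem is a
# hypothetical item type defined only for this illustration.
from axiom.store import Store
from axiom.item import Item
from axiom.attributes import integer
from axiom.test.util import QueryCounter


class WorkItem(Item):
    typeName = 'test_workitem'
    schemaVersion = 1
    value = integer(indexed=True)


def demonstrateConstantCost():
    store = Store()
    for i in range(10):
        WorkItem(store=store, value=i)

    counter = QueryCounter(store)
    # The work to measure: a limited query over an indexed column.
    work = lambda: list(store.query(
        WorkItem, WorkItem.value > 3, limit=2,
        sort=WorkItem.value.ascending))

    before = counter.measure(work)

    # Add more rows past the result window; a limited query over an indexed
    # column should not have to do any additional work because of them.
    for i in range(10, 20):
        WorkItem(store=store, value=i)

    after = counter.measure(work)
    assert before == after, (before, after)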