Example #1
    def test_list_versions_same_rows(self):
        """
        check that this can find all the same rows list_versions returns in the
        versioned case above
        """
        log = logging.getLogger("test_list_versions_same_rows")

        sql_text = list_versions(_test_collection_id,
                                 versioned=True,
                                 prefix=_test_prefix)

        args = {
            "collection_id": _test_collection_id,
            "prefix": _test_prefix,
        }

        with open("/tmp/debug.sql", "w") as debug_sql_file:
            debug_sql_file.write(mogrify(sql_text, args))

        cursor = self._connection.cursor()
        cursor.execute(sql_text, args)
        list_versions_rows = cursor.fetchall()
        cursor.close()

        for list_versions_row in list_versions_rows:
            sql_text = version_for_key(
                _test_collection_id,
                versioned=True,
                key=list_versions_row["key"],
                unified_id=list_versions_row["unified_id"])

            args = {
                "collection_id": _test_collection_id,
                "key": list_versions_row["key"],
                "unified_id": list_versions_row["unified_id"]
            }

            cursor = self._connection.cursor()
            cursor.execute(sql_text, args)
            version_for_key_rows = cursor.fetchall()
            cursor.close()

            self.assertTrue(
                len(version_for_key_rows) > 0,
                "{0} {1}".format(args, list_versions_row))
            for version_for_key_row in version_for_key_rows:
                self.assertEqual(version_for_key_row["key"],
                                 list_versions_row["key"])
                self.assertEqual(version_for_key_row["unified_id"],
                                 list_versions_row["unified_id"],
                                 list_versions_row)
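
The test above dumps the fully interpolated query to /tmp/debug.sql through a module-level mogrify() helper that is not part of this listing. A minimal sketch, assuming it simply wraps psycopg2's cursor.mogrify(); the short-lived connection, the placeholder DSN, and the decode step are assumptions, not the project's actual helper:

    import psycopg2

    def mogrify(sql_text, args, dsn="dbname=test"):
        # Hypothetical helper: psycopg2 only exposes mogrify() on a cursor, so
        # interpolate the parameters through a short-lived connection.  The
        # real helper presumably reuses the test suite's existing connection.
        with psycopg2.connect(dsn) as connection:
            with connection.cursor() as cursor:
                # cursor.mogrify() returns bytes; decode so the result can be
                # written to a text-mode debug file.
                return cursor.mogrify(sql_text, args).decode("utf-8")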
Example #3
    def test_version_for_key(self):
        """
        version_for_key 
        """
        log = logging.getLogger("test_version_for_key")

        # check that for every row in list_keys, calling version_for_key with
        # unified_id=None returns the same row, regardless of whether the
        # collection is versioned or not.
        for versioned in [True, False]:
            sql_text = list_keys(_test_collection_id,
                                 versioned=versioned,
                                 prefix=_test_prefix)

            args = {
                "collection_id": _test_collection_id,
                "prefix": _test_prefix,
            }

            cursor = self._connection.cursor()
            cursor.execute(sql_text, args)
            baseline_rows = cursor.fetchall()
            cursor.close()

            for row in baseline_rows:
                sql_text = version_for_key(_test_collection_id,
                                           versioned=versioned,
                                           key=row["key"])

                args = {
                    "collection_id": _test_collection_id,
                    "key": row["key"]
                }

                cursor = self._connection.cursor()
                if _write_debug_sql:
                    with open("/tmp/debug.sql", "w") as debug_sql_file:
                        debug_sql_file.write(mogrify(sql_text, args))
                cursor.execute(sql_text, args)
                test_rows = cursor.fetchall()
                cursor.close()

                # 2012-12-20 dougfort -- list_keys and list_versions only
                # retrieve one conjoined part, but version_for_key retrieves
                # all conjoined parts. So we may have more than one row here.
                self.assertTrue(len(test_rows) > 0)
                for test_row in test_rows:
                    self.assertEqual(test_row["key"], row["key"],
                                     (test_row["key"], row["key"]))
                    self.assertEqual(
                        test_row["unified_id"], row["unified_id"],
                        (test_row["unified_id"], row["unified_id"]))

        # check that these return empty
        for versioned in [True, False]:
            sql_text = version_for_key(_test_collection_id,
                                       versioned=versioned,
                                       key=_test_key,
                                       unified_id=_test_no_such_unified_id)

            args = {
                "collection_id": _test_collection_id,
                "key": row["key"],
                "unified_id": _test_no_such_unified_id
            }

            cursor = self._connection.cursor()
            cursor.execute(sql_text, args)
            test_rows = cursor.fetchall()
            cursor.close()
            self.assertEqual(len(test_rows), 0, test_rows)
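
The tests index result rows by column name (row["key"], row["unified_id"]), so self._connection presumably uses a dict-style cursor. A minimal setUp sketch under that assumption; the class name and DSN are placeholders, not the project's actual fixture:

    import unittest

    import psycopg2
    import psycopg2.extras

    class SQLGenerationTestCase(unittest.TestCase):  # hypothetical class name
        def setUp(self):
            # RealDictCursor makes fetchall()/fetchone() return dict-like rows,
            # matching the row["key"] access used throughout the tests above
            self._connection = psycopg2.connect(
                "dbname=test",  # placeholder DSN
                cursor_factory=psycopg2.extras.RealDictCursor)

        def tearDown(self):
            self._connection.close()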
Example #4
    def test_limits_and_markers(self):
        """
        check that the limits and markers work correctly. 
        perhaps take the result with limit=None, and run a series of queries 
        with limit=1 for each of those rows, checking results.
        """
        log = logging.getLogger("test_limits_and_markers")

        for versioned in [True, False]:
            sql_text = list_keys(_test_collection_id,
                                 versioned=versioned,
                                 prefix=_test_prefix)

            args = {
                "collection_id": _test_collection_id,
                "prefix": _test_prefix,
            }

            cursor = self._connection.cursor()
            cursor.execute(sql_text, args)
            baseline_rows = cursor.fetchall()
            cursor.close()

            key_marker = None
            for row in baseline_rows:
                sql_text = list_keys(_test_collection_id,
                                     versioned=versioned,
                                     prefix=_test_prefix,
                                     key_marker=key_marker,
                                     limit=1)

                args = {
                    "collection_id": _test_collection_id,
                    "prefix": _test_prefix,
                    "key_marker": key_marker,
                    "limit": 1
                }

                cursor = self._connection.cursor()
                cursor.execute(sql_text, args)
                test_row = cursor.fetchone()
                cursor.close()

                self.assertEqual(test_row["key"], row["key"],
                                 (test_row["key"], row["key"]))
                self.assertEqual(test_row["unified_id"], row["unified_id"],
                                 (test_row["unified_id"], row["unified_id"]))

                key_marker = test_row["key"]

        for versioned in [True, False]:
            sql_text = list_versions(_test_collection_id,
                                     versioned=versioned,
                                     prefix=_test_prefix,
                                     limit=None)

            args = {
                "collection_id": _test_collection_id,
                "prefix": _test_prefix,
            }

            if _write_debug_sql:
                with open("/tmp/debug_all.sql", "w") as debug_sql_file:
                    debug_sql_file.write(mogrify(sql_text, args))

            cursor = self._connection.cursor()
            cursor.execute(sql_text, args)
            baseline_rows = cursor.fetchall()
            cursor.close()
            baseline_set = set(
                (r["key"], r["unified_id"]) for r in baseline_rows)
            key_marker = None
            version_marker = None
            for row_idx, row in enumerate(baseline_rows):
                sql_text = list_versions(_test_collection_id,
                                         versioned=versioned,
                                         prefix=_test_prefix,
                                         key_marker=key_marker,
                                         version_marker=version_marker,
                                         limit=1)

                args = {
                    "collection_id": _test_collection_id,
                    "prefix": _test_prefix,
                    "limit": 1
                }

                if key_marker is not None:
                    args["key_marker"] = key_marker
                if version_marker is not None:
                    args["version_marker"] = version_marker

                if _write_debug_sql:
                    debug_filename = "/tmp/debug_%s.sql" % (row_idx, )
                    with open(debug_filename, "w") as debug_sql_file:
                        debug_sql_file.write(mogrify(sql_text, args))

                # this result should always be stable; run the same query
                # several times and verify that it returns the same row.
                last_time = None
                for _ in range(5):
                    cursor = self._connection.cursor()
                    cursor.execute(sql_text, args)
                    test_row = cursor.fetchone()
                    cursor.close()
                    if last_time is not None:
                        assert test_row == last_time
                    last_time = test_row

                # make sure it's in the result somewhere. below we test if it's
                # in the right order.
                self.assertIn((test_row["key"], test_row["unified_id"]),
                              baseline_set)

                log.info("{0}, {1}".format(test_row["key"], row["key"]))
                log.debug(sql_text)

                self.assertEqual(
                    test_row["key"], row["key"],
                    (row_idx, versioned, test_row["key"], row["key"]))
                self.assertEqual(test_row["unified_id"], row["unified_id"],
                                 (row_idx, versioned, test_row["unified_id"],
                                  row["unified_id"]))

                key_marker = test_row["key"]
                version_marker = test_row["unified_id"]
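
The key_marker/limit pattern exercised above is ordinary keyset pagination: each query asks for the rows that sort strictly after the last key already seen. A self-contained illustration of the idea with made-up rows (not the library's code; the versioned case additionally carries a version_marker):

    def paginate(rows, key_marker=None, limit=1):
        # rows are assumed to be sorted by "key"; return up to `limit` rows
        # whose key sorts strictly after key_marker (None means start at the top)
        if key_marker is not None:
            rows = [row for row in rows if row["key"] > key_marker]
        return rows[:limit]

    listing = [{"key": "prefix-a"}, {"key": "prefix-b"}, {"key": "prefix-c"}]
    walked, marker = [], None
    while True:
        batch = paginate(listing, key_marker=marker, limit=1)
        if not batch:
            break
        walked.extend(batch)
        marker = batch[-1]["key"]
    # walking one row at a time reproduces the full limit=None listing
    assert walked == listing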
Example #5
    def test_list(self):
        """
        test listing keys and versions of keys
        """
        log = logging.getLogger("test_list")

        versioned = False
        sql_text = list_versions(_test_collection_id,
                                 versioned=versioned,
                                 prefix=_test_prefix,
                                 limit=None)

        args = {
            "collection_id": _test_collection_id,
            "prefix": _test_prefix,
        }

        if _write_debug_sql:
            with open("/tmp/debug_unversioned_rows.sql",
                      "w") as debug_sql_file:
                debug_sql_file.write(mogrify(sql_text, args))

        cursor = self._connection.cursor()
        cursor.execute(sql_text, args)
        unversioned_rows = cursor.fetchall()
        cursor.close()

        collectable_set = self._retrieve_collectables(versioned)
        test_set = set(
            (r["key"], r["unified_id"]) for r in unversioned_rows)
        collectable_intersection = test_set & collectable_set
        self.assertEqual(len(collectable_intersection), 0,
                         collectable_intersection)

        # check that there's no more than one row per key for a non-versioned
        # collection
        # check that every row begins with prefix
        unversioned_key_counts = Counter()
        for row in unversioned_rows:
            unversioned_key_counts[row["key"]] += 1
            self.assertTrue(row["key"].startswith(_test_prefix))
        for key, value in unversioned_key_counts.items():
            self.assertEqual(value, 1, (key, value))

        versioned = True
        sql_text = list_versions(_test_collection_id,
                                 versioned=versioned,
                                 prefix=_test_prefix)

        args = {
            "collection_id": _test_collection_id,
            "prefix": _test_prefix,
        }

        if _write_debug_sql:
            with open("/tmp/debug_versioned_rows.sql", "w") as debug_sql_file:
                debug_sql_file.write(mogrify(sql_text, args))

        cursor = self._connection.cursor()
        cursor.execute(sql_text, args)
        versioned_rows = cursor.fetchall()
        cursor.close()

        collectable_set = self._retrieve_collectables(versioned)
        test_set = set(
            (r["key"], r["unified_id"]) for r in versioned_rows)
        collectable_intersection = test_set & collectable_set
        self.assertEqual(len(collectable_intersection), 0,
                         collectable_intersection)

        latest_versioned_rows = OrderedDict()
        for row in versioned_rows[::-1]:
            latest_versioned_rows.setdefault(row["key"], row)
        latest_versioned_rows = list(latest_versioned_rows.values())
        latest_versioned_rows.reverse()
        assert len(latest_versioned_rows) <= len(versioned_rows)

        versioned_key_counts = Counter()
        for row in versioned_rows:
            versioned_key_counts[row["key"]] += 1
            self.assertTrue(row["key"].startswith(_test_prefix))

        # check that there's >= as many rows now as above.
        for key, value in versioned_key_counts.items():
            self.assertTrue(value >= unversioned_key_counts[key], (key, value))

        # check that the list_keys result is consistent with list_versions
        # above (although there could be extra columns).  Note that
        # list_keys(versioned=True) may have records that
        # list_versions(versioned=False) does not have, because there are more
        # ways for a segment to become eligible for garbage collection in an
        # unversioned collection.

        for versioned in [False, True]:
            sql_text = list_keys(_test_collection_id,
                                 versioned=versioned,
                                 prefix=_test_prefix,
                                 limit=None)

            args = {
                "collection_id": _test_collection_id,
                "prefix": _test_prefix,
            }

            cursor = self._connection.cursor()
            cursor.execute(sql_text, args)
            key_rows = cursor.fetchall()
            cursor.close()

            if _write_debug_sql:
                debug_filename = "/tmp/debug_key_rows_versioned_%r.sql" % (
                    versioned, )
                with open(debug_filename, "w") as debug_sql_file:
                    debug_sql_file.write(mogrify(sql_text, args))

            collectable_set = self._retrieve_collectables(versioned)
            test_set = set(
                (r["key"], r["unified_id"]) for r in key_rows)
            collectable_intersection = test_set & collectable_set
            self.assertEqual(len(collectable_intersection), 0,
                             collectable_intersection)

            if versioned:
                # a list of keys with versioning on may have keys that don't
                # show up in the list of unversioned rows.  That's because in
                # an unversioned collection, keys end when another key is
                # added.  So it's possible for that plus a tombstone to cause a
                # situation where an archive is not eligible for garbage
                # collection in a versioned collection, but it is eligible for
                # garbage collection in an unversioned collection.
                self.assertGreaterEqual(len(key_rows), len(unversioned_rows), (
                    len(key_rows),
                    len(unversioned_rows),
                    versioned,
                ))
            else:
                self.assertEqual(len(key_rows), len(unversioned_rows), (
                    len(key_rows),
                    len(unversioned_rows),
                    versioned,
                ))

            key_counts = Counter()
            for row in key_rows:
                key_counts[row["key"]] += 1
                self.assertTrue(row["key"].startswith(_test_prefix))
            for key, value in key_counts.items():
                self.assertEqual(value, 1, (key, value))

            if versioned:
                for key_row, version_row in zip(key_rows,
                                                latest_versioned_rows):
                    self.assertEqual(key_row["key"], version_row["key"])
                    self.assertEqual(key_row["unified_id"],
                                     version_row["unified_id"])
            else:
                for key_row, version_row in zip(key_rows, unversioned_rows):
                    self.assertEqual(key_row["key"], version_row["key"])
                    self.assertEqual(key_row["unified_id"],
                                     version_row["unified_id"])