Example 1
    def _items_to_write(self, stored_oid_tid):
        # pylint:disable=too-many-locals
        all_entries_len = len(self.__bucket)

        # Trim down quickly to the newest entries, materializing the list
        # TODO: Should we consolidate frequency information for the OID?
        # That can be expensive in the CFFI ring.
        newest_entries = self._newest_items()

        # Only write the newest entry for each OID.

        # Newly added items have a frequency of 1. They *may* be
        # useless stuff we picked up from an old cache file and
        # haven't touched again, or they may be something that
        # really is new and we have no track history for (that
        # would be in eden generation).
        #
        # Ideally we want to do something here that's similar to what
        # the SegmentedLRU does: if we would reject an item from eden,
        # then only keep it if it's better than the least popular
        # thing in probation.
        #
        # It's not clear that we have a good algorithm for this, and
        # what we tried takes a lot of time, so we don't bother.
        # TODO: Finish tuning these.

        # But we have a problem: If there's an object in the existing
        # cache that we have updated but now exclude, we would fail to
        # write its new data. So we can't exclude anything that's
        # changed without also nuking it from the DB.

        # Also, if we had a value loaded from the DB, and then we
        # modified it, but later ejected all entries for that object
        # from the cache, we wouldn't see it here, and we could be
        # left with stale data in the database. We must track the
        # (oid, tid) we load from the database and record that we
        # should DELETE those rows if that happens.
        # This is done with the _min_allowed_writeback dictionary,
        # which we mutate here: When we find a row we can write that meets
        # the requirement, we take it out of the dictionary.
        # Later, if there's anything left in the dictionary, we *know* there
        # may be stale entries in the cache, and we remove them.

        pop_min_required_tid = self._min_allowed_writeback.pop
        get_min_required_tid = self._min_allowed_writeback.get  # pylint:disable=no-member
        get_current_oid_tid = stored_oid_tid.get
        min_allowed_count = 0
        matching_tid_count = 0
        # When we accumulate all the rows here before returning them,
        # this function shows as about 3% of the total time to save
        # in a very large database.
        with _timer() as t:
            for oid, entry in newest_entries.items():
                # We must have something at least this fresh
                # to consider writing it out
                actual_tid = entry.value[1]
                min_allowed = get_min_required_tid(oid, -1)
                if min_allowed > actual_tid:
                    min_allowed_count += 1
                    continue

                # If the database already has this tid (or somehow
                # something newer), it's not worth writing it out again
                # (the states should be identical).
                current_tid = get_current_oid_tid(oid, -1)
                if current_tid >= actual_tid:
                    matching_tid_count += 1
                    continue

                yield (oid, actual_tid, entry.value[0], entry.frequency)

                # We're able to satisfy this, so we don't need to consider
                # it in our min_allowed set anymore.
                # XXX: This isn't really the right place to do this.
                # Move this and write a test.
                pop_min_required_tid(oid, None)

        removed_entry_count = matching_tid_count + min_allowed_count
        logger.info(
            "Consolidated from %d entries to %d entries "
            "(rejected stale: %d; already in db: %d) in %s", all_entries_len,
            len(newest_entries) - removed_entry_count, min_allowed_count,
            matching_tid_count, t.duration)
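
The bookkeeping the comments above describe is easier to see in isolation. Below is a minimal sketch of the same pattern with free-standing inputs; the names (items_to_write, cache_rows, min_allowed_writeback) are illustrative and are not the class's real attributes.

def items_to_write(cache_rows, min_allowed_writeback, stored_oid_tid):
    """Yield (oid, tid, state) rows that are fresh enough to persist.

    *cache_rows* maps oid -> (tid, state) for the newest cached entry.
    *min_allowed_writeback* maps oid -> the newest tid we ever saw for
    it in the database; it is mutated as rows are satisfied.
    *stored_oid_tid* maps oid -> the tid currently stored in the database.
    """
    for oid, (tid, state) in cache_rows.items():
        # The database once had something newer than this entry;
        # writing it would resurrect stale data, so skip it.
        if min_allowed_writeback.get(oid, -1) > tid:
            continue
        # The database already has this tid (or newer); the states
        # should be identical, so there is nothing to write.
        if stored_oid_tid.get(oid, -1) >= tid:
            continue
        yield oid, tid, state
        # This oid is now satisfied; drop it from the tracking dict.
        min_allowed_writeback.pop(oid, None)

Whatever survives in min_allowed_writeback after the generator is exhausted identifies database rows that may be stale; deleting those is what the trim step in Example 2 does.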
Example 2
    def write_to_sqlite(self, connection):
        # pylint:disable=too-many-locals
        mem_before = get_memory_usage()

        cur = connection.cursor()

        db = Database.from_connection(connection)
        begin = time.time()

        # In a large benchmark, store_temp() accounts for 32%
        # of the total time, while move_from_temp accounts for 49%.

        # The batch size depends on how many bound parameters a single
        # statement can have; if we go too big we get OperationalError:
        # too many SQL variables. The default limit is 999.
        # Note that the multiple-value syntax was added in
        # 3.7.11, 2012-03-20.
        with _timer() as batch_timer:
            cur.execute('BEGIN')
            stored_oid_tid = db.oid_to_tid
            fetch_current = time.time()
            count_written, _ = db.store_temp(
                self._items_to_write(stored_oid_tid))
            cur.execute("COMMIT")

        # Take out the (reserved) write lock now; if we don't, we can
        # get 'OperationalError: database is locked' (the SQLITE_BUSY
        # error code; the SQLITE_LOCKED error code is about table
        # locks and has the string 'database table is locked').
        # BEGIN IMMEDIATE lets us stand in line for the lock.
        #
        # Note that the SQLITE_BUSY error on COMMIT happens "if
        # another thread or process has a shared lock on the database
        # that prevented the database from being updated." But "When
        # COMMIT fails in this way, the transaction remains active and
        # the COMMIT can be retried later after the reader has had a
        # chance to clear." So we could retry the commit a couple of
        # times and give up if we can't get there. It looks like one
        # production database takes about 2.5s to execute this entire function;
        # it seems like most instances that are shutting down get to this place
        # at roughly the same time and stack up waiting:
        #
        # Instance | Save Time | write_to_sqlite time
        #     1    |   2.36s   |   2.35s
        #     2    |   5.31s   |   5.30s
        #     3    |   6.51s   |   6.30s
        #     4    |   7.91s   |   7.90s
        #
        # That either suggests that all waiting happens just to acquire this lock and
        # commit this transaction (and that subsequent operations are inconsequential
        # in terms of time) OR that the exclusive lock isn't truly getting dropped,
        # OR that some function in subsequent processes is taking longer and longer.
        # And indeed, our logging showed that a gc.collect() at the end of this function was
        # taking more and more time:
        #
        # Instance | GC Time  | Reported memory usage after GC
        #    1     |   2.00s  |     34.4MB
        #    2     |   3.50s  |     83.2MB
        #    3     |   4.94s  |    125.9MB
        #    4     |   6.44s  |    202.1MB
        #
        # The exclusive lock time did vary, but not as much; trim time
        # didn't really vary:
        #
        # Instance |  Exclusive Write Time | Batch Insert Time | Fetch Current
        #    1     |   0.09s               |   0.12s           |   0.008s
        #    2     |   1.19s               |   0.44s           |   0.008s
        #    3     |   0.91s               |   0.43s           |   0.026s
        #    4     |   0.92s               |   0.34s           |   0.026s
        with _timer() as exclusive_timer:
            cur.execute('BEGIN IMMEDIATE')
            # Between the time we committed our batch and the time we
            # acquired this lock, someone else may have committed and
            # changed the data in object_state. Things might have
            # changed in the database since the snapshot we used when
            # storing the temp objects, so we can't just assume that
            # snapshot is still the truth.
            rows_inserted = db.move_from_temp()
            if self.checkpoints:
                db.update_checkpoints(*self.checkpoints)

            cur.execute('COMMIT')
        # TODO: Maybe use BTrees.family.intersection to get the common keys?
        # Or maybe use a SQLite Python function?
        # 'DELETE from object_state where is_stale(zoid, tid)'
        # What's fastest? The custom function version would require scanning the entire
        # table; unfortunately this sqlite interface doesn't allow defining virtual tables.
        with _timer() as trim_timer:
            # Delete anything we still know is stale, because we
            # saw a new TID for it, but we had nothing to replace it with.
            min_allowed_writeback = OID_TID_MAP_TYPE()
            for k, v in self._min_allowed_writeback.items():
                if stored_oid_tid.get(k, MAX_TID) < v:
                    min_allowed_writeback[k] = v
            db.trim_to_size(self.limit, min_allowed_writeback)
            del min_allowed_writeback
        # Typically we write when we're closing and don't expect to do
        # anything else, so there's no need to keep tracking this info.
        self._min_allowed_writeback = OID_TID_MAP_TYPE()

        # Cleanups
        cur.close()
        del cur
        db.close()  # closes the connection.
        del db
        del stored_oid_tid

        # We're probably shutting down, so don't perform a GC; we've
        # seen that it can get quite lengthy.

        end = time.time()
        stats = self.stats()
        mem_after = get_memory_usage()
        logger.info(
            "Wrote %d items to %s in %s "
            "(%s to fetch current, %s to insert batch (%d rows), %s to write, %s to trim). "
            "Used %s memory. (Storage: %s) "
            "Total hits %s; misses %s; ratio %s (stores: %s)", count_written,
            connection, end - begin, fetch_current - begin,
            batch_timer.duration, rows_inserted,
            exclusive_timer.duration, trim_timer.duration,
            byte_display(mem_after - mem_before), self._bucket0, stats['hits'],
            stats['misses'], stats['ratio'], stats['sets'])

        return count_written
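
The comments above touch on two separate SQLite constraints: the default limit of 999 bound parameters per statement (hence the batching), and taking the reserved write lock with BEGIN IMMEDIATE so a later COMMIT doesn't fail with SQLITE_BUSY. The following is a rough sketch of both using the standard sqlite3 module, not the project's actual Database helper; the (zoid, tid, state) columns and the write_batches name are assumptions for illustration.

import itertools
import sqlite3

ROW_PARAMS = 3                       # (zoid, tid, state)
MAX_VARS = 999                       # SQLite's default per-statement limit
ROWS_PER_BATCH = MAX_VARS // ROW_PARAMS

def write_batches(conn, rows):
    """Insert (zoid, tid, state) tuples using multi-value INSERTs."""
    cur = conn.cursor()
    # Take the reserved write lock up front; a plain BEGIN would defer
    # it and let COMMIT fail later with SQLITE_BUSY.
    cur.execute('BEGIN IMMEDIATE')
    rows = iter(rows)
    while True:
        chunk = list(itertools.islice(rows, ROWS_PER_BATCH))
        if not chunk:
            break
        # Multi-value INSERT (SQLite >= 3.7.11); the chunk size keeps
        # the number of bound parameters under the 999 limit.
        placeholders = ', '.join(['(?, ?, ?)'] * len(chunk))
        params = [value for row in chunk for value in row]
        cur.execute(
            'INSERT OR REPLACE INTO object_state (zoid, tid, state) '
            'VALUES ' + placeholders,
            params)
    cur.execute('COMMIT')

if __name__ == '__main__':
    # isolation_level=None so we manage transactions explicitly.
    conn = sqlite3.connect(':memory:', isolation_level=None)
    conn.execute('CREATE TABLE object_state '
                 '(zoid INTEGER PRIMARY KEY, tid INTEGER, state BLOB)')
    write_batches(conn, [(1, 5, b'state-1'), (2, 7, b'state-2')])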
Example 3
    def _items_to_write(self, stored_oid_tid):
        # pylint:disable=too-many-locals
        all_entries_len = len(self._cache)

        # Only write the newest entry for each OID.

        # Newly added items have a frequency of 1. They *may* be
        # useless stuff we picked up from an old cache file and
        # haven't touched again, or they may be something that
        # really is new and we have no track history for (that
        # would be in eden generation).
        #
        # Ideally we want to do something here that's similar to what
        # the SegmentedLRU does: if we would reject an item from eden,
        # then only keep it if it's better than the least popular
        # thing in probation.
        #
        # It's not clear that we have a good algorithm for this, and
        # what we tried takes a lot of time, so we don't bother.
        # TODO: Finish tuning these.

        # But we have a problem: If there's an object in the existing
        # cache that we have updated but now exclude, we would fail to
        # write its new data. So we can't exclude anything that's
        # changed without also nuking it from the DB.

        # Also, if we had a value loaded from the DB, and then we
        # modified it, but later ejected all entries for that object
        # from the cache, we wouldn't see it here, and we could be
        # left with stale data in the database. We must track the
        # (oid, tid) we load from the database and record that we
        # should DELETE those rows if that happens.
        # This is done with the _min_allowed_writeback dictionary,
        # which we mutate here: When we find a row we can write that meets
        # the requirement, we take it out of the dictionary.
        # Later, if there's anything left in the dictionary, we *know* there
        # may be stale entries in the cache, and we remove them.

        # The *object_index* is our best polling data; anything it has is gospel,
        # so if it has an entry for an object, it supersedes our own.
        get_current_oid_tid = stored_oid_tid.get
        matching_tid_count = 0
        # When we accumulate all the rows here before returning them,
        # this function shows as about 3% of the total time to save
        # in a very large database.
        with _timer() as t:
            for oid, lru_entry in self._cache.iteritems():
                newest_value = lru_entry.newest_value
                # We must have something at least this fresh
                # to consider writing it out
                if newest_value is None:
                    raise AssertionError("Value should not be none", oid,
                                         lru_entry)
                actual_tid = newest_value.tid

                # If what we have matches what's in the database, or is
                # even older (somehow), it's not worth writing to the
                # database (states should be identical).
                current_tid = get_current_oid_tid(oid, -1)
                if current_tid >= actual_tid:
                    matching_tid_count += 1
                    continue

                yield (oid, actual_tid, newest_value.frozen,
                       bytes(newest_value.state), lru_entry.frequency)

                # We're able to satisfy this, so we don't need to consider
                # it in our min_allowed set anymore.
                # XXX: This isn't really the right place to do this.
                # Move this and write a test.

        removed_entry_count = matching_tid_count
        logger.info(
            "Examined %d entries and rejected %d "
            "(already in db: %d) in %s", all_entries_len, removed_entry_count,
            matching_tid_count, t.duration)
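
The TODO before the trim step in Example 2 wonders about pushing the staleness test into SQLite itself rather than computing the mapping in Python. Here is a minimal sketch of that alternative, assuming the standard sqlite3 Connection.create_function API and the same illustrative object_state columns as above; the is_stale function and delete_stale_rows are hypothetical and not something the code above actually does.

def delete_stale_rows(connection, min_allowed_writeback):
    """Delete rows whose stored tid is older than the newest tid known.

    *min_allowed_writeback* maps zoid -> the minimum tid we consider
    current; rows below that threshold are stale.
    """
    def is_stale(zoid, tid):
        # Unknown oids are never stale; known ones are stale when the
        # stored tid is older than the minimum we require.
        return tid < min_allowed_writeback.get(zoid, tid)

    # Registering a Python callable as a SQL function lets a single
    # DELETE do the work.
    connection.create_function('is_stale', 2, is_stale)
    connection.execute('DELETE FROM object_state WHERE is_stale(zoid, tid)')
    connection.commit()

Whether this beats computing the intersection with BTrees is exactly the open question in that TODO; the user-defined function forces a full-table scan, which is the trade-off the original comment already points out.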