Example #1
    def _init_db(self):
        """
        Creates the database schema, or clears the database if the cache is empty.
        This method cannot be called while the database is in normal use.
        """
        metadata = MetaData(bind=self._db)
        self._db_metadata = metadata

        # Set the ORM mapping
        cache_table = CacheEntry.get_alchemy_mapping(metadata)
        try:
            mapper(CacheEntry, cache_table)
        except ArgumentError:
            self._logger.warning("Cache DB mapping has already been performed")

        # The next section must only be attempted by one process at a time on server startup
        self.get_global_lock()
        try:
            # Check the control database schema has been created
            if not cache_table.exists():
                self._create_db_schema(cache_table)
                self._logger.info("Cache control database created.")

            # If the cache is up, see whether it is empty
            if self.capacity() > 0:
                # -1 to uncount the global lock
                cache_count = self.count() - 1
                if cache_count <= 0:
                    # See if the control database is empty
                    db_session = self._db.Session()
                    db_count = db_session.query(CacheEntry.key).limit(1).count()
                    db_session.close()
                    if db_count > 0:
                        # Cache is empty, control database is not. Delete and re-create
                        # the database so we're not left with any fragmentation, etc.
                        self._logger.info("Cache is empty. Resetting cache control database.")
                        self._drop_db_schema(cache_table)
                        self._create_db_schema(cache_table)
            else:
                self._logger.warning("Cache is down, skipping cache control database check.")

            self._logger.info("Cache control database opened.")
        finally:
            self.free_global_lock()
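
For context, here is a minimal sketch (not from the source) of what CacheEntry.get_alchemy_mapping() might return, assuming a classic SQLAlchemy Table mapping. The table name and column types are guesses; the column names are inferred from the fields these examples read and write (key, valuesize, searchfield1-5, metadata).

from sqlalchemy import Table, Column, String, BigInteger, LargeBinary

class CacheEntry(object):
    def __init__(self, key, valuesize):
        self.key = key
        self.valuesize = valuesize

    @classmethod
    def get_alchemy_mapping(cls, metadata):
        # Hypothetical table definition; the table name and column types are assumptions
        return Table(
            'cachectl', metadata,
            Column('key', String(256), primary_key=True),
            Column('valuesize', BigInteger, nullable=False),
            Column('searchfield1', BigInteger, index=True),
            Column('searchfield2', BigInteger, index=True),
            Column('searchfield3', BigInteger, index=True),
            Column('searchfield4', BigInteger, index=True),
            Column('searchfield5', BigInteger, index=True),
            Column('metadata', LargeBinary)
        )
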
Example #2
    def put(self, key, obj, expiry_secs=0, search_info=None):
        """
        Adds or replaces a managed object in cache, with an optional expiry time
        in seconds. The object can be of any size; if the object is too large to
        store in one cache slot, it will be transparently stored as multiple chunks.

        search_info, if provided, should be a dictionary of the form:
        { 'searchfield1': 1000, 'searchfield2': 2000, 'searchfield3': 3000,
          'searchfield4': None, 'searchfield5': None, 'metadata': some_object }
        and will be stored in the cache control database, allowing later
        searching of the cache via the search() function.
        All dictionary keys are mandatory but the values may be set to None.

        Returns a boolean indicating success.
        """
        # Split object into chunks
        chunks = {}
        num_slots = self._slots_for_size(len(obj))
        if num_slots > MAX_OBJECT_SLOTS:
            return False
        for slot in range(1, num_slots + 1):
            from_offset = (slot - 1) * MAX_SLOT_SIZE
            to_offset = len(obj) if slot == num_slots else (slot * MAX_SLOT_SIZE)
            slot_header = self._get_slot_header(num_slots) if slot == 1 else ""
            chunks[key + "_" + str(slot)] = slot_header + obj[from_offset:to_offset]
        # Add chunks to cache
        if self.raw_putn(chunks, expiry_secs):
            # Chunks added. Prepare control db entry.
            entry = CacheEntry(key, len(obj))
            if search_info is not None:
                entry.searchfield1 = search_info["searchfield1"]
                entry.searchfield2 = search_info["searchfield2"]
                entry.searchfield3 = search_info["searchfield3"]
                entry.searchfield4 = search_info["searchfield4"]
                entry.searchfield5 = search_info["searchfield5"]
                if search_info["metadata"] is not None:
                    entry.metadata = cPickle.dumps(search_info["metadata"], protocol=cPickle.HIGHEST_PROTOCOL)
            # Add/update entry in the control db
            db_session = self._db.Session()
            db_committed = False
            try:
                db_session.merge(entry)
                db_session.commit()
                db_committed = True
            except IntegrityError:
                # Rarely, 2 threads merging (adding) the same key causes a duplicate key error
                db_session.rollback()
                db_session.query(CacheEntry).filter(CacheEntry.key == entry.key).update(
                    {
                        "valuesize": entry.valuesize,
                        "searchfield1": entry.searchfield1,
                        "searchfield2": entry.searchfield2,
                        "searchfield3": entry.searchfield3,
                        "searchfield4": entry.searchfield4,
                        "searchfield5": entry.searchfield5,
                        "metadata": entry.metadata,
                    },
                    synchronize_session=False,
                )
                db_session.commit()
                db_committed = True
            finally:
                try:
                    if not db_committed:
                        db_session.rollback()
                finally:
                    db_session.close()
            return True
        else:
            # Delete everything for key (if there was a previous object for this
            # key, we might now have a mix of chunk versions in the cache).
            self.delete(key)
            return False
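
As a usage illustration, a hedged example of how a caller might store an object with search metadata, following the search_info format described in the docstring. The cache instance, key, and all field values below are hypothetical.

# Hypothetical caller: `cache` is assumed to be an instance of the class above.
image_data = open('image.jpg', 'rb').read()

ok = cache.put(
    'img:1000:200x200',                  # cache key
    image_data,                          # chunked transparently if large
    expiry_secs=3600,                    # expire after one hour
    search_info={
        'searchfield1': 1000,            # illustrative values only
        'searchfield2': 200,
        'searchfield3': 200,
        'searchfield4': None,            # every key is required, values may be None
        'searchfield5': None,
        'metadata': {'format': 'jpeg'}   # pickled into the control database
    }
)
if not ok:
    # put() returns False if the object needs too many slots
    # or if the raw cache write fails
    print('Cache put failed')
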