Example #1
def command_whoosh(action=None):
    if action == "drop":
        write_handlers.drop_indexes()
    elif action == "reindex":
        # drop root privileges if running as root; the index needs to be
        # created with the same permissions the app will run with.
        drop_privileges()

        from opmuse.library import library_dao
        from opmuse.database import database_data, get_session

        database_data.database = get_session()

        write_handlers.init_indexes()

        for artist in library_dao.get_artists():
            search.add_artist(artist)

        for album in library_dao.get_albums():
            search.add_album(album)

        for track in library_dao.get_tracks():
            search.add_track(track)

        write_handlers.commit()

        database_data.database.remove()
        database_data.database = None
    else:
        parser.error('You need to provide a valid action (drop, reindex).')
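
A minimal, self-contained sketch of how an action dispatcher like this might be wired up with argparse. The parser setup and the print placeholders here are illustrative, not opmuse's actual CLI; only the drop/reindex dispatch pattern is taken from the example above:

import argparse

def command_whoosh(parser, action=None):
    if action == "drop":
        print("dropping indexes")    # stand-in for write_handlers.drop_indexes()
    elif action == "reindex":
        print("rebuilding indexes")  # stand-in for the reindex branch above
    else:
        parser.error("You need to provide a valid action (drop, reindex).")

if __name__ == "__main__":
    parser = argparse.ArgumentParser(prog="whoosh")
    parser.add_argument("action", nargs="?", choices=("drop", "reindex"))
    args = parser.parse_args()
    command_whoosh(parser, args.action)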
Example #2
    def _unserialize(self):
        """
        Unserialize / load cache objects from database.
        """

        database = get_session()

        item_count = 0

        for cache_object in database.query(CacheObject).all():
            item_count += 1

            if cache_object.type == 'object':
                value = pickle.loads(cache_object.value)
            elif cache_object.type == 'str':
                value = cache_object.value.decode()
            elif cache_object.type in ('dict', 'list'):
                value = json.loads(cache_object.value.decode())
            else:
                value = cache_object.value

            cache.storage.set(cache_object.key, cache_object.updated, value)

        database.remove()

        log("Unserialized %d cache objects" % item_count)
Example #3
    def _serialize(self):
        """
        Serialize cache objects to database.

        TODO actually use "updated" var to see if we even need to serialize/persist the changes...
        TODO lock cache storage while serializing?
        """

        database = get_session()

        item_count = 0

        for key, item in cache.storage.values():
            item_count += 1

            value = item['value']
            updated = item['updated']

            count = (database.query(func.count(CacheObject.id))
                     .filter(CacheObject.key == key).scalar())

            # Don't serialize Keep values. This is useful e.g. for bgtasks that
            # were added to the queue but never ran because of a restart; they
            # will simply be triggered again on the next start/use, so we don't
            # have to wait for the Keep time to run out.
            if value is Keep:
                continue
            elif isinstance(value, (str, dict, list)):
                # str, dict and list get cheap encodings matching _unserialize
                value_type = type(value).__name__
            else:
                # everything else falls back to pickle
                value_type = 'object'

            if value_type == 'object':
                value = pickle.dumps(value)
            elif value_type == 'str':
                value = value.encode()
            elif value_type in ('dict', 'list'):
                value = json.dumps(value).encode()

            if count > 0:
                parameters = {'updated': updated}

                parameters['value'] = value
                parameters['type'] = value_type

                database.query(CacheObject).filter(CacheObject.key == key).update(parameters)
            else:
                parameters = {'key': key, 'value': value, 'updated': updated, 'type': value_type}
                database.execute(CacheObject.__table__.insert(), parameters)

        database.commit()
        database.remove()

        log("Serialized %d cache objects" % item_count)
Example #4
    def _gc(self):
        database = get_session()

        total_bytes_before = self._total_bytes()

        old_item_count = 0

        now = int(time.time())

        keys = []

        # remove old objects from memory
        for key, in (database.query(CacheObject.key)
                     .filter((now - CacheObject.updated) > CachePlugin.GC_AGE).all()):
            cache.delete(key)
            keys.append(key)
            old_item_count += 1

        # remove old objects from database
        if len(keys) > 0:
            database.query(CacheObject).filter(CacheObject.key.in_(keys)).delete(synchronize_session='fetch')

        database.commit()

        debug('Garbage collected %d old cache objects because of age' % old_item_count)

        big_item_count = 0

        # remove the 10 oldest cache objects until total cache size is below limit
        while True:
            total_bytes = self._total_bytes()

            if total_bytes > CachePlugin.GC_SIZE:
                keys = []

                # remove old oversized objects from memory
                for key, in database.query(CacheObject.key).order_by(CacheObject.updated.asc()).limit(10).all():
                    cache.delete(key)
                    keys.append(key)
                    big_item_count += 1

                # remove old oversized objects from database
                database.query(CacheObject).filter(CacheObject.key.in_(keys)).delete(synchronize_session='fetch')

                database.commit()
            else:
                break

        debug('Garbage collected %d old cache objects because of total cache size' % big_item_count)

        database.remove()

        log("Garbage collected %d cache objects, total cache size was %d kb and is now %d kb" %
            (old_item_count + big_item_count, total_bytes_before / 1024, total_bytes / 1024))
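
_total_bytes is not shown in these examples. One plausible, purely hypothetical stand-in would sum the stored payload sizes in SQL, reusing the CacheObject model and get_session assumed above; the real opmuse implementation may measure this differently:

from sqlalchemy import func

def _total_bytes(database):
    # Hypothetical helper: sums the size of every stored payload in bytes,
    # returning 0 when the cache table is empty.
    return database.query(
        func.coalesce(func.sum(func.length(CacheObject.value)), 0)
    ).scalar()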
Example #5
    def run(self, number):
        debug("Starting bgtask thread #%d" % number)

        while self._running:
            if self._running == "stop":
                break

            item = None
            # initialize up front so the except/finally blocks below never hit
            # unbound names if fetching or unpacking the item fails
            func = args = kwargs = None
            thread = threading.current_thread()

            try:
                try:
                    item = self.queue.get(block=True, timeout=2)
                    # relies on the item's values() iterating in exactly this order
                    name, priority, func, args, kwargs = item.values()
                except queue.Empty:
                    if self._running == "drain":
                        break

                    continue
                else:
                    item.started = time.time()

                    thread.item = item
                    thread.name = name

                    self.running += 1

                    debug("Running bgtask in thread #%d %r with priority %d, args %r and kwargs %r." %
                          (number, func, priority, args, kwargs))

                    database_data.database = get_session()

                    func(*args, **kwargs)

                    try:
                        database_data.database.commit()
                    except:
                        database_data.database.rollback()
                        raise
            except Exception as error:
                log("Error in bgtask thread #%d %r, args %r and kwargs %r." %
                    (number, func, args, kwargs), traceback=True)

                item.fail(error)

                name, text, html = get_pretty_errors(sys.exc_info())

                name = "bgtask %r: %s" % (func, name)

                mail_pretty_errors(name, text, html)
            finally:
                if item is not None:
                    database_data.database.remove()
                    database_data.database = None

                    self.queue.task_done()
                    self.queue.done(item)

                    item.done = time.time()

                    thread.name = 'idle'

                    thread.item = None

                    self.done.put(item)

                    # store max 50 items in done queue
                    if self.done.qsize() > 50:
                        self.done.get()

                    self.running -= 1

        debug("Stopping bgtask thread #%d" % number)
Example #6
    def clean_up(self):
        # uses get_session() because this runs in a background thread and needs
        # to set up its own connection
        session = get_session()
        session.query(Session).filter(Session.expiration_time < self.now()).delete()
        session.commit()
        session.remove()
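
clean_up is written to run on a background thread. A minimal, generic way to schedule something like it periodically; this scheduling helper is illustrative, not opmuse's own mechanism:

import threading

def schedule(interval, func):
    # run func now, then every `interval` seconds on daemon timer threads
    def tick():
        func()
        timer = threading.Timer(interval, tick)
        timer.daemon = True
        timer.start()
    tick()

# e.g. purge expired sessions once a minute:
# schedule(60, session_manager.clean_up)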