Example #1
def upgrade(ver, session):
    if ver is None:
        log.info("Converting seen imdb_url to imdb_id for seen movies.")
        field_table = table_schema("seen_field", session)
        for row in session.execute(select([field_table.c.id, field_table.c.value], field_table.c.field == "imdb_url")):
            new_values = {"field": "imdb_id", "value": extract_id(row["value"])}
            session.execute(update(field_table, field_table.c.id == row["id"], new_values))
        ver = 1
    if ver == 1:
        field_table = table_schema("seen_field", session)
        log.info("Adding index to seen_field table.")
        Index("ix_seen_field_seen_entry_id", field_table.c.seen_entry_id).create(bind=session.bind)
        ver = 2
    if ver == 2:
        log.info("Adding local column to seen_entry table")
        table_add_column("seen_entry", "local", Boolean, session, default=False)
        ver = 3
    if ver == 3:
        # setting the default to False in the last migration was broken, fix the data
        log.info("Repairing seen table")
        entry_table = table_schema("seen_entry", session)
        session.execute(update(entry_table, entry_table.c.local == None, {"local": False}))
        ver = 4

    return ver
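
The upgrade functions on this page all follow one pattern: each if block migrates the schema up one version and bumps ver, so a database at any older version walks forward through every step in order. As a rough sketch of how such a function is wired up (assuming the versioned_base and upgrade helpers as they appear in FlexGet's db_schema module; the plugin name and steps below are placeholders):

# Minimal sketch, assuming FlexGet's db_schema helpers; 'my_plugin'
# and the migration steps are hypothetical placeholders.
from flexget import db_schema

SCHEMA_VER = 2
Base = db_schema.versioned_base('my_plugin', SCHEMA_VER)

@db_schema.upgrade('my_plugin')
def upgrade(ver, session):
    if ver is None:
        # ver is None when no schema version has been stored yet
        ver = 0
    if ver == 0:
        # one migration step per version, as in the examples on this page
        ver = 1
    if ver == 1:
        ver = 2
    return ver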
Example #2
def upgrade(ver, session):
    if ver == 0:
        table_names = [
            "rottentomatoes_actors",
            "rottentomatoes_alternate_ids",
            "rottentomatoes_directors",
            "rottentomatoes_genres",
            "rottentomatoes_links",
            "rottentomatoes_movie_actors",
            "rottentomatoes_movie_directors",
            "rottentomatoes_movie_genres",
            "rottentomatoes_movies",
            "rottentomatoes_posters",
            "rottentomatoes_releasedates",
            "rottentomatoes_search_results",
        ]
        tables = [table_schema(name, session) for name in table_names]
        for table in tables:
            session.execute(table.delete())
        table_add_column("rottentomatoes_actors", "rt_id", String, session)
        ver = 1
    if ver == 1:
        table = table_schema("rottentomatoes_search_results", session)
        session.execute(sql.delete(table, table.c.movie_id == None))
        ver = 2
    return ver
Example #3
def upgrade(ver, session):
    if ver is None:
        # Make sure there is no data we can't load in the backlog table
        backlog_table = table_schema('backlog', session)
        try:
            for item in session.query('entry').select_from(backlog_table).all():
                pickle.loads(item.entry)
        except (ImportError, TypeError):
            # If there were problems, we can drop the data.
            log.info('Backlog table contains unloadable data, clearing old data.')
            session.execute(backlog_table.delete())
        ver = 0
    if ver == 0:
        backlog_table = table_schema('backlog', session)
        log.info('Creating index on backlog table.')
        Index('ix_backlog_feed_expire', backlog_table.c.feed, backlog_table.c.expire).create(bind=session.bind)
        ver = 1
    if ver == 1:
        table = table_schema('backlog', session)
        table_add_column(table, 'json', Unicode, session)
        # Make sure we get the new schema with the added column
        table = table_schema('backlog', session)
        for row in session.execute(select([table.c.id, table.c.entry])):
            try:
                p = pickle.loads(row['entry'])
                session.execute(table.update().where(table.c.id == row['id']).values(
                    json=json.dumps(p, encode_datetime=True)))
            except KeyError as e:
                log.error('Error upgrading backlog pickle object due to %s' % str(e))

        ver = 2
    return ver
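
Several examples on this page repeat one migration: add a json column, then decode each pickled blob and re-store it as JSON. Below is a self-contained sketch of that pattern against an in-memory SQLite database, assuming SQLAlchemy 1.x (whose select([...]) form these examples use); the backlog table here is illustrative only.

# Self-contained sketch of the pickle-to-JSON column migration,
# assuming SQLAlchemy 1.x; plain SQLAlchemy and the stdlib json
# module stand in for FlexGet's helpers.
import json
import pickle

from sqlalchemy import (Column, Integer, LargeBinary, MetaData, Table,
                        Unicode, create_engine, select)

engine = create_engine('sqlite://')
metadata = MetaData()
backlog = Table('backlog', metadata,
                Column('id', Integer, primary_key=True),
                Column('entry', LargeBinary),
                Column('json', Unicode))
metadata.create_all(engine)

with engine.begin() as conn:
    conn.execute(backlog.insert(), [{'entry': pickle.dumps({'title': 'Example'})}])
    # Decode each pickled blob and store it in the new json column.
    for row in conn.execute(select([backlog.c.id, backlog.c.entry])):
        data = pickle.loads(row.entry)
        conn.execute(backlog.update()
                     .where(backlog.c.id == row.id)
                     .values(json=json.dumps(data)))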
Example #4
def upgrade(ver, session):
    if ver is None:
        log.info('Converting seen imdb_url to imdb_id for seen movies.')
        field_table = table_schema('seen_field', session)
        for row in session.execute(select([field_table.c.id, field_table.c.value], field_table.c.field == 'imdb_url')):
            new_values = {'field': 'imdb_id', 'value': extract_id(row['value'])}
            session.execute(update(field_table, field_table.c.id == row['id'], new_values))
        ver = 1
    if ver == 1:
        field_table = table_schema('seen_field', session)
        log.info('Adding index to seen_field table.')
        Index('ix_seen_field_seen_entry_id', field_table.c.seen_entry_id).create(bind=session.bind)
        ver = 2
    if ver == 2:
        log.info('Adding local column to seen_entry table')
        table_add_column('seen_entry', 'local', Boolean, session, default=False)
        ver = 3
    if ver == 3:
        # setting the default to False in the last migration was broken, fix the data
        log.info('Repairing seen table')
        entry_table = table_schema('seen_entry', session)
        session.execute(update(entry_table, entry_table.c.local == None, {'local': False}))
        ver = 4

    return ver
Example #5
def upgrade(ver, session):
    if ver is None:
        # Upgrade to version 0 was a failed attempt at cleaning bad entries from our table, better attempt in ver 1
        ver = 0
    if ver == 0:
        # Remove any values that are not loadable.
        table = table_schema('simple_persistence', session)
        for row in session.execute(select([table.c.id, table.c.plugin, table.c.key, table.c.value])):
            try:
                pickle.loads(row['value'])
            except Exception as e:
                log.warning('Couldn\'t load %s:%s, removing from db: %s' % (row['plugin'], row['key'], e))
                session.execute(table.delete().where(table.c.id == row['id']))
        ver = 1
    if ver == 1:
        log.info('Creating index on simple_persistence table.')
        create_index('simple_persistence', session, 'feed', 'plugin', 'key')
        ver = 2
    if ver == 2 or ver == 3:
        table = table_schema('simple_persistence', session)
        table_add_column(table, 'json', Unicode, session)
        # Make sure we get the new schema with the added column
        table = table_schema('simple_persistence', session)
        for row in session.execute(select([table.c.id, table.c.value])):
            try:
                p = pickle.loads(row['value'])
                session.execute(table.update().where(table.c.id == row['id']).values(
                    json=json.dumps(p, encode_datetime=True)))
            except KeyError as e:
                log.error('Error upgrading simple_persistence pickle object due to %s' % str(e))

        ver = 4
    return ver
Example #6
def upgrade(ver, session):
    if ver is None:
        # Upgrade to version 0 was a failed attempt at cleaning bad entries from our table, better attempt in ver 1
        ver = 1
    if ver == 1:
        table = table_schema('delay', session)
        table_add_column(table, 'json', Unicode, session)
        # Make sure we get the new schema with the added column
        table = table_schema('delay', session)
        failures = 0
        for row in session.execute(select([table.c.id, table.c.entry])):
            try:
                p = pickle.loads(row['entry'])
                session.execute(
                    table.update()
                    .where(table.c.id == row['id'])
                    .values(json=json.dumps(p, encode_datetime=True))
                )
            except (KeyError, ImportError):
                failures += 1
        if failures > 0:
            log.error(
                'Error upgrading %s pickle objects. Some delay information has been lost.'
                % failures
            )
        ver = 2

    return ver
Example #7
def upgrade(ver, session):
    if ver is None:
        log.info('Converting seen imdb_url to imdb_id for seen movies.')
        field_table = table_schema('seen_field', session)
        for row in session.execute(select([field_table.c.id, field_table.c.value], field_table.c.field == 'imdb_url')):
            new_values = {'field': 'imdb_id', 'value': extract_id(row['value'])}
            session.execute(update(field_table, field_table.c.id == row['id'], new_values))
        ver = 1
    if ver == 1:
        field_table = table_schema('seen_field', session)
        log.info('Adding index to seen_field table.')
        Index('ix_seen_field_seen_entry_id', field_table.c.seen_entry_id).create(bind=session.bind)
        ver = 2
    return ver
Example #8
def upgrade(ver, session):
    if ver == 0:
        table = table_schema('input_cache_entry', session)
        table_add_column(table, 'json', Unicode, session)
        # Make sure we get the new schema with the added column
        table = table_schema('input_cache_entry', session)
        for row in session.execute(select([table.c.id, table.c.entry])):
            try:
                p = pickle.loads(row['entry'])
                session.execute(table.update().where(table.c.id == row['id']).values(
                    json=json.dumps(p, encode_datetime=True)))
            except KeyError as e:
                log.error('Error upgrading input_cache pickle object due to %s' % str(e))
        ver = 1
    return ver
Example #9
def upgrade(ver, session):
    if ver is None:
        log.info('Adding index to md5sum column of log_once table.')
        table = table_schema('log_once', session)
        Index('log_once_md5sum', table.c.md5sum, unique=True).create()
        ver = 0
    return ver
Example #10
def db_upgrade(ver, session):
    if ver == 0:
        log.info('Recreating scheduler table. All schedules will trigger again after this upgrade.')
        table = table_schema('scheduler_triggers', session)
        table.drop()
        Base.metadata.create_all(bind=session.bind)
    return DB_SCHEMA_VER
Example #11
def upgrade(ver, session):
    if ver is None:
        # get rid of old index
        aet = table_schema('archive_entry', session)
        old_index = get_index_by_name(aet, 'archive_feed_title')
        if old_index is not None:
            log.info('Dropping legacy index (may take a while) ...')
            old_index.drop()
        # create new index by title, url
        new_index = get_index_by_name(Base.metadata.tables['archive_entry'], 'ix_archive_title_url')
        if new_index:
            log.info('Creating new index (may take a while) ...')
            new_index.create(bind=session.connection())
        else:
            # maybe removed from the model by later migrations?
            log.error('Unable to create index `ix_archive_title_url`, removed from the model?')
            # TODO: nag about this ?
        # This is safe as long as we don't delete the model completely :)
        # But generally never use Declarative Models in migrate!
        if session.query(ArchiveEntry).first():
            log.critical('----------------------------------------------')
            log.critical('You should run `--archive consolidate` ')
            log.critical('one time when you have time, it may take hours')
            log.critical('----------------------------------------------')
        ver = 0
    return ver
Example #12
def upgrade(ver, session):
    if ver is None:
        log.info('Converting seen imdb_url to imdb_id for seen movies.')
        field_table = table_schema('seen_field', session)
        for row in session.execute(select([field_table.c.id, field_table.c.value], field_table.c.field == 'imdb_url')):
            session.execute(update(field_table, field_table.c.id == row['id'],
                    {'field': 'imdb_id', 'value': extract_id(row['value'])}))
        ver = 1
    return ver
Example #13
def upgrade(ver, session):
    if ver is None:
        # Make sure there is no data we can't load in the backlog table
        backlog_table = table_schema('backlog', session)
        try:
            for item in session.query('entry').select_from(backlog_table).all():
                pickle.loads(item.entry)
        except (ImportError, TypeError):
            # If there were problems, we can drop the data.
            log.info('Backlog table contains unloadable data, clearing old data.')
            session.execute(backlog_table.delete())
        ver = 0
    if ver == 0:
        backlog_table = table_schema('backlog', session)
        log.info('Creating index on backlog table.')
        Index('ix_backlog_feed_expire', backlog_table.c.feed, backlog_table.c.expire).create(bind=session.bind)
        ver = 1
    return ver
Example #14
def upgrade(ver, session):
    if ver is None:
        if table_exists('episode_qualities', session):
            log.info('Series database format is too old to upgrade, dropping and recreating tables.')
            # Drop the deprecated data
            drop_tables(['series', 'series_episodes', 'episode_qualities'], session)
            # Create new tables from the current models
            Base.metadata.create_all(bind=session.bind)
        # Upgrade episode_releases table to have a proper count and seed it with appropriate numbers
        columns = table_columns('episode_releases', session)
        if 'proper_count' not in columns:
            log.info('Upgrading episode_releases table to have proper_count column')
            table_add_column('episode_releases', 'proper_count', Integer, session)
            release_table = table_schema('episode_releases', session)
            for row in session.execute(select([release_table.c.id, release_table.c.title])):
                # Recalculate the proper_count from title for old episodes
                proper_count = len([part for part in re.split(r'[\W_]+', row['title'].lower())
                                    if part in SeriesParser.propers])
                session.execute(update(release_table, release_table.c.id == row['id'], {'proper_count': proper_count}))
        ver = 0
    if ver == 0:
        log.info('Migrating first_seen column from series_episodes to episode_releases table.')
        # Create the column in episode_releases
        table_add_column('episode_releases', 'first_seen', DateTime, session)
        # Seed the first_seen value for all the past releases with the first_seen of their episode.
        episode_table = table_schema('series_episodes', session)
        release_table = table_schema('episode_releases', session)
        for row in session.execute(select([episode_table.c.id, episode_table.c.first_seen])):
            session.execute(update(release_table, release_table.c.episode_id == row['id'],
                                   {'first_seen': row['first_seen']}))
        ver = 1
    if ver == 1:
        log.info('Adding `identified_by` column to series table.')
        table_add_column('series', 'identified_by', String, session)
        ver = 2
    if ver == 2:
        release_table = table_schema('episode_releases', session)
        log.info('Creating index on episode_releases table.')
        Index('ix_episode_releases_episode_id', release_table.c.episode_id).create(bind=session.bind)
        ver = 3

    return ver
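
The proper_count recalculation in the first step above can be checked in isolation; the propers set below is a stand-in for SeriesParser.propers, assumed to contain tokens like these.

# Stand-alone check of the proper_count recalculation.
import re

propers = {'proper', 'repack', 'rerip'}  # stand-in for SeriesParser.propers
title = 'Show.S01E01.PROPER.REPACK.720p'
proper_count = len([part for part in re.split(r'[\W_]+', title.lower())
                    if part in propers])
assert proper_count == 2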
Example #15
def upgrade(ver, session):
    if ver == 0:
        # Translate old qualities into new quality requirements
        movie_table = table_schema('movie_queue', session)
        for row in session.execute(select([movie_table.c.id, movie_table.c.quality])):
            # Webdl quality no longer has dash
            new_qual = row['quality'].replace('web-dl', 'webdl')
            if new_qual.lower() != 'any':
                # Old behavior was to get specified quality or greater, approximate that with new system
                new_qual = ' '.join(qual + '+' for qual in new_qual.split(' '))
            session.execute(update(movie_table, movie_table.c.id == row['id'],
                                   {'quality': new_qual}))
        ver = 1
    if ver == 1:
        # Bad upgrade left some qualities as 'ANY+'
        movie_table = table_schema('movie_queue', session)
        for row in session.execute(select([movie_table.c.id, movie_table.c.quality])):
            if row['quality'].lower() == 'any+':
                session.execute(update(movie_table, movie_table.c.id == row['id'],
                                       {'quality': 'ANY'}))
        ver = 2
    if ver == 2:
        from flexget.utils.imdb import ImdbParser
        # Corrupted movie titles may be in the queue due to imdb layout changes. GitHub #729
        movie_table = table_schema('movie_queue', session)
        queue_base_table = table_schema('queue', session)
        query = select([movie_table.c.id, movie_table.c.imdb_id, queue_base_table.c.title])
        query = query.where(movie_table.c.id == queue_base_table.c.id)
        for row in session.execute(query):
            if row['imdb_id'] and (not row['title'] or row['title'] == 'None' or '\n' in row['title']):
                log.info('Fixing movie_queue title for %s' % row['imdb_id'])
                parser = ImdbParser()
                parser.parse(row['imdb_id'])
                if parser.name:
                    session.execute(update(queue_base_table, queue_base_table.c.id == row['id'],
                                           {'title': parser.name}))
        ver = 3
    if ver == 3:
        # adding queue_name column to movie_queue table and setting initial value to default
        table_add_column('movie_queue', 'queue_name', Unicode, session, default='default')
        ver = 4
    return ver
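
The version 0 quality translation above is easy to verify on a sample value: web-dl loses its dash, and each remaining quality gets a '+' suffix to approximate the old "this quality or better" behavior.

# Worked example of the version 0 quality rewrite.
quality = '720p web-dl'
new_qual = quality.replace('web-dl', 'webdl')
if new_qual.lower() != 'any':
    new_qual = ' '.join(qual + '+' for qual in new_qual.split(' '))
assert new_qual == '720p+ webdl+'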
Example #16
def upgrade(ver, session):
    if ver is None:
        # Upgrade to version 0 was a failed attempt at cleaning bad entries from our table, better attempt in ver 1
        ver = 1
    if ver == 1:
        table = table_schema('delay', session)
        table_add_column(table, 'json', Unicode, session)
        # Make sure we get the new schema with the added column
        table = table_schema('delay', session)
        for row in session.execute(select([table.c.id, table.c.entry])):
            try:
                p = pickle.loads(row['entry'])
                session.execute(table.update().where(table.c.id == row['id']).values(
                    json=json.dumps(p, encode_datetime=True)))
            except KeyError as e:
                log.error('Error upgrading delay pickle object due to %s' % str(e))

        ver = 2

    return ver
Example #17
def upgrade(ver, session):
    if ver == 0:
        # Translate old qualities into new quality requirements
        movie_table = table_schema("movie_queue", session)
        for row in session.execute(select([movie_table.c.id, movie_table.c.quality])):
            # Webdl quality no longer has dash
            new_qual = row["quality"].replace("web-dl", "webdl")
            if new_qual.lower() != "any":
                # Old behavior was to get specified quality or greater, approximate that with new system
                new_qual = " ".join(qual + "+" for qual in new_qual.split(" "))
            session.execute(update(movie_table, movie_table.c.id == row["id"], {"quality": new_qual}))
        ver = 1
    if ver == 1:
        # Bad upgrade left some qualities as 'ANY+'
        movie_table = table_schema("movie_queue", session)
        for row in session.execute(select([movie_table.c.id, movie_table.c.quality])):
            if row["quality"].lower() == "any+":
                session.execute(update(movie_table, movie_table.c.id == row["id"], {"quality": "ANY"}))
        ver = 2
    return ver
Example #18
def upgrade(ver, session):
    if ver == 0:
        series_table = table_schema('tvrage_series', session)
        for row in session.execute(select([series_table.c.id, series_table.c.genres])):
            # Convert comma-separated genres to pipe-separated
            new_genres = row['genres']
            if new_genres:
                new_genres = row['genres'].replace(',', '|')
            session.execute(update(series_table, series_table.c.id == row['id'], {'genres': new_genres}))
        ver = 1
    return ver
Example #19
def upgrade(ver, session):
    if ver == 0:
        # Translate old qualities into new quality requirements
        movie_table = table_schema('movie_queue', session)
        for row in session.execute(select([movie_table.c.id, movie_table.c.quality])):
            # Webdl quality no longer has dash
            new_qual = row['quality'].replace('web-dl', 'webdl')
            if new_qual.lower() != 'any':
                # Old behavior was to get specified quality or greater, approximate that with new system
                new_qual = ' '.join(qual + '+' for qual in new_qual.split(' '))
            session.execute(update(movie_table, movie_table.c.id == row['id'],
                    {'quality': new_qual}))
        ver = 1
    if ver == 1:
        # Bad upgrade left some qualities as 'ANY+'
        movie_table = table_schema('movie_queue', session)
        for row in session.execute(select([movie_table.c.id, movie_table.c.quality])):
            if row['quality'].lower() == 'any+':
                session.execute(update(movie_table, movie_table.c.id == row['id'],
                        {'quality': 'ANY'}))
        ver = 2
    return ver
Example #20
def upgrade(ver, session):
    if ver is None:
        # Make sure there is no data we can't load in the backlog table
        backlog_table = table_schema('backlog', session)
        try:
            for item in session.query('entry').select_from(backlog_table).all():
                pickle.loads(item.entry)
        except ImportError:
            # If there were problems, we can drop the data.
            log.info('Backlog table contains unloadable data, clearing old data.')
            session.execute(backlog_table.delete())
        ver = 0
    return ver
Example #21
def upgrade(ver, session):
    if ver == 0:
        table_names = ['rottentomatoes_actors', 'rottentomatoes_alternate_ids',
                       'rottentomatoes_directors', 'rottentomatoes_genres', 'rottentomatoes_links',
                       'rottentomatoes_movie_actors', 'rottentomatoes_movie_directors',
                       'rottentomatoes_movie_genres', 'rottentomatoes_movies', 'rottentomatoes_posters',
                       'rottentomatoes_releasedates', 'rottentomatoes_search_results']
        tables = [table_schema(name, session) for name in table_names]
        for table in tables:
            session.execute(table.delete())
        table_add_column('rottentomatoes_actors', 'rt_id', String, session)
        ver = 1
    return ver
Example #22
def upgrade(ver, session):
    if ver == 0:
        # Translate old qualities into new quality requirements
        movie_table = table_schema("movie_queue", session)
        for row in session.execute(select([movie_table.c.id, movie_table.c.quality])):
            # Webdl quality no longer has dash
            new_qual = row["quality"].replace("web-dl", "webdl")
            if new_qual.lower() != "any":
                # Old behavior was to get specified quality or greater, approximate that with new system
                new_qual = " ".join(qual + "+" for qual in new_qual.split(" "))
            session.execute(update(movie_table, movie_table.c.id == row["id"], {"quality": new_qual}))
        ver = 1
    if ver == 1:
        # Bad upgrade left some qualities as 'ANY+'
        movie_table = table_schema("movie_queue", session)
        for row in session.execute(select([movie_table.c.id, movie_table.c.quality])):
            if row["quality"].lower() == "any+":
                session.execute(update(movie_table, movie_table.c.id == row["id"], {"quality": "ANY"}))
        ver = 2
    if ver == 2:
        from flexget.utils.imdb import ImdbParser

        # Corrupted movie titles may be in the queue due to imdb layout changes. GitHub #729
        movie_table = table_schema("movie_queue", session)
        queue_base_table = table_schema("queue", session)
        query = select([movie_table.c.id, movie_table.c.imdb_id, queue_base_table.c.title])
        query = query.where(movie_table.c.id == queue_base_table.c.id)
        for row in session.execute(query):
            if row["imdb_id"] and (not row["title"] or row["title"] == "None" or "\n" in row["title"]):
                log.info("Fixing movie_queue title for %s" % row["imdb_id"])
                parser = ImdbParser()
                parser.parse(row["imdb_id"])
                if parser.name:
                    session.execute(
                        update(queue_base_table, queue_base_table.c.id == row["id"], {"title": parser.name})
                    )
        ver = 3
    return ver
Example #23
def upgrade(ver, session):
    if ver is None:
        columns = table_columns('imdb_movies', session)
        if 'photo' not in columns:
            log.info('Adding photo column to imdb_movies table.')
            table_add_column('imdb_movies', 'photo', String, session)
        if 'updated' not in columns:
            log.info('Adding updated column to imdb_movies table.')
            table_add_column('imdb_movies', 'updated', DateTime, session)
        if 'mpaa_rating' not in columns:
            log.info('Adding mpaa_rating column to imdb_movies table.')
            table_add_column('imdb_movies', 'mpaa_rating', String, session)
        ver = 0
    if ver == 0:
        # create indexes retrospectively (~r2563)
        log.info('Adding imdb indexes delivering up to 20x speed increase \\o/ ...')
        indexes = [
            get_index_by_name(actors_table, 'ix_imdb_movie_actors'),
            get_index_by_name(genres_table, 'ix_imdb_movie_genres'),
            get_index_by_name(directors_table, 'ix_imdb_movie_directors')
        ]
        for index in indexes:
            if index is None:
                log.critical('Index adding failure!')
                continue
            log.info('Creating index %s ...' % index.name)
            index.create(bind=session.connection())
        ver = 1
    if ver == 1:
        # http://flexget.com/ticket/1399
        log.info('Adding prominence column to imdb_movie_languages table.')
        table_add_column('imdb_movie_languages', 'prominence', Integer,
                         session)
        ver = 2
    if ver == 2:
        log.info(
            'Adding search result timestamp and clearing all previous results.'
        )
        table_add_column('imdb_search', 'queried', DateTime, session)
        search_table = table_schema('imdb_search', session)
        session.execute(delete(search_table, search_table.c.fails))
        ver = 3
    if ver == 3:
        log.info(
            'Adding original title column, cached data will not have this information'
        )
        table_add_column('imdb_movies', 'original_title', Unicode, session)
        ver = 4
    return ver
Example #24
def upgrade(ver, session):
    if ver is None:
        # Upgrade to version 0 was a failed attempt at cleaning bad entries from our table, better attempt in ver 1
        ver = 0
    if ver == 0:
        # Remove any values that are not loadable.
        table = table_schema('simple_persistence', session)
        for row in session.execute(select([table.c.id, table.c.plugin, table.c.key, table.c.value])):
            try:
                p = pickle.loads(row['value'])
            except Exception as e:
                log.warning('Couldn\'t load %s:%s, removing from db: %s' % (row['plugin'], row['key'], e))
                session.execute(table.delete().where(table.c.id == row['id']))
        ver = 1
    return ver
Example #25
def upgrade(ver, session):
    if ver is None:
        log.info('Adding columns to tmdb cache table, marking current cache as expired.')
        table_add_column('tmdb_movies', 'runtime', Integer, session)
        table_add_column('tmdb_movies', 'tagline', Unicode, session)
        table_add_column('tmdb_movies', 'budget', Integer, session)
        table_add_column('tmdb_movies', 'revenue', Integer, session)
        table_add_column('tmdb_movies', 'homepage', String, session)
        table_add_column('tmdb_movies', 'trailer', String, session)
        # Mark all cached movies as expired, so new fields get populated next lookup
        movie_table = table_schema('tmdb_movies', session)
        session.execute(movie_table.update(values={'updated': datetime(1970, 1, 1)}))
        ver = 0
    return ver
Example #26
def upgrade(ver, session):
    if ver is None:
        log.info('Converting seen imdb_url to imdb_id for seen movies.')
        field_table = table_schema('seen_field', session)
        for row in session.execute(
                select([field_table.c.id, field_table.c.value],
                       field_table.c.field == 'imdb_url')):
            new_values = {
                'field': 'imdb_id',
                'value': extract_id(row['value'])
            }
            session.execute(
                update(field_table, field_table.c.id == row['id'], new_values))
        ver = 1
    if ver == 1:
        field_table = table_schema('seen_field', session)
        log.info('Adding index to seen_field table.')
        Index('ix_seen_field_seen_entry_id',
              field_table.c.seen_entry_id).create(bind=session.bind)
        ver = 2
    if ver == 2:
        log.info('Adding local column to seen_entry table')
        table_add_column('seen_entry',
                         'local',
                         Boolean,
                         session,
                         default=False)
        ver = 3
    if ver == 3:
        # setting the default to False in the last migration was broken, fix the data
        log.info('Repairing seen table')
        entry_table = table_schema('seen_entry', session)
        session.execute(
            update(entry_table, entry_table.c.local == None, {'local': False}))
        ver = 4

    return ver
Example #27
def upgrade(ver, session):
    if ver is None:
        # add count column
        table_add_column('failed', 'count', Integer, session, default=1)
        ver = 0
    if ver == 0:
        # define an index
        log.info('Adding database index ...')
        failed = table_schema('failed', session)
        Index('failed_title_url', failed.c.title, failed.c.url, failed.c.count).create()
        ver = 1
    if ver == 1:
        table_add_column('failed', 'reason', Unicode, session)
        ver = 2
    return ver
Example #28
def upgrade(ver, session):
    if ver == 0:
        series_table = table_schema('tvrage_series', session)
        for row in session.execute(
                select([series_table.c.id, series_table.c.genres])):
            # Convert comma-separated genres to pipe-separated
            new_genres = row['genres']
            if new_genres:
                new_genres = row['genres'].replace(',', '|')
            session.execute(
                update(series_table, series_table.c.id == row['id'],
                       {'genres': new_genres}))
        ver = 1
    if ver == 1:
        raise db_schema.UpgradeImpossible
    return ver
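
Raising db_schema.UpgradeImpossible, as version 1 does above, signals that the stored data cannot be migrated; in FlexGet's source this causes the plugin's schema to be reset, dropping and recreating its tables. A hedged sketch of that escape hatch (the plugin name mirrors the example above):

# Sketch only: bail out of a migration that cannot preserve the data.
from flexget import db_schema

@db_schema.upgrade('tvrage')
def upgrade(ver, session):
    if ver == 1:
        # FlexGet responds by dropping and recreating this plugin's tables.
        raise db_schema.UpgradeImpossible
    return ver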
Example #29
def migrate_imdb_queue(manager):
    """If imdb_queue table is found, migrate the data to movie_queue"""
    session = Session()
    try:
        if table_exists('imdb_queue', session):
            log.info('Migrating imdb_queue items to movie_queue')
            old_table = table_schema('imdb_queue', session)
            for row in session.execute(old_table.select()):
                try:
                    queue_add(imdb_id=row['imdb_id'], quality=row['quality'], session=session)
                except QueueError as e:
                    log.error('Unable to migrate %s from imdb_queue to movie_queue: %s' % (row['title'], e))
            old_table.drop()
            session.commit()
    finally:
        session.close()
Example #30
def upgrade(ver, session):
    if ver == 0:
        table = table_schema('pending_entries', session)
        for row in session.execute(select([table.c.id, table.c.json])):
            if not row['json']:
                # Seems there could be invalid data somehow. See #2590
                continue
            data = json.loads(row['json'], decode_datetime=True)
            # If title looked like a date, make sure it's a string
            title = str(data.pop('title'))
            e = Entry(title=title, **data)
            session.execute(table.update().where(
                table.c.id == row['id']).values(json=serialization.dumps(e)))

        ver = 1
    return ver
Example #31
def upgrade(ver, session):
    if ver is None:
        # add count column
        table_add_column('failed', 'count', Integer, session, default=1)
        ver = 0
    if ver == 0:
        # define an index
        log.info('Adding database index ...')
        failed = table_schema('failed', session)
        Index('failed_title_url', failed.c.title, failed.c.url,
              failed.c.count).create()
        ver = 1
    if ver == 1:
        table_add_column('failed', 'reason', Unicode, session)
        ver = 2
    return ver
Example #32
def migrate_imdb_queue(manager):
    """If imdb_queue table is found, migrate the data to movie_queue"""
    session = Session()
    try:
        if table_exists('imdb_queue', session):
            log.info('Migrating imdb_queue items to movie_queue')
            old_table = table_schema('imdb_queue', session)
            for row in session.execute(old_table.select()):
                try:
                    queue_add(imdb_id=row['imdb_id'], quality=row['quality'], session=session)
                except QueueError as e:
                    log.error('Unable to migrate %s from imdb_queue to movie_queue: %s' % (row['title'], e))
            old_table.drop()
            session.commit()
    finally:
        session.close()
Example #33
def upgrade(ver, session):
    if ver is None:
        # Upgrade to version 0 was a failed attempt at cleaning bad entries from our table, better attempt in ver 1
        ver = 0
    if ver == 0:
        # Remove any values that are not loadable.
        table = table_schema('simple_persistence', session)
        for row in session.execute(
                select(
                    [table.c.id, table.c.plugin, table.c.key, table.c.value])):
            try:
                p = pickle.loads(row['value'])
            except Exception as e:
                log.warning('Couldn\'t load %s:%s, removing from db: %s' %
                            (row['plugin'], row['key'], e))
                session.execute(table.delete().where(table.c.id == row['id']))
        ver = 1
    return ver
Example #34
def upgrade(ver, session):
    if ver is None:
        log.info(
            'Adding columns to tmdb cache table, marking current cache as expired.'
        )
        table_add_column('tmdb_movies', 'runtime', Integer, session)
        table_add_column('tmdb_movies', 'tagline', Unicode, session)
        table_add_column('tmdb_movies', 'budget', Integer, session)
        table_add_column('tmdb_movies', 'revenue', Integer, session)
        table_add_column('tmdb_movies', 'homepage', String, session)
        table_add_column('tmdb_movies', 'trailer', String, session)
        # Mark all cached movies as expired, so new fields get populated next lookup
        movie_table = table_schema('tmdb_movies', session)
        session.execute(
            movie_table.update(values={'updated': datetime(1970, 1, 1)}))
        ver = 0
    return ver
Example #35
def upgrade(ver, session):
    if ver is None:
        # add count column
        table_add_column("failed", "count", Integer, session, default=1)
        ver = 0
    if ver == 0:
        # define an index
        log.info("Adding database index ...")
        failed = table_schema("failed", session)
        Index("failed_title_url", failed.c.title, failed.c.url, failed.c.count).create()
        ver = 1
    if ver == 1:
        table_add_column("failed", "reason", Unicode, session)
        ver = 2
    if ver == 2:
        table_add_column("failed", "retry_time", DateTime, session)
        ver = 3
    return ver
Example #36
def reset_schema(plugin, session=None):
    """
    Removes all of the given plugin's tables from the database, as well as the plugin's stored schema version.

    :param plugin: The plugin whose schema should be reset
    """
    if plugin not in plugin_schemas:
        raise ValueError('The plugin %s has no stored schema to reset.' % plugin)
    table_names = plugin_schemas[plugin].get('tables', [])
    tables = [table_schema(name, session) for name in table_names]
    # Remove the plugin's tables
    for table in tables:
        table.drop()
    # Remove the plugin from schema table
    session.query(PluginSchema).filter(PluginSchema.plugin == plugin).delete()
    # Create new empty tables
    Base.metadata.create_all(bind=session.bind)
    session.commit()
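
A hypothetical usage of reset_schema, assuming FlexGet's Session factory from flexget.manager; the plugin name below is illustrative.

# Hypothetical usage sketch; 'seen' stands in for any plugin with a
# stored schema.
from flexget.manager import Session

session = Session()
try:
    reset_schema('seen', session=session)
finally:
    session.close()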
Example #37
def upgrade(ver, session):
    if ver is None:
        columns = table_columns('imdb_movies', session)
        if 'photo' not in columns:
            log.info('Adding photo column to imdb_movies table.')
            table_add_column('imdb_movies', 'photo', String, session)
        if 'updated' not in columns:
            log.info('Adding updated column to imdb_movies table.')
            table_add_column('imdb_movies', 'updated', DateTime, session)
        if 'mpaa_rating' not in columns:
            log.info('Adding mpaa_rating column to imdb_movies table.')
            table_add_column('imdb_movies', 'mpaa_rating', String, session)
        ver = 0
    if ver == 0:
        # create indexes retrospectively (~r2563)
        log.info('Adding imdb indexes delivering up to 20x speed increase \\o/ ...')
        indexes = [
            get_index_by_name(actors_table, 'ix_imdb_movie_actors'),
            get_index_by_name(genres_table, 'ix_imdb_movie_genres'),
            get_index_by_name(directors_table, 'ix_imdb_movie_directors')
        ]
        for index in indexes:
            if index is None:
                log.critical('Index adding failure!')
                continue
            log.info('Creating index %s ...' % index.name)
            index.create(bind=session.connection())
        ver = 1
    if ver == 1:
        # http://flexget.com/ticket/1399
        log.info('Adding prominence column to imdb_movie_languages table.')
        table_add_column('imdb_movie_languages', 'prominence', Integer,
                         session)
        ver = 2
    if ver == 2:
        log.info(
            'Adding search result timestamp and clearing all previous results.'
        )
        table_add_column('imdb_search', 'queried', DateTime, session)
        search_table = table_schema('imdb_search', session)
        session.execute(delete(search_table, search_table.c.fails))
        ver = 3
    return ver
Example #38
def reset_schema(plugin, session=None):
    """
    Removes all of the given plugin's tables from the database, as well as the plugin's stored schema version.

    :param plugin: The plugin whose schema should be reset
    """
    if plugin not in plugin_schemas:
        raise ValueError('The plugin %s has no stored schema to reset.' %
                         plugin)
    table_names = plugin_schemas[plugin].get('tables', [])
    tables = [table_schema(name, session) for name in table_names]
    # Remove the plugin's tables
    for table in tables:
        table.drop()
    # Remove the plugin from schema table
    session.query(PluginSchema).filter(PluginSchema.plugin == plugin).delete()
    # Create new empty tables
    Base.metadata.create_all(bind=session.bind)
    session.commit()
Example #39
def reset_schema(plugin, session=None):
    """
    Removes all of the given plugin's tables from the database, as well as the plugin's stored schema version.

    :param plugin: The plugin whose schema should be reset
    """
    if plugin not in plugin_schemas:
        raise ValueError('The plugin %s has no stored schema to reset.' % plugin)
    table_names = plugin_schemas[plugin].get('tables', [])
    tables = [table_schema(name, session) for name in table_names]
    # Remove the plugin's tables
    for table in tables:
        try:
            table.drop()
        except OperationalError as e:
            if 'no such table' in str(e):
                continue
            raise e
    # Remove the plugin from schema table
    session.query(PluginSchema).filter(PluginSchema.plugin == plugin).delete()
    # We need to commit our current changes to close the session before calling create_all
    session.commit()
    # Create new empty tables
    Base.metadata.create_all(bind=session.bind)