示例#1
0
    def imdbMatch(self, url, imdbId):
        """Check whether *url* refers to the movie with IMDB id *imdbId*.

        First tries to extract an IMDB id directly from the url string;
        if that fails and the url looks like a web link, downloads the
        page (via the cache) and searches the response body instead.

        :param url: url (or arbitrary string) to inspect
        :param imdbId: IMDB identifier to match against
        :return: True when the id matches, False otherwise
        """
        if get_imdb(url) == imdbId:
            return True

        # Only fetch when it actually looks like a web address
        if url.startswith('http'):
            try:
                cache_key = md5(url)
                data = self.getCache(cache_key, url)
            except IOError:
                log.error('Failed to open %s.', url)
                return False

            return get_imdb(data) == imdbId

        return False
示例#2
0
    def get(self, media_id):
        """Return the media document for *media_id*, or None when not found.

        *media_id* may be an internal document id or anything containing
        an IMDB identifier; in the latter case the lookup goes through
        the 'media' imdb index. Category and releases are attached to
        the returned document.
        """
        try:
            db = get_db()

            imdb_id = get_imdb(str(media_id))

            if imdb_id:
                media = db.get('media', 'imdb-%s' % imdb_id,
                               with_doc=True)['doc']
            else:
                media = db.get('id', media_id)

            if media:

                # Attach category (best effort; media may have no category)
                try:
                    media['category'] = db.get('id', media.get('category_id'))
                except Exception:
                    pass

                media['releases'] = fire_event('release.for_media',
                                               media['_id'],
                                               single=True)

            return media

        except (RecordNotFound, RecordDeleted):
            log.error('Media with id "%s" not found', media_id)
        # NOTE: the original trailing `except: raise` was a no-op
        # (any other exception propagates anyway) and was removed
示例#3
0
    def search(self, q = '', types = None, **kwargs):
        """Search media by free-text query or by an IMDB identifier in *q*.

        When *types* is given, one search/info lookup runs per media
        type; otherwise a single merged lookup runs across providers.
        Returns a dict of results merged with {'success': True}.
        """
        # Normalise types: a single string becomes a one-item list
        if isinstance(types, str):
            types = [types]
        elif isinstance(types, (list, tuple, set)):
            types = list(types)

        imdb_identifier = get_imdb(q)

        if types:
            result = {}
            for media_type in types:
                if imdb_identifier:
                    result[media_type] = fire_event(
                        '%s.info' % media_type, identifier=imdb_identifier)
                else:
                    result[media_type] = fire_event(
                        '%s.search' % media_type, q=q)
        elif imdb_identifier:
            info = fire_event('movie.info', identifier=imdb_identifier, merge=True)
            result = {info['type']: [info]}
        else:
            result = fire_event('info.search', q=q, merge=True)

        return merge_dictionaries({
            'success': True,
        }, result)
示例#4
0
    def getMovie(self, url):
        """Fetch *url* and return movie info for the IMDB id found in
        the page, or None when the page could not be retrieved.
        """
        cookie = {'Cookie': 'c*k=1'}

        try:
            data = self.urlopen(url, headers = cookie)
        # urlopen can fail for many (network) reasons; treat any failure
        # as "no movie" — but don't swallow SystemExit/KeyboardInterrupt
        except Exception:
            return

        return self.getInfo(get_imdb(data))
示例#5
0
    def correctRelease(self, nzb=None, media=None, quality=None, **kwargs):
        """Decide whether release *nzb* is acceptable for *media*.

        Runs the release through retention, required/ignored word,
        other-quality, 3D and size filters, then tries to match it to
        the movie by IMDB id or by title/year.

        :param nzb: release dict (name, size, age, description, ...)
        :param media: media dict with 'info' (year, titles) attached
        :param quality: wanted quality dict
        :return: True when acceptable, False when rejected,
                 None for non-movie media
        """
        if media.get('type') != 'movie': return

        media_title = fire_event('searcher.get_search_title',
                                 media,
                                 single=True)

        imdb_results = kwargs.get('imdb_results', False)
        retention = Env.setting('retention', section='nzb')

        # Usenet only (no seeders key): reject releases older than retention
        if nzb.get('seeders') is None and 0 < retention < nzb.get('age', 0):
            log.info2(
                'Wrong: Outside retention, age is %s, needs %s or lower: %s',
                (nzb['age'], retention, nzb['name']))
            return False

        # Check for required and ignored words
        if not fire_event(
                'searcher.correct_words', nzb['name'], media, single=True):
            return False

        # NOTE(review): when quality is falsy the fallback branch still
        # reads quality['identifier'] and would raise a TypeError —
        # confirm callers always pass a quality dict
        preferred_quality = quality if quality else fire_event(
            'quality.single', identifier=quality['identifier'], single=True)

        # Contains lower quality string
        contains_other = fire_event('searcher.contains_other_quality',
                                    nzb,
                                    movie_year=media['info']['year'],
                                    preferred_quality=preferred_quality,
                                    single=True)
        if contains_other and isinstance(contains_other, dict):
            log.info2(
                'Wrong: %s, looking for %s, found %s',
                (nzb['name'], quality['label'], [x for x in contains_other]
                 if contains_other else 'no quality'))
            return False

        # Wrong 3D-ness for the wanted quality
        if not fire_event('searcher.correct_3d',
                          nzb,
                          preferred_quality=preferred_quality,
                          single=True):
            log.info2(
                'Wrong: %s, %slooking for %s in 3D',
                (nzb['name'],
                 ('' if preferred_quality['custom'].get('3d') else 'NOT '),
                 quality['label']))
            return False

        # File too small
        if nzb['size'] and try_int(preferred_quality['size_min']) > try_int(
                nzb['size']):
            log.info2(
                'Wrong: "%s" is too small to be %s. %sMB instead of the minimal of %sMB.',
                (nzb['name'], preferred_quality['label'], nzb['size'],
                 preferred_quality['size_min']))
            return False

        # File too large
        if nzb['size'] and try_int(preferred_quality['size_max']) < try_int(
                nzb['size']):
            log.info2(
                'Wrong: "%s" is too large to be %s. %sMB instead of the maximum of %sMB.',
                (nzb['name'], preferred_quality['label'], nzb['size'],
                 preferred_quality['size_max']))
            return False

        # Provider specific functions
        get_more = nzb.get('get_more_info')
        if get_more:
            get_more(nzb)

        extra_check = nzb.get('extra_check')
        if extra_check and not extra_check(nzb):
            return False

        if imdb_results:
            return True

        # Check if nzb contains imdb link
        if get_imdb(nzb.get('description', '')) == get_identifier(media):
            return True

        for raw_title in media['info']['titles']:
            for movie_title in possible_titles(raw_title):
                # Raw string so \W is a regex class, not an invalid escape
                movie_words = re.split(r'\W+', simplify_string(movie_title))

                if fire_event('searcher.correct_name',
                              nzb['name'],
                              movie_title,
                              single=True):
                    # if no IMDB link, at least check year range 1
                    if len(movie_words) > 2 and fire_event(
                            'searcher.correct_year',
                            nzb['name'],
                            media['info']['year'],
                            1,
                            single=True):
                        return True

                    # if no IMDB link, at least check year
                    if len(movie_words) <= 2 and fire_event(
                            'searcher.correct_year',
                            nzb['name'],
                            media['info']['year'],
                            0,
                            single=True):
                        return True

        log.info("Wrong: %s, undetermined naming. Looking for '%s (%s)'",
                 (nzb['name'], media_title, media['info']['year']))
        return False
示例#6
0
 def getMovie(self, url):
     """Return movie info for the IMDB id extracted from *url*."""
     imdb_id = get_imdb(url)
     return self.getInfo(imdb_id)
示例#7
0
    def determineMedia(self, group, release_download=None):
        """Figure out which movie a scanned file *group* belongs to.

        Tries, in order: the downloader-supplied imdb id, a CP(imdb) tag
        in the file paths, the nfo files, the file names themselves, and
        finally a title/year search based on the group identifiers.

        :return: the library media document when found, a bare
            {'identifier', 'info'} dict when the movie is not in the
            library, or {} when nothing matched.
        """
        # Get imdb id from downloader
        imdb_id = release_download and release_download.get('imdb_id')
        if imdb_id:
            log.debug('Found movie via imdb id from it\'s download id: %s',
                      release_download.get('imdb_id'))

        files = group['files']

        # Check for CP(imdb_id) string in the file paths
        if not imdb_id:
            for cur_file in files['movie']:
                imdb_id = self.getCPImdb(cur_file)
                if imdb_id:
                    log.debug('Found movie via CP tag: %s', cur_file)
                    break

        # Check and see if nfo contains the imdb-id
        nfo_file = None
        if not imdb_id:
            try:
                for nf in files['nfo']:
                    imdb_id = get_imdb(nf, check_inside=True)
                    if imdb_id:
                        log.debug('Found movie via nfo file: %s', nf)
                        nfo_file = nf
                        break
            except:
                pass

        # Check and see if filenames contains the imdb-id
        if not imdb_id:
            try:
                for filetype in files:
                    for filetype_file in files[filetype]:
                        imdb_id = get_imdb(filetype_file)
                        if imdb_id:
                            # Fixed: log the matching file (previously
                            # logged the unrelated nfo_file variable)
                            log.debug('Found movie via imdb in filename: %s',
                                      filetype_file)
                            break
                    # Fixed: also stop the outer loop, otherwise the next
                    # filetype iteration overwrites the found id
                    if imdb_id:
                        break
            except:
                pass

        # Search based on identifiers
        if not imdb_id:
            for identifier in group['identifiers']:

                if len(identifier) > 2:
                    try:
                        filename = list(group['files'].get('movie'))[0]
                    except:
                        filename = None

                    name_year = self.getReleaseNameYear(
                        identifier,
                        file_name=filename if not group['is_dvd'] else None)
                    if name_year.get('name') and name_year.get('year'):
                        search_q = '%(name)s %(year)s' % name_year
                        movie = fire_event('movie.search',
                                           q=search_q,
                                           merge=True,
                                           limit=1)

                        # Try with other
                        if len(movie) == 0 and name_year.get(
                                'other') and name_year['other'].get(
                                    'name') and name_year['other'].get('year'):
                            search_q2 = '%(name)s %(year)s' % name_year.get(
                                'other')
                            if search_q2 != search_q:
                                movie = fire_event('movie.search',
                                                   q=search_q2,
                                                   merge=True,
                                                   limit=1)

                        if len(movie) > 0:
                            imdb_id = movie[0].get('imdb')
                            log.debug('Found movie via search: %s', identifier)
                            if imdb_id: break
                else:
                    log.debug('Identifier to short to use for search: %s',
                              identifier)

        if imdb_id:
            try:
                db = get_db()
                return db.get('media', 'imdb-%s' % imdb_id,
                              with_doc=True)['doc']
            except:
                log.debug('Movie "%s" not in library, just getting info',
                          imdb_id)
                return {
                    'identifier':
                    imdb_id,
                    'info':
                    fire_event('movie.info',
                               identifier=imdb_id,
                               merge=True,
                               extended=False)
                }

        log.error(
            'No imdb_id found for %s. Add a NFO file with IMDB id or add the year to the filename.',
            group['identifiers'])
        return {}
示例#8
0
    def add(self, params = None, force_readd = True, search_after = True, update_after = True, notify_after = True, status = None):
        """Add a movie to the wanted list (or re-activate an existing one).

        :param params: dict with at least 'identifier' (anything
            containing an IMDB id); may also carry 'info', 'title',
            'profile_id', 'category_id', 'force_readd', 'ignore_previous'.
        :param force_readd: re-activate/refresh when the movie exists
        :param search_after: kick off a release search after adding
        :param update_after: refresh full movie info (images etc.)
        :param notify_after: send a frontend notification on success
        :param status: initial status, defaults to 'active'
        :return: the media dict on success, False on failure,
            None on unexpected errors (logged)
        """
        if not params: params = {}

        # Make sure it's a correct zero filled imdb id
        params['identifier'] = get_imdb(params.get('identifier', ''))

        if not params.get('identifier'):
            msg = 'Can\'t add movie without imdb identifier.'
            log.error(msg)
            fire_event('notify.frontend', type='movie.is_tvshow', message=msg)
            return False
        elif not params.get('info'):
            try:
                is_movie = fire_event('movie.is_movie', identifier=params.get('identifier'), adding=True, single=True)
                if not is_movie:
                    msg = 'Can\'t add movie, seems to be a TV show.'
                    log.error(msg)
                    fire_event('notify.frontend', type='movie.is_tvshow', message=msg)
                    return False
            except:
                pass

        info = params.get('info')
        if not info or (info and len(info.get('titles', [])) == 0):
            info = fire_event('movie.info', merge=True, extended=False, identifier=params.get('identifier'))

        # Allow force re-add overwrite from param
        if 'force_readd' in params:
            fra = params.get('force_readd')
            force_readd = fra.lower() not in ['0', '-1'] if not isinstance(fra, bool) else fra

        # Set default title
        def_title = self.getDefaultTitle(info)

        # Default profile and category
        default_profile = {}
        if (not params.get('profile_id') and status != 'done') or params.get('ignore_previous', False):
            default_profile = fire_event('profile.default', single=True)
        cat_id = params.get('category_id')

        try:
            db = get_db()

            media = {
                '_t': 'media',
                'type': 'movie',
                'title': def_title,
                'identifiers': {
                    'imdb': params.get('identifier')
                },
                'status': status if status else 'active',
                'profile_id': params.get('profile_id') or default_profile.get('_id'),
                'category_id': cat_id if cat_id is not None and len(cat_id) > 0 and cat_id != '-1' else None,
            }

            # Update movie info (strip transient flags)
            try: del info['in_wanted']
            except: pass
            try: del info['in_library']
            except: pass
            media['info'] = info

            new = False
            previous_profile = None
            try:
                m = db.get('media', 'imdb-%s' % params.get('identifier'), with_doc = True)['doc']

                try:
                    db.get('id', m.get('profile_id'))
                    previous_profile = m.get('profile_id')
                except RecordNotFound:
                    pass
                except:
                    log.error('Failed getting previous profile: %s', traceback.format_exc())
            except:
                new = True
                m = db.insert(media)

            # Update dict to be usable
            m.update(media)

            added = True
            do_search = False
            search_after = search_after and self.conf('search_on_add', section = 'moviesearcher')
            onComplete = None

            if new:
                if search_after:
                    onComplete = self.createOnComplete(m['_id'])
                search_after = False
            elif force_readd:

                # Clean snatched history
                for release in fire_event('release.for_media', m['_id'], single=True):
                    if release.get('status') in ['downloaded', 'snatched', 'seeding', 'done']:
                        if params.get('ignore_previous', False):
                            fire_event('release.update_status', release['_id'], status='ignored')
                        else:
                            fire_event('release.delete', release['_id'], single=True)

                m['profile_id'] = (params.get('profile_id') or default_profile.get('_id')) if not previous_profile else previous_profile
                m['category_id'] = cat_id if cat_id is not None and len(cat_id) > 0 else (m.get('category_id') or None)
                m['last_edit'] = int(time.time())
                m['tags'] = []

                do_search = True
                db.update(m)
            else:
                try: del params['info']
                except: pass
                log.debug('Movie already exists, not updating: %s', params)
                added = False

            # Trigger update info
            if added and update_after:
                # Do full update to get images etc
                fire_event_async('movie.update', m['_id'], default_title=params.get('title'), on_complete=onComplete)

            # Remove releases
            for rel in fire_event('release.for_media', m['_id'], single=True):
                # Fixed: compare string values with ==, not identity (`is`)
                if rel['status'] == 'available':
                    db.delete(rel)

            movie_dict = fire_event('media.get', m['_id'], single=True)
            if not movie_dict:
                log.debug('Failed adding media, can\'t find it anymore')
                return False

            if do_search and search_after:
                onComplete = self.createOnComplete(m['_id'])
                onComplete()

            if added and notify_after:

                if params.get('title'):
                    message = 'Successfully added "%s" to your wanted list.' % params.get('title', '')
                else:
                    title = get_title(m)
                    if title:
                        message = 'Successfully added "%s" to your wanted list.' % title
                    else:
                        message = 'Successfully added to your wanted list.'
                fire_event('notify.frontend', type='movie.added', data=movie_dict, message=message)

            return movie_dict
        except:
            log.error('Failed adding media: %s', traceback.format_exc())
示例#9
0
 def getMovie(self, url):
     """Download *url* and return movie info for the IMDB id found in
     the page; a failed download is treated as an empty page.
     """
     data = ''
     try:
         data = self.getUrl(url)
     except:
         pass
     return self.getInfo(get_imdb(data))
示例#10
0
    def migrate(self):

        from couchpotato import Env
        old_db = os.path.join(Env.get('data_dir'), 'couchpotato.db')
        if not os.path.isfile(old_db): return

        log.info('=' * 30)
        log.info('Migrating database, hold on..')
        time.sleep(1)

        if os.path.isfile(old_db):

            migrate_start = time.time()

            import sqlite3
            conn = sqlite3.connect(old_db)

            migrate_list = {
                'category': [
                    'id', 'label', 'order', 'required', 'preferred', 'ignored',
                    'destination'
                ],
                'profile': ['id', 'label', 'order', 'core', 'hide'],
                'profiletype': [
                    'id', 'order', 'finish', 'wait_for', 'quality_id',
                    'profile_id'
                ],
                'quality':
                ['id', 'identifier', 'order', 'size_min', 'size_max'],
                'movie': [
                    'id', 'last_edit', 'library_id', 'status_id', 'profile_id',
                    'category_id'
                ],
                'library': ['id', 'identifier', 'info'],
                'librarytitle': ['id', 'title', 'default', 'libraries_id'],
                'library_files__file_library': ['library_id', 'file_id'],
                'release': [
                    'id', 'identifier', 'movie_id', 'status_id', 'quality_id',
                    'last_edit'
                ],
                'releaseinfo': ['id', 'identifier', 'value', 'release_id'],
                'release_files__file_release': ['release_id', 'file_id'],
                'status': ['id', 'identifier'],
                'properties': ['id', 'identifier', 'value'],
                'file': ['id', 'path', 'type_id'],
                'filetype': ['identifier', 'id']
            }

            migrate_data = {}
            rename_old = False

            try:

                c = conn.cursor()

                for ml in migrate_list:
                    migrate_data[ml] = {}
                    rows = migrate_list[ml]

                    try:
                        c.execute('SELECT %s FROM `%s`' %
                                  ('`' + '`,`'.join(rows) + '`', ml))
                    except:
                        # ignore faulty destination_id database
                        if ml == 'category':
                            migrate_data[ml] = {}
                        else:
                            rename_old = True
                            raise

                    for p in c.fetchall():
                        columns = {}
                        for row in migrate_list[ml]:
                            columns[row] = p[rows.index(row)]

                        if not migrate_data[ml].get(p[0]):
                            migrate_data[ml][p[0]] = columns
                        else:
                            if not isinstance(migrate_data[ml][p[0]], list):
                                migrate_data[ml][p[0]] = [
                                    migrate_data[ml][p[0]]
                                ]
                            migrate_data[ml][p[0]].append(columns)

                conn.close()

                log.info('Getting data took %s', time.time() - migrate_start)

                db = self.get_database()
                if not db.opened:
                    return

                # Use properties
                properties = migrate_data['properties']
                log.info('Importing %s properties', len(properties))
                for x in properties:
                    property = properties[x]
                    Env.prop(property.get('identifier'), property.get('value'))

                # Categories
                categories = migrate_data.get('category', [])
                log.info('Importing %s categories', len(categories))
                category_link = {}
                for x in categories:
                    c = categories[x]

                    new_c = db.insert({
                        '_t':
                        'category',
                        'order':
                        c.get('order', 999),
                        'label':
                        to_unicode(c.get('label', '')),
                        'ignored':
                        to_unicode(c.get('ignored', '')),
                        'preferred':
                        to_unicode(c.get('preferred', '')),
                        'required':
                        to_unicode(c.get('required', '')),
                        'destination':
                        to_unicode(c.get('destination', '')),
                    })

                    category_link[x] = new_c.get('_id')

                # Profiles
                log.info('Importing profiles')
                new_profiles = db.all('profile', with_doc=True)
                new_profiles_by_label = {}
                for x in new_profiles:

                    # Remove default non core profiles
                    if not x['doc'].get('core'):
                        db.delete(x['doc'])
                    else:
                        new_profiles_by_label[x['doc']['label']] = x['_id']

                profiles = migrate_data['profile']
                profile_link = {}
                for x in profiles:
                    p = profiles[x]

                    exists = new_profiles_by_label.get(p.get('label'))

                    # Update existing with order only
                    if exists and p.get('core'):
                        profile = db.get('id', exists)
                        profile['order'] = try_int(p.get('order'))
                        profile['hide'] = p.get('hide') in [
                            1, True, 'true', 'True'
                        ]
                        db.update(profile)

                        profile_link[x] = profile.get('_id')
                    else:

                        new_profile = {
                            '_t': 'profile',
                            'label': p.get('label'),
                            'order': int(p.get('order', 999)),
                            'core': p.get('core', False),
                            'qualities': [],
                            'wait_for': [],
                            'finish': []
                        }

                        types = migrate_data['profiletype']
                        for profile_type in types:
                            p_type = types[profile_type]
                            if types[profile_type]['profile_id'] == p['id']:
                                if p_type['quality_id']:
                                    new_profile['finish'].append(
                                        p_type['finish'])
                                    new_profile['wait_for'].append(
                                        p_type['wait_for'])
                                    new_profile['qualities'].append(
                                        migrate_data['quality']
                                        [p_type['quality_id']]['identifier'])

                        if len(new_profile['qualities']) > 0:
                            new_profile.update(db.insert(new_profile))
                            profile_link[x] = new_profile.get('_id')
                        else:
                            log.error(
                                'Corrupt profile list for "%s", using default.',
                                p.get('label'))

                # Qualities
                log.info('Importing quality sizes')
                new_qualities = db.all('quality', with_doc=True)
                new_qualities_by_identifier = {}
                for x in new_qualities:
                    new_qualities_by_identifier[x['doc']
                                                ['identifier']] = x['_id']

                qualities = migrate_data['quality']
                quality_link = {}
                for x in qualities:
                    q = qualities[x]
                    q_id = new_qualities_by_identifier[q.get('identifier')]

                    quality = db.get('id', q_id)
                    quality['order'] = q.get('order')
                    quality['size_min'] = try_int(q.get('size_min'))
                    quality['size_max'] = try_int(q.get('size_max'))
                    db.update(quality)

                    quality_link[x] = quality

                # Titles
                titles = migrate_data['librarytitle']
                titles_by_library = {}
                for x in titles:
                    title = titles[x]
                    if title.get('default'):
                        titles_by_library[title.get(
                            'libraries_id')] = title.get('title')

                # Releases
                releaseinfos = migrate_data['releaseinfo']
                for x in releaseinfos:
                    info = releaseinfos[x]

                    # Skip if release doesn't exist for this info
                    if not migrate_data['release'].get(info.get('release_id')):
                        continue

                    if not migrate_data['release'][info.get('release_id')].get(
                            'info'):
                        migrate_data['release'][info.get(
                            'release_id')]['info'] = {}

                    migrate_data['release'][info.get('release_id')]['info'][
                        info.get('identifier')] = info.get('value')

                releases = migrate_data['release']
                releases_by_media = {}
                for x in releases:
                    release = releases[x]
                    if not releases_by_media.get(release.get('movie_id')):
                        releases_by_media[release.get('movie_id')] = []

                    releases_by_media[release.get('movie_id')].append(release)

                # Type ids
                types = migrate_data['filetype']
                type_by_id = {}
                for t in types:
                    type = types[t]
                    type_by_id[type.get('id')] = type

                # Media
                log.info('Importing %s media items',
                         len(migrate_data['movie']))
                statuses = migrate_data['status']
                libraries = migrate_data['library']
                library_files = migrate_data['library_files__file_library']
                releases_files = migrate_data['release_files__file_release']
                all_files = migrate_data['file']
                poster_type = migrate_data['filetype']['poster']
                medias = migrate_data['movie']
                for x in medias:
                    m = medias[x]

                    status = statuses.get(m['status_id']).get('identifier')
                    l = libraries.get(m['library_id'])

                    # Only migrate wanted movies, Skip if no identifier present
                    if not l or not get_imdb(l.get('identifier')): continue

                    profile_id = profile_link.get(m['profile_id'])
                    category_id = category_link.get(m['category_id'])
                    title = titles_by_library.get(m['library_id'])
                    releases = releases_by_media.get(x, [])
                    info = json.loads(l.get('info', ''))

                    files = library_files.get(m['library_id'], [])
                    if not isinstance(files, list):
                        files = [files]

                    added_media = fire_event('movie.add', {
                        'info': info,
                        'identifier': l.get('identifier'),
                        'profile_id': profile_id,
                        'category_id': category_id,
                        'title': title
                    },
                                             force_readd=False,
                                             search_after=False,
                                             update_after=False,
                                             notify_after=False,
                                             status=status,
                                             single=True)

                    if not added_media:
                        # Media could not be added — log and move on to the next
                        # legacy row (the enclosing loop header is above this view).
                        log.error('Failed adding media %s: %s',
                                  (l.get('identifier'), info))
                        continue

                    # Ensure a 'files' mapping exists on the newly added media doc.
                    added_media['files'] = added_media.get('files', {})
                    for f in files:
                        ffile = all_files[f.get('file_id')]

                        # Only migrate posters; other legacy file types are dropped.
                        if ffile.get('type_id') == poster_type.get('id'):
                            # Take the first poster whose path still exists on disk
                            # and isn't already recorded, then stop scanning.
                            if ffile.get(
                                    'path') not in added_media['files'].get(
                                        'image_poster', []) and os.path.isfile(
                                            ffile.get('path')):
                                added_media['files']['image_poster'] = [
                                    ffile.get('path')
                                ]
                                break

                    # Persist the media doc only if a poster was actually attached.
                    if 'image_poster' in added_media['files']:
                        db.update(added_media)

                    # Migrate each legacy release row belonging to this media.
                    for rel in releases:

                        # Track whether the legacy row carried any 'info' payload;
                        # that decides which migration path is taken below.
                        empty_info = False
                        if not rel.get('info'):
                            empty_info = True
                            rel['info'] = {}

                        # Skip releases whose quality id no longer maps to anything.
                        quality = quality_link.get(rel.get('quality_id'))
                        if not quality:
                            continue

                        # NOTE(review): statuses.get(...) may return None for an
                        # unknown status_id, which would raise AttributeError here
                        # and be swallowed by the broad except further up — confirm.
                        release_status = statuses.get(
                            rel.get('status_id')).get('identifier')

                        # Fold the flat legacy download_* keys into a nested
                        # 'download_info' dict expected by the new schema.
                        if rel['info'].get('download_id'):
                            # Legacy value may be a bool or the strings
                            # 'true'/'True'; normalize to a real boolean.
                            status_support = rel['info'].get(
                                'download_status_support',
                                False) in [True, 'true', 'True']
                            rel['info']['download_info'] = {
                                'id':
                                rel['info'].get('download_id'),
                                'downloader':
                                rel['info'].get('download_downloader'),
                                'status_support':
                                status_support,
                            }

                        # Add status to keys
                        rel['info']['status'] = release_status
                        if not empty_info:
                            # Release has search info: let the event handler build
                            # the new release document from it.
                            fire_event('release.create_from_search',
                                       [rel['info']],
                                       added_media,
                                       quality,
                                       single=True)
                        else:
                            # No search info: construct a minimal release document
                            # by hand in the new schema.
                            release = {
                                '_t': 'release',
                                'identifier': rel.get('identifier'),
                                'media_id': added_media.get('_id'),
                                'quality': quality.get('identifier'),
                                'status': release_status,
                                'last_edit': int(time.time()),
                                'files': {}
                            }

                            # Add downloader info if provided
                            # NOTE(review): 'download_info' was read from
                            # rel['info'] above, so `del rel['download_info']`
                            # likely targets the wrong dict and always raises
                            # KeyError, silently masked by the bare except —
                            # probably meant del rel['info']['download_info'].
                            try:
                                release['download_info'] = rel['info'][
                                    'download_info']
                                del rel['download_info']
                            except:
                                pass

                            # Add files
                            release_files = releases_files.get(
                                rel.get('id'), [])
                            # A single legacy row may come back as a bare dict;
                            # normalize to a list before iterating.
                            if not isinstance(release_files, list):
                                release_files = [release_files]

                            # A release without files is not worth migrating.
                            if len(release_files) == 0:
                                continue

                            # Group file paths under their type identifier
                            # (e.g. movie/nfo/subtitle), skipping dangling ids.
                            for f in release_files:
                                rfile = all_files.get(f.get('file_id'))
                                if not rfile:
                                    continue

                                file_type = type_by_id.get(
                                    rfile.get('type_id')).get('identifier')

                                if not release['files'].get(file_type):
                                    release['files'][file_type] = []

                                release['files'][file_type].append(
                                    rfile.get('path'))

                            # Upsert: merge into an existing doc with the same
                            # release identifier, otherwise insert a fresh one.
                            # NOTE(review): bare except assumes "not found" but
                            # also hides any other db error.
                            try:
                                rls = db.get('release_identifier',
                                             rel.get('identifier'),
                                             with_doc=True)['doc']
                                rls.update(release)
                                db.update(rls)
                            except:
                                db.insert(release)

                log.info('Total migration took %s',
                         time.time() - migrate_start)
                log.info('=' * 30)

                # Migration finished cleanly; flag the old DB for renaming below.
                rename_old = True

            except OperationalError:
                # Legacy SQLite schema too old/corrupt to read — still rename it
                # away so the app can start with a fresh database.
                log.error(
                    'Migrating from faulty database, probably a (too) old version: %s',
                    traceback.format_exc())

                rename_old = True
            except:
                # NOTE(review): bare except (catches SystemExit/KeyboardInterrupt
                # too); on an unexpected failure the old DB is kept in place.
                log.error('Migration failed: %s', traceback.format_exc())

            # rename old database
            # Move the legacy SQLite file (and its WAL/SHM sidecar files, if
            # present) aside under a random suffix instead of deleting it.
            if rename_old:
                # NOTE(review): local name shadows the stdlib 'random' module
                # within this scope.
                random = random_string()
                log.info('Renaming old database to %s ',
                         '%s.%s_old' % (old_db, random))
                os.rename(old_db, '%s.%s_old' % (old_db, random))

                if os.path.isfile(old_db + '-wal'):
                    os.rename(old_db + '-wal',
                              '%s-wal.%s_old' % (old_db, random))
                if os.path.isfile(old_db + '-shm'):
                    os.rename(old_db + '-shm',
                              '%s-shm.%s_old' % (old_db, random))