Example #1
0
    def imdbMatch(self, url, imdbId):
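        # First try to read an IMDB id straight from the url string; if that
        # fails and the url is a web address, fetch the page and scan its body.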
        if getImdb(url) == imdbId:
            return True

        if url[:4] == 'http':
            try:
                cache_key = md5(url)
                data = self.getCache(cache_key, url)
            except IOError:
                log.error('Failed to open %s.', url)
                return False

            return getImdb(data) == imdbId

        return False
Example #2
0
    def imdbMatch(self, url, imdbId):
        if getImdb(url) == imdbId:
            return True

        if url[:4] == 'http':
            try:
                cache_key = md5(url)
                data = self.getCache(cache_key, url)
            except IOError:
                log.error('Failed to open %s.', url)
                return False

            return getImdb(data) == imdbId

        return False
Example #3
0
    def get(self, media_id):

        db = get_db()

        imdb_id = getImdb(str(media_id))

        media = None
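        # Prefer the IMDB-keyed lookup when media_id contains an IMDB id;
        # otherwise treat it as an internal document id.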
        if imdb_id:
            media = db.get('media', 'imdb-%s' % imdb_id, with_doc=True)['doc']
        else:
            media = db.get('id', media_id)

        if media:

            # Attach category
            try:
                media['category'] = db.get('id', media.get('category_id'))
            except:
                pass

            media['releases'] = fireEvent('release.for_media',
                                          media['_id'],
                                          single=True)

        return media
Example #4
0
    def getIMDBids(self):

        movies = []

        enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]
        urls = splitString(self.conf('automation_urls'))
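        # enablers and urls come from parallel comma-separated settings;
        # only fetch the urls whose matching toggle is enabled.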

        index = -1
        for url in urls:

            index += 1
            if not enablers[index]:
                continue

            try:
                rss_data = self.getHTMLData(url)
                imdbs = getImdb(rss_data, multiple = True)

                for imdb in imdbs:
                    movies.append(imdb)

            except:
                log.error('Failed loading IMDB watchlist: %s %s', (url, traceback.format_exc()))

        return movies
Example #5
0
    def search(self, q='', types=None, **kwargs):

        # Make sure types is the correct instance
        if isinstance(types, (str, unicode)):
            types = [types]
        elif isinstance(types, (list, tuple, set)):
            types = list(types)

        imdb_identifier = getImdb(q)
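        # When the query itself contains an IMDB id, resolve it directly via
        # the info events instead of running a text search.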

        if not types:
            if imdb_identifier:
                result = fireEvent('movie.info',
                                   identifier=imdb_identifier,
                                   merge=True)
                result = {result['type']: [result]}
            else:
                result = fireEvent('info.search', q=q, merge=True)
        else:
            result = {}
            for media_type in types:
                if imdb_identifier:
                    result[media_type] = fireEvent('%s.info' % media_type,
                                                   identifier=imdb_identifier)
                else:
                    result[media_type] = fireEvent('%s.search' % media_type,
                                                   q=q)

        return mergeDicts({
            'success': True,
        }, result)
Example #6
0
    def getIMDBids(self):

        movies = []

        for url in self.chart_urls:
            if self.conf("automation_charts_%s" % url):
                data = self.getHTMLData(self.chart_urls[url])
                if data:
                    html = BeautifulSoup(data)

                    try:
                        result_div = html.find("div", attrs={"id": "main"})
                        imdb_ids = getImdb(str(result_div), multiple=True)

                        for imdb_id in imdb_ids:
                            info = self.getInfo(imdb_id)
                            if info and self.isMinimalMovie(info):
                                movies.append(imdb_id)

                            if self.shuttingDown():
                                break

                    except:
                        log.error("Failed loading IMDB chart results from %s: %s", (url, traceback.format_exc()))

        return movies
Example #7
0
    def getIMDBids(self):

        if self.isDisabled():
            return

        movies = []

        enablers = self.conf('automation_urls_use').split(',')

        index = -1
        for rss_url in self.conf('automation_urls').split(','):

            index += 1
            if not enablers[index]:
                continue
            elif 'rss.imdb' not in rss_url:
                log.error('This isn\'t the correct url: %s', rss_url)
                continue

            try:
                cache_key = 'imdb.rss.%s' % md5(rss_url)

                rss_data = self.getCache(cache_key, rss_url)
                data = XMLTree.fromstring(rss_data)
                rss_movies = self.getElements(data, 'channel/item')

                for movie in rss_movies:
                    imdb = getImdb(self.getTextElement(movie, "link"))
                    movies.append(imdb)

            except:
                log.error('Failed loading IMDB watchlist: %s %s', (rss_url, traceback.format_exc()))

        return movies
Example #8
0
    def getIMDBids(self):

        movies = []

        enablers = [
            tryInt(x) for x in splitString(self.conf('automation_urls_use'))
        ]
        urls = splitString(self.conf('automation_urls'))

        index = -1
        for url in urls:

            index += 1
            if not enablers[index]:
                continue

            try:
                rss_data = self.getHTMLData(url)
                imdbs = getImdb(rss_data, multiple=True) if rss_data else []

                for imdb in imdbs:
                    movies.append(imdb)

            except:
                log.error('Failed loading IMDB watchlist: %s %s',
                          (url, traceback.format_exc()))

        return movies
Example #9
0
    def getIMDBids(self):

        if self.isDisabled():
            return

        movies = []

        enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]
        urls = splitString(self.conf('automation_urls'))

        index = -1
        for url in urls:

            index += 1
            if not enablers[index]:
                continue

            try:
                cache_key = 'imdb.rss.%s' % md5(url)
                rss_data = self.getCache(cache_key, url)
                imdbs = getImdb(rss_data, multiple = True)

                for imdb in imdbs:
                    movies.append(imdb)

            except:
                log.error('Failed loading IMDB watchlist: %s %s', (url, traceback.format_exc()))

        return movies
Example #10
0
    def getIMDBids(self):

        movies = []

        for url in self.chart_urls:
            if self.conf('automation_charts_%s' % url):
                data = self.getHTMLData(self.chart_urls[url])
                if data:
                    html = BeautifulSoup(data)

                    try:
                        result_div = html.find('div', attrs = {'id': 'main'})

                        try:
                            if url in self.first_table:
                                table = result_div.find('table')
                                result_div = table if table else result_div
                        except:
                            pass

                        imdb_ids = getImdb(str(result_div), multiple = True)

                        for imdb_id in imdb_ids:
                            info = self.getInfo(imdb_id)
                            if info and self.isMinimalMovie(info):
                                movies.append(imdb_id)

                            if self.shuttingDown():
                                break

                    except:
                        log.error('Failed loading IMDB chart results from %s: %s', (url, traceback.format_exc()))

        return movies
Example #11
0
    def getIMDBids(self):

        movies = []

        watchlist_enablers = [
            tryInt(x) for x in splitString(self.conf('automation_urls_use'))
        ]
        watchlist_urls = splitString(self.conf('automation_urls'))

        index = -1
        for watchlist_url in watchlist_urls:

            index += 1
            if not watchlist_enablers[index]:
                continue

            try:
                log.debug('Started IMDB watchlists: %s', watchlist_url)
                rss_data = self.getHTMLData(watchlist_url)
                imdbs = getImdb(rss_data, multiple=True) if rss_data else []

                for imdb in imdbs:
                    movies.append(imdb)

                    if self.shuttingDown():
                        break

            except:
                log.error('Failed loading IMDB watchlist: %s %s',
                          (watchlist_url, traceback.format_exc()))

        return movies
Example #12
0
    def getFromURL(self, url):
        log.debug("Getting IMDBs from: %s", url)
        html = self.getHTMLData(url)
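        # First try a cheap string split around the compact list markup; if
        # that fails, fall back to parsing the page with BeautifulSoup below.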

        try:
            split = splitString(html, split_on='<div class="list compact">')[1]
            html = splitString(split, split_on='<div class="pages">')[0]
        except:
            try:
                split = splitString(html, split_on='<div id="main">')

                if len(split) < 2:
                    log.error('Failed parsing IMDB page "%s", unexpected html.', url)
                    return []

                html = BeautifulSoup(split[1])
                for x in ["list compact", "lister", "list detail sub-list"]:
                    html2 = html.find("div", attrs={"class": x})

                    if html2:
                        html = html2.contents
                        html = "".join([str(x) for x in html])
                        break
            except:
                log.error('Failed parsing IMDB page "%s": %s', (url, traceback.format_exc()))

        html = ss(html)
        imdbs = getImdb(html, multiple=True) if html else []

        return imdbs
Example #13
0
    def getIMDBids(self):

        movies = []

        for url in self.chart_urls:
            if self.conf('automation_charts_%s' % url):
                data = self.getHTMLData(self.chart_urls[url])
                if data:
                    html = BeautifulSoup(data)

                    try:
                        result_div = html.find('div', attrs = {'id': 'main'})

                        try:
                            if url in self.first_table:
                                table = result_div.find('table')
                                result_div = table if table else result_div
                        except:
                            pass

                        imdb_ids = getImdb(str(result_div), multiple = True)

                        for imdb_id in imdb_ids:
                            info = self.getInfo(imdb_id)
                            if info and self.isMinimalMovie(info):
                                movies.append(imdb_id)

                            if self.shuttingDown():
                                break

                    except:
                        log.error('Failed loading IMDB chart results from %s: %s', (url, traceback.format_exc()))

        return movies
Example #14
0
    def get(self, media_id):

        try:
            db = get_db()

            imdb_id = getImdb(str(media_id))

            if imdb_id:
                media = db.get('media', 'imdb-%s' % imdb_id,
                               with_doc=True)['doc']
            else:
                media = db.get('id', media_id)

            if media:

                # Attach category
                try:
                    media['category'] = db.get('id', media.get('category_id'))
                except:
                    pass

                media['releases'] = fireEvent('release.for_media',
                                              media['_id'],
                                              single=True)

            return media

        except RecordNotFound:
            log.error('Media with id "%s" not found', media_id)
        except:
            raise
Example #15
0
    def get(self, media_id):

        try:
            db = get_db()

            imdb_id = getImdb(str(media_id))

            if imdb_id:
                media = db.get('media', 'imdb-%s' % imdb_id, with_doc = True)['doc']
            else:
                media = db.get('id', media_id)

            if media:

                # Attach category
                try: media['category'] = db.get('id', media.get('category_id'))
                except: pass

                media['releases'] = fireEvent('release.for_media', media['_id'], single = True)

            return media

        except (RecordNotFound, RecordDeleted):
            log.error('Media with id "%s" not found', media_id)
        except:
            raise
Example #16
0
    def fromOld(self):

        if request.method != 'POST':
            return self.renderTemplate(__file__, 'form.html', url_for = url_for)

        file = request.files['old_db']

        uploaded_file = os.path.join(Env.get('cache_dir'), 'v1_database.db')

        if os.path.isfile(uploaded_file):
            os.remove(uploaded_file)

        file.save(uploaded_file)

        try:
            import sqlite3
            conn = sqlite3.connect(uploaded_file)

            wanted = []

            t = ('want',)
            cur = conn.execute('SELECT status, imdb FROM Movie WHERE status=?', t)
            for row in cur:
                status, imdb = row
                if getImdb(imdb):
                    wanted.append(imdb)
            conn.close()

            wanted = set(wanted)
            for imdb in wanted:
                fireEventAsync('movie.add', {'identifier': imdb}, search_after = False)

            message = 'Successfully imported %s movie(s)' % len(wanted)
        except Exception, e:
            message = 'Failed: %s' % e
Example #17
0
    def getFromURL(self, url):
        log.debug('Getting IMDBs from: %s', url)
        html = self.getHTMLData(url)

        try:
            split = splitString(html, split_on = "<div class=\"list compact\">")[1]
            html = splitString(split, split_on = "<div class=\"pages\">")[0]
        except:
            try:
                split = splitString(html, split_on = "<div id=\"main\">")

                if len(split) < 2:
                    log.error('Failed parsing IMDB page "%s", unexpected html.', url)
                    return []

                html = BeautifulSoup(split[1])
                for x in ['list compact', 'lister', 'list detail sub-list']:
                    html2 = html.find('div', attrs = {
                        'class': x
                    })

                    if html2:
                        html = html2.contents
                        html = ''.join([str(x) for x in html])
                        break
            except:
                log.error('Failed parsing IMDB page "%s": %s', (url, traceback.format_exc()))

        html = ss(html)
        imdbs = getImdb(html, multiple = True) if html else []

        return imdbs
Example #18
0
    def get(self, media_id):

        try:
            db = get_db()

            imdb_id = getImdb(str(media_id))

            if imdb_id:
                media = db.get("media", "imdb-%s" % imdb_id, with_doc=True)["doc"]
            else:
                media = db.get("id", media_id)

            if media:

                # Attach category
                try:
                    media["category"] = db.get("id", media.get("category_id"))
                except:
                    pass

                media["releases"] = fireEvent("release.for_media", media["_id"], single=True)

            return media

        except (RecordNotFound, RecordDeleted):
            log.error('Media with id "%s" not found', media_id)
        except:
            raise
Example #19
0
    def getIMDBids(self):

        movies = []

        watchlist_enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]
        watchlist_urls = splitString(self.conf('automation_urls'))

        index = -1
        for watchlist_url in watchlist_urls:

            index += 1
            if not watchlist_enablers[index]:
                continue

            try:
                log.debug('Started IMDB watchlists: %s', watchlist_url)
                rss_data = self.getHTMLData(watchlist_url)
                imdbs = getImdb(rss_data, multiple = True) if rss_data else []

                for imdb in imdbs:
                    movies.append(imdb)

                    if self.shuttingDown():
                        break

            except:
                log.error('Failed loading IMDB watchlist: %s %s', (watchlist_url, traceback.format_exc()))

        return movies
Example #20
0
    def search(self, q = '', types = None, **kwargs):

        # Make sure types is the correct instance
        if isinstance(types, (str, unicode)):
            types = [types]
        elif isinstance(types, (list, tuple, set)):
            types = list(types)

        imdb_identifier = getImdb(q)

        if not types:
            if imdb_identifier:
                result = fireEvent('movie.info', identifier = imdb_identifier, merge = True)
                result = {result['type']: [result]}
            else:
                result = fireEvent('info.search', q = q, merge = True)
        else:
            result = {}
            for media_type in types:
                if imdb_identifier:
                    result[media_type] = fireEvent('%s.info' % media_type, identifier = imdb_identifier)
                else:
                    result[media_type] = fireEvent('%s.search' % media_type, q = q)

        return mergeDicts({
            'success': True,
        }, result)
Example #21
0
    def getMovie(self, url):

        cookie = {"Cookie": "c*k=1"}

        try:
            data = self.urlopen(url, headers=cookie)
        except:
            return

        return self.getInfo(getImdb(data))
Example #22
0
    def getMovie(self, url):

        cookie = {'Cookie': 'c*k=1'}

        try:
            data = self.urlopen(url, headers=cookie)
        except:
            return

        return self.getInfo(getImdb(data))
Example #23
0
    def checkNFO(self, check_name, imdb_id):
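        # Fetch the release's NFO from srrdb.com (cached by release name) and
        # check that it references the expected IMDB id.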
        cache_key = 'srrdb.com %s' % simplifyString(check_name)

        nfo = self.getCache(cache_key)
        if not nfo:
            try:
                nfo = self.urlopen('http://www.srrdb.com/showfile.php?release=%s' % check_name, show_error = False)
                self.setCache(cache_key, nfo)
            except:
                pass

        return nfo and getImdb(nfo) == imdb_id
Example #24
0
    def getIMDBids(self):

        if self.isDisabled():
            return

        movies = []

        enablers = self.conf('automation_urls_use').split(',')

        index = -1
        for rss_url in self.conf('automation_urls').split(','):

            index += 1
            if not enablers[index]:
                continue
            elif 'rss.imdb' not in rss_url:
                log.error('This isn\'t the correct url: %s', rss_url)
                continue

            prop_name = 'automation.imdb.last_update.%s' % md5(rss_url)
            last_update = float(Env.prop(prop_name, default=0))

            last_movie_added = 0
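            # last_movie_added tracks the newest pubDate seen this run; it is
            # written back to Env.prop below so the next run only adds newer items.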
            try:
                cache_key = 'imdb.rss.%s' % md5(rss_url)

                rss_data = self.getCache(cache_key, rss_url)
                data = XMLTree.fromstring(rss_data)
                rss_movies = self.getElements(data, 'channel/item')

                for movie in rss_movies:
                    created = int(
                        time.mktime(
                            parse(self.getTextElement(movie,
                                                      "pubDate")).timetuple()))
                    imdb = getImdb(self.getTextElement(movie, "link"))

                    if created > last_movie_added:
                        last_movie_added = created

                    if not imdb or created <= last_update:
                        continue

                    movies.append(imdb)

            except:
                log.error('Failed loading IMDB watchlist: %s %s',
                          (rss_url, traceback.format_exc()))

            Env.prop(prop_name, last_movie_added)

        return movies
Example #25
0
    def extraCheck(self, nzb):
        """
        Exclude the release when the movie's description contains more than
        one IMDB reference, to prevent downloading a movie bundle. CouchPotato
        is not able to extract a specific movie from an archive.

        .. seealso:: MovieSearcher.correctRelease
        """
        result = True
        ids = getImdb(nzb.get('description', ''), multiple=True)
        if len(ids) not in [0, 1]:
            T411.log.info('Too many IMDB ids: {0}'.format(', '.join(ids)))
            result = False
        return result
Example #26
0
    def search(self):

        q = getParam("q")
        cache_key = u"%s/%s" % (__name__, simplifyString(q))
        movies = Env.get("cache").get(cache_key)

        if not movies:

            if getImdb(q):
                movies = [fireEvent("movie.info", identifier=q, merge=True)]
            else:
                movies = fireEvent("movie.search", q=q, merge=True)
            Env.get("cache").set(cache_key, movies)

        return jsonified({"success": True, "empty": len(movies) == 0 if movies else 0, "movies": movies})
Example #27
0
    def search(self, q='', **kwargs):
        cache_key = u'%s/%s' % (__name__, simplifyString(q))
        shows = Env.get('cache').get(cache_key)

        if not shows:
            if getImdb(q):
                shows = [fireEvent('show.info', identifier=q, merge=True)]
            else:
                shows = fireEvent('show.search', q=q, merge=True)
            Env.get('cache').set(cache_key, shows)

        return {
            'success': True,
            'empty': len(shows) == 0 if shows else 0,
            'shows': shows,
        }
Example #28
0
    def get(self, movie_id):

        db = get_session()

        imdb_id = getImdb(str(movie_id))

        if imdb_id:
            m = db.query(Movie).filter(Movie.library.has(identifier = imdb_id)).first()
        else:
            m = db.query(Movie).filter_by(id = movie_id).first()

        results = None
        if m:
            results = m.to_dict(self.default_dict)

        return results
Example #29
0
    def search(self, q = '', **kwargs):
        cache_key = u'%s/%s' % (__name__, simplifyString(q))
        shows = Env.get('cache').get(cache_key)

        if not shows:
            if getImdb(q):
                shows = [fireEvent('show.info', identifier = q, merge = True)]
            else:
                shows = fireEvent('show.search', q = q, merge = True)
            Env.get('cache').set(cache_key, shows)

        return {
            'success': True,
            'empty': len(shows) == 0 if shows else 0,
            'shows': shows,
        }
Example #30
0
    def get(self, media_id):

        db = get_session()

        imdb_id = getImdb(str(media_id))

        if imdb_id:
            m = db.query(Media).filter(Media.library.has(identifier = imdb_id)).first()
        else:
            m = db.query(Media).filter_by(id = media_id).first()

        results = None
        if m:
            results = m.to_dict(self.default_dict)

        return results
Example #31
0
    def get(self, media_id):

        db = get_session()

        imdb_id = getImdb(str(media_id))

        if imdb_id:
            m = db.query(Media).filter(Media.library.has(identifier = imdb_id)).first()
        else:
            m = db.query(Media).filter_by(id = media_id).first()

        results = None
        if m:
            results = m.to_dict(self.default_dict)

        db.expire_all()
        return results
Example #32
0
    def get(self, movie_id):

        db = get_session()

        imdb_id = getImdb(str(movie_id))

        if imdb_id:
            m = db.query(Movie).filter(Movie.library.has(identifier = imdb_id)).first()
        else:
            m = db.query(Movie).filter_by(id = movie_id).first()

        results = None
        if m:
            results = m.to_dict(self.default_dict)

        db.expire_all()
        return results
Example #33
0
    def extraCheck(self, nzb):
        """
        Exclude the release when the movie's description contains more than
        one IMDB reference, to prevent downloading a movie bundle. CouchPotato
        is not able to extract a specific movie from an archive.

        :param nzb: Representation of a torrent
        :type nzb: dict
        :return: The checking result
        :rtype: bool
        .. seealso:: MovieSearcher.correctRelease
        """
        result = True
        ids = getImdb(nzb.get('description', ''), multiple=True)
        if len(ids) not in [0, 1]:
            log.error('Too many IMDB ids: {}'.format(', '.join(ids)))
            result = False
        return result
Example #34
0
    def search(self, q='', **kwargs):

        cache_key = u'%s/%s' % (__name__, simplifyString(q))
        movies = Env.get('cache').get(cache_key)

        if not movies:

            if getImdb(q):
                movies = [fireEvent('movie.info', identifier=q, merge=True)]
            else:
                movies = fireEvent('movie.search', q=q, merge=True)
            Env.get('cache').set(cache_key, movies)

        return {
            'success': True,
            'empty': len(movies) == 0 if movies else 0,
            'movies': movies,
        }
Example #35
0
    def search(self, q = '', **kwargs):

        cache_key = u'%s/%s' % (__name__, simplifyString(q))
        movies = Env.get('cache').get(cache_key)

        if not movies:

            if getImdb(q):
                movies = [fireEvent('movie.info', identifier = q, merge = True)]
            else:
                movies = fireEvent('movie.search', q = q, merge = True)
            Env.get('cache').set(cache_key, movies)

        return {
            'success': True,
            'empty': len(movies) == 0 if movies else 0,
            'movies': movies,
        }
Example #36
0
    def getIMDBids(self):

        if self.isDisabled():
            return

        movies = []

        enablers = self.conf('automation_urls_use').split(',')

        index = -1
        for rss_url in self.conf('automation_urls').split(','):

            index += 1
            if not enablers[index]:
                continue
            elif 'rss.imdb' not in rss_url:
                log.error('This isn\'t the correct url: %s' % rss_url)
                continue

            prop_name = 'automation.imdb.last_update.%s' % md5(rss_url)
            last_update = float(Env.prop(prop_name, default = 0))

            try:
                cache_key = 'imdb.rss.%s' % md5(rss_url)

                rss_data = self.getCache(cache_key, rss_url)
                data = XMLTree.fromstring(rss_data)
                rss_movies = self.getElements(data, 'channel/item')

                for movie in rss_movies:
                    created = int(time.mktime(parse(self.getTextElement(movie, "pubDate")).timetuple()))
                    imdb = getImdb(self.getTextElement(movie, "link"))

                    if not imdb or created < last_update:
                        continue

                    movies.append(imdb)

            except:
                log.error('Failed loading IMDB watchlist: %s %s' % (rss_url, traceback.format_exc()))

            Env.prop(prop_name, time.time())

        return movies
Example #37
0
    def get(self, media_id):

        db = get_db()

        imdb_id = getImdb(str(media_id))

        media = None
        if imdb_id:
            media = db.get('media', 'imdb-%s' % imdb_id, with_doc = True)['doc']
        else:
            media = db.get('id', media_id)

        if media:

            # Attach category
            try: media['category'] = db.get('id', media.get('category_id'))
            except: pass

            media['releases'] = fireEvent('release.for_media', media['_id'], single = True)

        return media
Example #38
0
    def getIMDBids(self):

        if self.isDisabled():
            return

        movies = []

        for csv_url in self.conf('automation_urls').split(','):
            try:
                cache_key = 'imdb_csv.%s' % md5(csv_url)
                csv_data = self.getCache(cache_key, csv_url)
                csv_reader = csv.reader(StringIO.StringIO(csv_data))
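                # Skip the csv header row before reading the entries.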
                csv_reader.next()

                for row in csv_reader:
                    imdb = getImdb(str(row))
                    if imdb:
                        movies.append(imdb)
            except:
                log.error('Failed loading IMDB watchlist: %s %s' % (csv_url, traceback.format_exc()))

        return movies
Example #39
0
    def getChartList(self):
        # Nearly identical to 'getIMDBids', but this skips the isMinimalMovie check and returns full movie info (not just the id)
        movie_lists = []
        max_items = int(self.conf('max_items', section='charts', default=5))

        for url in self.chart_urls:
            if self.conf('chart_display_%s' % url):
                movie_list = {'name': self.chart_names[url], 'url': self.chart_urls[url], 'order': self.chart_order[url], 'list': []}
                data = self.getHTMLData(self.chart_urls[url])
                if data:
                    html = BeautifulSoup(data)

                    try:
                        result_div = html.find('div', attrs = {'id': 'main'})

                        try:
                            if url in self.first_table:
                                table = result_div.find('table')
                                result_div = table if table else result_div
                        except:
                            pass

                        imdb_ids = getImdb(str(result_div), multiple = True)

                        for imdb_id in imdb_ids[0:max_items]:
                            info = self.getInfo(imdb_id)
                            movie_list['list'].append(info)

                            if self.shuttingDown():
                                break
                    except:
                        log.error('Failed loading IMDB chart results from %s: %s', (url, traceback.format_exc()))

                    if movie_list['list']:
                        movie_lists.append(movie_list)


        return movie_lists
Example #40
0
    def fromOld(self):

        if request.method != 'POST':
            return self.renderTemplate(__file__, 'form.html', url_for=url_for)

        file = request.files['old_db']

        uploaded_file = os.path.join(Env.get('cache_dir'), 'v1_database.db')

        if os.path.isfile(uploaded_file):
            os.remove(uploaded_file)

        file.save(uploaded_file)

        try:
            import sqlite3
            conn = sqlite3.connect(uploaded_file)

            wanted = []

            t = ('want', )
            cur = conn.execute('SELECT status, imdb FROM Movie WHERE status=?',
                               t)
            for row in cur:
                status, imdb = row
                if getImdb(imdb):
                    wanted.append(imdb)
            conn.close()

            wanted = set(wanted)
            for imdb in wanted:
                fireEventAsync('movie.add', {'identifier': imdb},
                               search_after=False)

            message = 'Successfully imported %s movie(s)' % len(wanted)
        except Exception, e:
            message = 'Failed: %s' % e
Example #41
0
    def getMovie(self, url):
        return fireEvent('movie.info', identifier=getImdb(url), merge=True)
Example #42
0
    def determineMedia(self, group, release_download = None):

        # Get imdb id from downloader
        imdb_id = release_download and release_download.get('imdb_id')
        if imdb_id:
            log.debug('Found movie via imdb id from its download id: %s', release_download.get('imdb_id'))

        files = group['files']

        # Check for CP(imdb_id) string in the file paths
        if not imdb_id:
            for cur_file in files['movie']:
                imdb_id = self.getCPImdb(cur_file)
                if imdb_id:
                    log.debug('Found movie via CP tag: %s', cur_file)
                    break

        # Check and see if nfo contains the imdb-id
        nfo_file = None
        if not imdb_id:
            try:
                for nf in files['nfo']:
                    imdb_id = getImdb(nf, check_inside = True)
                    if imdb_id:
                        log.debug('Found movie via nfo file: %s', nf)
                        nfo_file = nf
                        break
            except:
                pass

        # Check and see if filenames contains the imdb-id
        if not imdb_id:
            try:
                for filetype in files:
                    for filetype_file in files[filetype]:
                        imdb_id = getImdb(filetype_file)
                        if imdb_id:
                            log.debug('Found movie via imdb in filename: %s', filetype_file)
                            break
            except:
                pass

        # Search based on identifiers
        if not imdb_id:
            for identifier in group['identifiers']:

                if len(identifier) > 2:
                    try: filename = list(group['files'].get('movie'))[0]
                    except: filename = None

                    name_year = self.getReleaseNameYear(identifier, file_name = filename if not group['is_dvd'] else None)
                    if name_year.get('name') and name_year.get('year'):
                        search_q = '%(name)s %(year)s' % name_year
                        movie = fireEvent('movie.search', q = search_q, merge = True, limit = 1)

                        # Try with other
                        if len(movie) == 0 and name_year.get('other') and name_year['other'].get('name') and name_year['other'].get('year'):
                            search_q2 = '%(name)s %(year)s' % name_year.get('other')
                            if search_q2 != search_q:
                                movie = fireEvent('movie.search', q = search_q2, merge = True, limit = 1)

                        if len(movie) > 0:
                            imdb_id = movie[0].get('imdb')
                            log.debug('Found movie via search: %s', identifier)
                            if imdb_id: break
                else:
                    log.debug('Identifier too short to use for search: %s', identifier)

        if imdb_id:
            try:
                db = get_db()
                return db.get('media', 'imdb-%s' % imdb_id, with_doc = True)['doc']
            except:
                log.debug('Movie "%s" not in library, just getting info', imdb_id)
                return {
                    'identifier': imdb_id,
                    'info': fireEvent('movie.info', identifier = imdb_id, merge = True, extended = False)
                }

        log.error('No imdb_id found for %s. Add a NFO file with IMDB id or add the year to the filename.', group['identifiers'])
        return {}
Example #43
0
    def correctMovie(self, nzb=None, movie=None, quality=None, **kwargs):

        imdb_results = kwargs.get('imdb_results', False)
        retention = Env.setting('retention', section='nzb')

        if nzb.get('seeders') is None and 0 < retention < nzb.get('age', 0):
            log.info2(
                'Wrong: Outside retention, age is %s, needs %s or lower: %s',
                (nzb['age'], retention, nzb['name']))
            return False

        movie_name = getTitle(movie['library'])
        movie_words = re.split('\W+', simplifyString(movie_name))
        nzb_name = simplifyString(nzb['name'])
        nzb_words = re.split('\W+', nzb_name)

        # Make sure it has required words
        required_words = splitString(
            self.conf('required_words', section='searcher').lower())
        try:
            required_words = list(
                set(required_words +
                    splitString(movie['category']['required'].lower())))
        except:
            pass

        req_match = 0
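        # Each required set may join several words with '&'; the release name
        # must contain every word in a set for that set to count as matched.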
        for req_set in required_words:
            req = splitString(req_set, '&')
            req_match += len(list(set(nzb_words) & set(req))) == len(req)

        if len(required_words) > 0 and req_match == 0:
            log.info2('Wrong: Required word missing: %s', nzb['name'])
            return False

        # Ignore releases
        ignored_words = splitString(
            self.conf('ignored_words', section='searcher').lower())
        try:
            ignored_words = list(
                set(ignored_words +
                    splitString(movie['category']['ignored'].lower())))
        except:
            pass

        ignored_match = 0
        for ignored_set in ignored_words:
            ignored = splitString(ignored_set, '&')
            ignored_match += len(list(set(nzb_words)
                                      & set(ignored))) == len(ignored)

        if len(ignored_words) > 0 and ignored_match:
            log.info2("Wrong: '%s' contains 'ignored words'", (nzb['name']))
            return False

        # Ignore p**n stuff
        pron_tags = [
            'xxx', 'sex', 'anal', 't**s', 'f**k', 'p**n', 'orgy', 'milf',
            'boobs', 'erotica', 'erotic'
        ]
        pron_words = list(set(nzb_words) & set(pron_tags) - set(movie_words))
        if pron_words:
            log.info('Wrong: %s, probably pr0n', (nzb['name']))
            return False

        preferred_quality = fireEvent('quality.single',
                                      identifier=quality['identifier'],
                                      single=True)

        # Contains lower quality string
        if fireEvent('searcher.contains_other_quality',
                     nzb,
                     movie_year=movie['library']['year'],
                     preferred_quality=preferred_quality,
                     single=True):
            log.info2('Wrong: %s, looking for %s',
                      (nzb['name'], quality['label']))
            return False

        # File too small
        if nzb['size'] and preferred_quality['size_min'] > nzb['size']:
            log.info2(
                'Wrong: "%s" is too small to be %s. %sMB instead of the minimal of %sMB.',
                (nzb['name'], preferred_quality['label'], nzb['size'],
                 preferred_quality['size_min']))
            return False

        # File too large
        if nzb['size'] and preferred_quality.get('size_max') < nzb['size']:
            log.info2(
                'Wrong: "%s" is too large to be %s. %sMB instead of the maximum of %sMB.',
                (nzb['name'], preferred_quality['label'], nzb['size'],
                 preferred_quality['size_max']))
            return False

        # Provider specific functions
        get_more = nzb.get('get_more_info')
        if get_more:
            get_more(nzb)

        extra_check = nzb.get('extra_check')
        if extra_check and not extra_check(nzb):
            return False

        if imdb_results:
            return True

        # Check if nzb contains imdb link
        if getImdb(nzb.get('description',
                           '')) == movie['library']['identifier']:
            return True

        for raw_title in movie['library']['titles']:
            for movie_title in possibleTitles(raw_title['title']):
                movie_words = re.split('\W+', simplifyString(movie_title))

                if fireEvent('searcher.correct_name',
                             nzb['name'],
                             movie_title,
                             single=True):
                    # if no IMDB link, at least check year range 1
                    if len(movie_words) > 2 and fireEvent(
                            'searcher.correct_year',
                            nzb['name'],
                            movie['library']['year'],
                            1,
                            single=True):
                        return True

                    # if no IMDB link, at least check year
                    if len(movie_words) <= 2 and fireEvent(
                            'searcher.correct_year',
                            nzb['name'],
                            movie['library']['year'],
                            0,
                            single=True):
                        return True

        log.info("Wrong: %s, undetermined naming. Looking for '%s (%s)'",
                 (nzb['name'], movie_name, movie['library']['year']))
        return False
Example #44
0
    def correctRelease(self, nzb=None, media=None, quality=None, **kwargs):

        if media.get('type') != 'movie': return

        media_title = fireEvent('searcher.get_search_title',
                                media,
                                single=True)

        imdb_results = kwargs.get('imdb_results', False)
        retention = Env.setting('retention', section='nzb')

        if nzb.get('seeders') is None and 0 < retention < nzb.get('age', 0):
            log.info2(
                'Wrong: Outside retention, age is %s, needs %s or lower: %s',
                (nzb['age'], retention, nzb['name']))
            return False

        # Check for required and ignored words
        if not fireEvent(
                'searcher.correct_words', nzb['name'], media, single=True):
            return False

        preferred_quality = quality if quality else fireEvent(
            'quality.single', identifier=quality['identifier'], single=True)

        # Contains lower quality string
        contains_other = fireEvent('searcher.contains_other_quality',
                                   nzb,
                                   movie_year=media['info']['year'],
                                   preferred_quality=preferred_quality,
                                   single=True)
        if contains_other != False:
            log.info2(
                'Wrong: %s, looking for %s, found %s',
                (nzb['name'], quality['label'], [x for x in contains_other]
                 if contains_other else 'no quality'))
            return False

        # Check that the release matches the wanted 3D setting
        if not fireEvent('searcher.correct_3d',
                         nzb,
                         preferred_quality=preferred_quality,
                         single=True):
            log.info2(
                'Wrong: %s, %slooking for %s in 3D',
                (nzb['name'],
                 ('' if preferred_quality['custom'].get('3d') else 'NOT '),
                 quality['label']))
            return False

        # File too small
        if nzb['size'] and tryInt(preferred_quality['size_min']) > tryInt(
                nzb['size']):
            log.info2(
                'Wrong: "%s" is too small to be %s. %sMB instead of the minimal of %sMB.',
                (nzb['name'], preferred_quality['label'], nzb['size'],
                 preferred_quality['size_min']))
            return False

        # File too large
        if nzb['size'] and tryInt(preferred_quality['size_max']) < tryInt(
                nzb['size']):
            log.info2(
                'Wrong: "%s" is too large to be %s. %sMB instead of the maximum of %sMB.',
                (nzb['name'], preferred_quality['label'], nzb['size'],
                 preferred_quality['size_max']))
            return False

        # Provider specific functions
        get_more = nzb.get('get_more_info')
        if get_more:
            get_more(nzb)

        extra_check = nzb.get('extra_check')
        if extra_check and not extra_check(nzb):
            return False

        if imdb_results:
            return True

        # Check if nzb contains imdb link
        if getImdb(nzb.get('description', '')) == getIdentifier(media):
            return True

        for raw_title in media['info']['titles']:
            for movie_title in possibleTitles(raw_title):
                movie_words = re.split('\W+', simplifyString(movie_title))

                if fireEvent('searcher.correct_name',
                             nzb['name'],
                             movie_title,
                             single=True):
                    # if no IMDB link, at least check year range 1
                    if len(movie_words) > 2 and fireEvent(
                            'searcher.correct_year',
                            nzb['name'],
                            media['info']['year'],
                            1,
                            single=True):
                        return True

                    # if no IMDB link, at least check year
                    if len(movie_words) <= 2 and fireEvent(
                            'searcher.correct_year',
                            nzb['name'],
                            media['info']['year'],
                            0,
                            single=True):
                        return True

        log.info("Wrong: %s, undetermined naming. Looking for '%s (%s)'",
                 (nzb['name'], media_title, media['info']['year']))
        return False
Example #45
0
    def getMovie(self, url):
        return self.getInfo(getImdb(url))
Example #46
0
    def checkSnatched(self):

        if self.checking_snatched:
            log.debug('Already checking snatched')
            return False

        self.checking_snatched = True

        snatched_status, ignored_status, failed_status, done_status = \
            fireEvent('status.get', ['snatched', 'ignored', 'failed', 'done'], single = True)

        db = get_session()
        rels = db.query(Release).filter_by(status_id = snatched_status.get('id')).all()

        scan_required = False

        if rels:
            self.checking_snatched = True
            log.debug('Checking status snatched releases...')

            statuses = fireEvent('download.status', merge = True)
            if not statuses:
                log.debug('Download status functionality is not implemented for active downloaders.')
                scan_required = True
            else:
                try:
                    for rel in rels:
                        rel_dict = rel.to_dict({'info': {}})

                        # Get current selected title
                        default_title = getTitle(rel.movie.library)

                        # Check if movie has already completed and is manage tab (legacy db correction)
                        if rel.movie.status_id == done_status.get('id'):
                            log.debug('Found a completed movie with a snatched release: %s. Setting release status to ignored...', default_title)
                            rel.status_id = ignored_status.get('id')
                            rel.last_edit = int(time.time())
                            db.commit()
                            continue

                        movie_dict = fireEvent('movie.get', rel.movie_id, single = True)

                        # check status
                        nzbname = self.createNzbName(rel_dict['info'], movie_dict)

                        found = False
                        for item in statuses:
                            found_release = False
                            if rel_dict['info'].get('download_id'):
                                if item['id'] == rel_dict['info']['download_id'] and item['downloader'] == rel_dict['info']['download_downloader']:
                                    log.debug('Found release by id: %s', item['id'])
                                    found_release = True
                            else:
                                if item['name'] == nzbname or rel_dict['info']['name'] in item['name'] or getImdb(item['name']) == movie_dict['library']['identifier']:
                                    found_release = True

                            if found_release:
                                timeleft = 'N/A' if item['timeleft'] == -1 else item['timeleft']
                                log.debug('Found %s: %s, time to go: %s', (item['name'], item['status'].upper(), timeleft))

                                if item['status'] == 'busy':
                                    pass
                                elif item['status'] == 'failed':
                                    fireEvent('download.remove_failed', item, single = True)
                                    rel.status_id = failed_status.get('id')
                                    rel.last_edit = int(time.time())
                                    db.commit()

                                    if self.conf('next_on_failed'):
                                        fireEvent('searcher.try_next_release', movie_id = rel.movie_id)
                                elif item['status'] == 'completed':
                                    log.info('Download of %s completed!', item['name'])
                                    if item['id'] and item['downloader'] and item['folder']:
                                        fireEventAsync('renamer.scan', movie_folder = item['folder'], download_info = item)
                                    else:
                                        scan_required = True

                                found = True
                                break

                        if not found:
                            log.info('%s not found in downloaders', nzbname)

                except:
                    log.error('Failed checking for release in downloader: %s', traceback.format_exc())

        if scan_required:
            fireEvent('renamer.scan')

        self.checking_snatched = False

        return True
Example #47
0
    def getIMDBids(self):

        movies = []

        watchlist_enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]
        watchlist_urls = splitString(self.conf('automation_urls'))

        index = -1
        for watchlist_url in watchlist_urls:

            try:
                # Get list ID
                ids = re.findall('(?:list/|list_id=)([a-zA-Z0-9\-_]{11})', watchlist_url)
                if len(ids) == 1:
                    watchlist_url = 'http://www.imdb.com/list/%s/?view=compact&sort=created:asc' % ids[0]
                # Try find user id with watchlist
                else:
                    userids = re.findall('(ur\d{7,9})', watchlist_url)
                    if len(userids) == 1:
                        watchlist_url = 'http://www.imdb.com/user/%s/watchlist?view=compact&sort=created:asc' % userids[0]
            except:
                log.error('Failed getting id from watchlist: %s', traceback.format_exc())

            index += 1
            if not watchlist_enablers[index]:
                continue

            start = 0
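            # The list is fetched 250 entries at a time; stop once a page
            # returns fewer than 250 ids.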
            while True:
                try:

                    w_url = '%s&start=%s' % (watchlist_url, start)
                    log.debug('Started IMDB watchlists: %s', w_url)
                    html = self.getHTMLData(w_url)

                    try:
                        split = splitString(html, split_on="<div class=\"list compact\">")[1]
                        html = splitString(split, split_on="<div class=\"pages\">")[0]
                    except:
                        pass

                    imdbs = getImdb(html, multiple = True) if html else []

                    for imdb in imdbs:
                        if imdb not in movies:
                            movies.append(imdb)

                        if self.shuttingDown():
                            break

                    log.debug('Found %s movies on %s', (len(imdbs), w_url))

                    if len(imdbs) < 250:
                        break

                    start += 250

                except:
                    log.error('Failed loading IMDB watchlist: %s %s', (watchlist_url, traceback.format_exc()))

        return movies
Example #48
0
    def migrate(self):

        from couchpotato import Env
        old_db = os.path.join(Env.get('data_dir'), 'couchpotato.db')
        if not os.path.isfile(old_db): return

        log.info('=' * 30)
        log.info('Migrating database, hold on..')
        time.sleep(1)

        if os.path.isfile(old_db):

            migrate_start = time.time()

            import sqlite3
            conn = sqlite3.connect(old_db)

            migrate_list = {
                'category': ['id', 'label', 'order', 'required', 'preferred', 'ignored', 'destination'],
                'profile': ['id', 'label', 'order', 'core', 'hide'],
                'profiletype': ['id', 'order', 'finish', 'wait_for', 'quality_id', 'profile_id'],
                'quality': ['id', 'identifier', 'order', 'size_min', 'size_max'],
                'movie': ['id', 'last_edit', 'library_id', 'status_id', 'profile_id', 'category_id'],
                'library': ['id', 'identifier', 'info'],
                'librarytitle': ['id', 'title', 'default', 'libraries_id'],
                'library_files__file_library': ['library_id', 'file_id'],
                'release': ['id', 'identifier', 'movie_id', 'status_id', 'quality_id', 'last_edit'],
                'releaseinfo': ['id', 'identifier', 'value', 'release_id'],
                'release_files__file_release': ['release_id', 'file_id'],
                'status': ['id', 'identifier'],
                'properties': ['id', 'identifier', 'value'],
                'file': ['id', 'path', 'type_id'],
                'filetype': ['identifier', 'id']
            }

            migrate_data = {}
            rename_old = False

            try:

                c = conn.cursor()

                for ml in migrate_list:
                    migrate_data[ml] = {}
                    rows = migrate_list[ml]

                    try:
                        c.execute('SELECT %s FROM `%s`' % ('`' + '`,`'.join(rows) + '`', ml))
                    except:
                        # ignore faulty destination_id database
                        if ml == 'category':
                            migrate_data[ml] = {}
                        else:
                            rename_old = True
                            raise

                    for p in c.fetchall():
                        columns = {}
                        for row in migrate_list[ml]:
                            columns[row] = p[rows.index(row)]

                        if not migrate_data[ml].get(p[0]):
                            migrate_data[ml][p[0]] = columns
                        else:
                            if not isinstance(migrate_data[ml][p[0]], list):
                                migrate_data[ml][p[0]] = [migrate_data[ml][p[0]]]
                            migrate_data[ml][p[0]].append(columns)

                conn.close()

                log.info('Getting data took %s', time.time() - migrate_start)

                db = self.getDB()
                if not db.opened:
                    return

                # Use properties
                properties = migrate_data['properties']
                log.info('Importing %s properties', len(properties))
                for x in properties:
                    property = properties[x]
                    Env.prop(property.get('identifier'), property.get('value'))

                # Categories
                categories = migrate_data.get('category', [])
                log.info('Importing %s categories', len(categories))
                category_link = {}
                for x in categories:
                    c = categories[x]

                    new_c = db.insert({
                        '_t': 'category',
                        'order': c.get('order', 999),
                        'label': toUnicode(c.get('label', '')),
                        'ignored': toUnicode(c.get('ignored', '')),
                        'preferred': toUnicode(c.get('preferred', '')),
                        'required': toUnicode(c.get('required', '')),
                        'destination': toUnicode(c.get('destination', '')),
                    })

                    category_link[x] = new_c.get('_id')

                # Profiles
                log.info('Importing profiles')
                new_profiles = db.all('profile', with_doc = True)
                new_profiles_by_label = {}
                for x in new_profiles:

                    # Remove default non core profiles
                    if not x['doc'].get('core'):
                        db.delete(x['doc'])
                    else:
                        new_profiles_by_label[x['doc']['label']] = x['_id']

                profiles = migrate_data['profile']
                profile_link = {}
                for x in profiles:
                    p = profiles[x]

                    exists = new_profiles_by_label.get(p.get('label'))

                    # Update existing with order only
                    if exists and p.get('core'):
                        profile = db.get('id', exists)
                        profile['order'] = tryInt(p.get('order'))
                        profile['hide'] = p.get('hide') in [1, True, 'true', 'True']
                        db.update(profile)

                        profile_link[x] = profile.get('_id')
                    else:

                        new_profile = {
                            '_t': 'profile',
                            'label': p.get('label'),
                            'order': int(p.get('order', 999)),
                            'core': p.get('core', False),
                            'qualities': [],
                            'wait_for': [],
                            'finish': []
                        }

                        types = migrate_data['profiletype']
                        for profile_type in types:
                            p_type = types[profile_type]
                            if types[profile_type]['profile_id'] == p['id']:
                                if p_type['quality_id']:
                                    new_profile['finish'].append(p_type['finish'])
                                    new_profile['wait_for'].append(p_type['wait_for'])
                                    new_profile['qualities'].append(migrate_data['quality'][p_type['quality_id']]['identifier'])

                        if len(new_profile['qualities']) > 0:
                            new_profile.update(db.insert(new_profile))
                            profile_link[x] = new_profile.get('_id')
                        else:
                            log.error('Corrupt profile list for "%s", using default.', p.get('label'))

                # Qualities
                log.info('Importing quality sizes')
                new_qualities = db.all('quality', with_doc = True)
                new_qualities_by_identifier = {}
                for x in new_qualities:
                    new_qualities_by_identifier[x['doc']['identifier']] = x['_id']

                qualities = migrate_data['quality']
                quality_link = {}
                for x in qualities:
                    q = qualities[x]
                    q_id = new_qualities_by_identifier[q.get('identifier')]

                    quality = db.get('id', q_id)
                    quality['order'] = q.get('order')
                    quality['size_min'] = tryInt(q.get('size_min'))
                    quality['size_max'] = tryInt(q.get('size_max'))
                    db.update(quality)

                    quality_link[x] = quality

                # Titles
                titles = migrate_data['librarytitle']
                titles_by_library = {}
                for x in titles:
                    title = titles[x]
                    if title.get('default'):
                        titles_by_library[title.get('libraries_id')] = title.get('title')

                # Releases
                releaseinfos = migrate_data['releaseinfo']
                for x in releaseinfos:
                    info = releaseinfos[x]

                    # Skip if release doesn't exist for this info
                    if not migrate_data['release'].get(info.get('release_id')):
                        continue

                    if not migrate_data['release'][info.get('release_id')].get('info'):
                        migrate_data['release'][info.get('release_id')]['info'] = {}

                    migrate_data['release'][info.get('release_id')]['info'][info.get('identifier')] = info.get('value')

                releases = migrate_data['release']
                releases_by_media = {}
                for x in releases:
                    release = releases[x]
                    if not releases_by_media.get(release.get('movie_id')):
                        releases_by_media[release.get('movie_id')] = []

                    releases_by_media[release.get('movie_id')].append(release)

                # Type ids
                types = migrate_data['filetype']
                type_by_id = {}
                for t in types:
                    file_type = types[t]
                    type_by_id[file_type.get('id')] = file_type

                # Media
                log.info('Importing %s media items', len(migrate_data['movie']))
                statuses = migrate_data['status']
                libraries = migrate_data['library']
                library_files = migrate_data['library_files__file_library']
                releases_files = migrate_data['release_files__file_release']
                all_files = migrate_data['file']
                poster_type = migrate_data['filetype']['poster']
                medias = migrate_data['movie']
                for x in medias:
                    m = medias[x]

                    status = statuses.get(m['status_id']).get('identifier')
                    l = libraries.get(m['library_id'])

                    # Only migrate wanted movies; skip if no identifier is present
                    if not l or not getImdb(l.get('identifier')): continue

                    profile_id = profile_link.get(m['profile_id'])
                    category_id = category_link.get(m['category_id'])
                    title = titles_by_library.get(m['library_id'])
                    releases = releases_by_media.get(x, [])
                    info = json.loads(l.get('info', ''))

                    files = library_files.get(m['library_id'], [])
                    if not isinstance(files, list):
                        files = [files]

                    added_media = fireEvent('movie.add', {
                        'info': info,
                        'identifier': l.get('identifier'),
                        'profile_id': profile_id,
                        'category_id': category_id,
                        'title': title
                    }, force_readd = False, search_after = False, update_after = False, notify_after = False, status = status, single = True)

                    if not added_media:
                        log.error('Failed adding media %s: %s', (l.get('identifier'), info))
                        continue

                    added_media['files'] = added_media.get('files', {})
                    for f in files:
                        ffile = all_files[f.get('file_id')]

                        # Only migrate posters
                        if ffile.get('type_id') == poster_type.get('id'):
                            if ffile.get('path') not in added_media['files'].get('image_poster', []) and os.path.isfile(ffile.get('path')):
                                added_media['files']['image_poster'] = [ffile.get('path')]
                                break

                    if 'image_poster' in added_media['files']:
                        db.update(added_media)

                    for rel in releases:

                        empty_info = False
                        if not rel.get('info'):
                            empty_info = True
                            rel['info'] = {}

                        quality = quality_link.get(rel.get('quality_id'))
                        if not quality:
                            continue

                        release_status = statuses.get(rel.get('status_id')).get('identifier')

                        if rel['info'].get('download_id'):
                            status_support = rel['info'].get('download_status_support', False) in [True, 'true', 'True']
                            rel['info']['download_info'] = {
                                'id': rel['info'].get('download_id'),
                                'downloader': rel['info'].get('download_downloader'),
                                'status_support': status_support,
                            }

                        # Add status to keys
                        rel['info']['status'] = release_status
                        if not empty_info:
                            fireEvent('release.create_from_search', [rel['info']], added_media, quality, single = True)
                        else:
                            release = {
                                '_t': 'release',
                                'identifier': rel.get('identifier'),
                                'media_id': added_media.get('_id'),
                                'quality': quality.get('identifier'),
                                'status': release_status,
                                'last_edit': int(time.time()),
                                'files': {}
                            }

                            # Add downloader info if provided
                            try:
                                release['download_info'] = rel['info']['download_info']
                                del rel['info']['download_info']
                            except:
                                pass

                            # Add files
                            release_files = releases_files.get(rel.get('id'), [])
                            if not isinstance(release_files, list):
                                release_files = [release_files]

                            if len(release_files) == 0:
                                continue

                            for f in release_files:
                                rfile = all_files.get(f.get('file_id'))
                                if not rfile:
                                    continue

                                file_type = type_by_id.get(rfile.get('type_id')).get('identifier')

                                if not release['files'].get(file_type):
                                    release['files'][file_type] = []

                                release['files'][file_type].append(rfile.get('path'))

                            try:
                                rls = db.get('release_identifier', rel.get('identifier'), with_doc = True)['doc']
                                rls.update(release)
                                db.update(rls)
                            except:
                                db.insert(release)

                log.info('Total migration took %s', time.time() - migrate_start)
                log.info('=' * 30)

                rename_old = True

            except OperationalError:
                log.error('Migrating from faulty database, probably a (too) old version: %s', traceback.format_exc())
                
                rename_old = True
            except:
                log.error('Migration failed: %s', traceback.format_exc())


            # rename old database
            if rename_old:
                random = randomString()
                log.info('Renaming old database to %s ', '%s.%s_old' % (old_db, random))
                os.rename(old_db, '%s.%s_old' % (old_db, random))

                if os.path.isfile(old_db + '-wal'):
                    os.rename(old_db + '-wal', '%s-wal.%s_old' % (old_db, random))
                if os.path.isfile(old_db + '-shm'):
                    os.rename(old_db + '-shm', '%s-shm.%s_old' % (old_db, random))
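
For readers skimming the migration above: the core trick is the trio of "link" dictionaries (category_link, profile_link, quality_link) that remember which new document _id each old SQLite row id became, so later passes (releases, media) can resolve foreign keys such as profile_id and category_id. A minimal, self-contained sketch of that pattern with toy data and a fake insert (not CouchPotato code):

def migrate_categories(db_insert, legacy_rows):
    # Insert every legacy row into the new store and remember old id -> new _id
    link = {}
    for old_id, row in legacy_rows.items():
        new_doc = db_insert({'_t': 'category', 'label': row['label']})
        link[old_id] = new_doc['_id']
    return link

def fake_insert(doc, _counter=[0]):
    # Stand-in for a document-store insert that hands back a generated _id
    _counter[0] += 1
    doc['_id'] = 'doc-%d' % _counter[0]
    return doc

legacy_categories = {1: {'label': 'HD'}, 2: {'label': 'Kids'}}
category_link = migrate_categories(fake_insert, legacy_categories)
print(category_link)  # {1: 'doc-1', 2: 'doc-2'}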
Example #49
0
    def determineMovie(self, group):
        imdb_id = None

        files = group['files']

        # Check for CP(imdb_id) string in the file paths
        for cur_file in files['movie']:
            imdb_id = self.getCPImdb(cur_file)
            if imdb_id:
                log.debug('Found movie via CP tag: %s', cur_file)
                break

        # Check and see if nfo contains the imdb-id
        if not imdb_id:
            try:
                for nfo_file in files['nfo']:
                    imdb_id = getImdb(nfo_file)
                    if imdb_id:
                        log.debug('Found movie via nfo file: %s', nfo_file)
                        break
            except:
                pass

        # Check and see if filenames contains the imdb-id
        if not imdb_id:
            try:
                for filetype in files:
                    for filetype_file in files[filetype]:
                        imdb_id = getImdb(filetype_file, check_inside=False)
                        if imdb_id:
                            log.debug('Found movie via imdb in filename: %s',
                                      filetype_file)
                            break
            except:
                pass

        # Check if path is already in db
        if not imdb_id:
            db = get_session()
            for cur_file in files['movie']:
                f = db.query(File).filter_by(path=toUnicode(cur_file)).first()
                try:
                    imdb_id = f.library[0].identifier
                    log.debug('Found movie via database: %s', cur_file)
                    break
                except:
                    pass

        # Search based on OpenSubtitleHash
        if not imdb_id and not group['is_dvd']:
            for cur_file in files['movie']:
                movie = fireEvent('movie.by_hash', file=cur_file, merge=True)

                if len(movie) > 0:
                    imdb_id = movie[0]['imdb']
                    if imdb_id:
                        log.debug('Found movie via OpenSubtitleHash: %s',
                                  cur_file)
                        break

        # Search based on identifiers
        if not imdb_id:
            for identifier in group['identifiers']:

                if len(identifier) > 2:
                    try:
                        filename = list(group['files'].get('movie'))[0]
                    except:
                        filename = None

                    name_year = self.getReleaseNameYear(
                        identifier,
                        file_name=filename if not group['is_dvd'] else None)
                    if name_year.get('name') and name_year.get('year'):
                        movie = fireEvent('movie.search',
                                          q='%(name)s %(year)s' % name_year,
                                          merge=True,
                                          limit=1)

                        if len(movie) > 0:
                            imdb_id = movie[0]['imdb']
                            log.debug('Found movie via search: %s', identifier)
                            if imdb_id: break
                else:
                    log.debug('Identifier too short to use for search: %s',
                              identifier)

        if imdb_id:
            return fireEvent('library.add',
                             attrs={'identifier': imdb_id},
                             update_after=False,
                             single=True)

        log.error(
            'No imdb_id found for %s. Add an NFO file with the IMDB id or add the year to the filename.',
            group['identifiers'])
        return {}
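
Most of the lookups above funnel through getImdb; its implementation isn't shown in this listing, but a rough stand-in that matches how it is used (scan a string for an IMDB id) might look like the sketch below. get_imdb_id and the check_inside handling are assumptions for illustration, not the real helper:

import re

def get_imdb_id(text, check_inside=False):
    # Hypothetical stand-in: return the first IMDB id (tt + digits) found in
    # the given string, or None. check_inside is only hinted at here; the real
    # helper presumably also opens files/URLs when asked to look inside them.
    match = re.search(r'(tt\d{7,8})', text or '')
    return match.group(1) if match else None

print(get_imdb_id('Some.Movie.2012.tt1234567.mkv'))  # tt1234567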
Example #50
0
    def correctRelease(self, nzb = None, media = None, quality = None, **kwargs):

        if media.get('type') != 'movie': return

        media_title = fireEvent('searcher.get_search_title', media, single = True)

        imdb_results = kwargs.get('imdb_results', False)
        retention = Env.setting('retention', section = 'nzb')

        if nzb.get('seeders') is None and 0 < retention < nzb.get('age', 0):
            log.info2('Wrong: Outside retention, age is %s, needs %s or lower: %s', (nzb['age'], retention, nzb['name']))
            return False

        # Check for required and ignored words
        if not fireEvent('searcher.correct_words', nzb['name'], media, single = True):
            return False

        preferred_quality = quality if quality else fireEvent('quality.single', identifier = quality['identifier'], single = True)

        # Contains lower quality string
        contains_other = fireEvent('searcher.contains_other_quality', nzb, movie_year = media['info']['year'], preferred_quality = preferred_quality, single = True)
        if contains_other and isinstance(contains_other, dict):
            log.info2('Wrong: %s, looking for %s, found %s', (nzb['name'], quality['label'], [x for x in contains_other] if contains_other else 'no quality'))
            return False

        # Check whether the release matches the wanted 3D preference
        if not fireEvent('searcher.correct_3d', nzb, preferred_quality = preferred_quality, single = True):
            log.info2('Wrong: %s, %slooking for %s in 3D', (nzb['name'], ('' if preferred_quality['custom'].get('3d') else 'NOT '), quality['label']))
            return False

        # File too small
        if nzb['size'] and tryInt(preferred_quality['size_min']) > tryInt(nzb['size']):
            log.info2('Wrong: "%s" is too small to be %s. %sMB instead of the minimal of %sMB.', (nzb['name'], preferred_quality['label'], nzb['size'], preferred_quality['size_min']))
            return False

        # File too large
        if nzb['size'] and tryInt(preferred_quality['size_max']) < tryInt(nzb['size']):
            log.info2('Wrong: "%s" is too large to be %s. %sMB instead of the maximum of %sMB.', (nzb['name'], preferred_quality['label'], nzb['size'], preferred_quality['size_max']))
            return False

        # Provider specific functions
        get_more = nzb.get('get_more_info')
        if get_more:
            get_more(nzb)

        extra_check = nzb.get('extra_check')
        if extra_check and not extra_check(nzb):
            return False


        if imdb_results:
            return True

        # Check if nzb contains imdb link
        if getImdb(nzb.get('description', '')) == getIdentifier(media):
            return True

        for raw_title in media['info']['titles']:
            for movie_title in possibleTitles(raw_title):
                movie_words = re.split(r'\W+', simplifyString(movie_title))

                if fireEvent('searcher.correct_name', nzb['name'], movie_title, single = True):
                    # No IMDB link; at least check the year, allowing a 1-year deviation
                    if len(movie_words) > 2 and fireEvent('searcher.correct_year', nzb['name'], media['info']['year'], 1, single = True):
                        return True

                    # No IMDB link; require an exact year match
                    if len(movie_words) <= 2 and fireEvent('searcher.correct_year', nzb['name'], media['info']['year'], 0, single = True):
                        return True

        log.info("Wrong: %s, undetermined naming. Looking for '%s (%s)'", (nzb['name'], media_title, media['info']['year']))
        return False
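
The size gating in correctRelease reduces to a simple range check against the profile's size_min/size_max; a stripped-down sketch of that test (field names mirror the snippet above, values are made up):

def size_ok(size_mb, preferred_quality):
    # Reject releases whose size falls outside the quality's expected window.
    if size_mb and preferred_quality['size_min'] > size_mb:
        return False  # too small to plausibly be this quality
    if size_mb and preferred_quality['size_max'] < size_mb:
        return False  # too large, likely a different quality
    return True

print(size_ok(4500, {'size_min': 3000, 'size_max': 10000}))  # True
print(size_ok(700, {'size_min': 3000, 'size_max': 10000}))   # False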
Example #51
0
    def add(self, params=None, force_readd=True, search_after=True, update_after=True, notify_after=True, status=None):
        if not params:
            params = {}

        # Make sure it's a correct zero filled imdb id
        params["identifier"] = getImdb(params.get("identifier", ""))

        if not params.get("identifier"):
            msg = "Can't add movie without imdb identifier."
            log.error(msg)
            fireEvent("notify.frontend", type="movie.is_tvshow", message=msg)
            return False
        elif not params.get("info"):
            try:
                is_movie = fireEvent("movie.is_movie", identifier=params.get("identifier"), single=True)
                if not is_movie:
                    msg = "Can't add movie, seems to be a TV show."
                    log.error(msg)
                    fireEvent("notify.frontend", type="movie.is_tvshow", message=msg)
                    return False
            except:
                pass

        info = params.get("info")
        if not info or (info and len(info.get("titles", [])) == 0):
            info = fireEvent("movie.info", merge=True, extended=False, identifier=params.get("identifier"))

        # Set default title
        default_title = toUnicode(info.get("title"))
        titles = info.get("titles", [])
        counter = 0
        def_title = None
        for title in titles:
            if (
                (len(default_title) == 0 and counter == 0)
                or len(titles) == 1
                or title.lower() == toUnicode(default_title.lower())
                or (toUnicode(default_title) == six.u("") and toUnicode(titles[0]) == title)
            ):
                def_title = toUnicode(title)
                break
            counter += 1

        if not def_title:
            def_title = toUnicode(titles[0])

        # Default profile and category
        default_profile = {}
        if (not params.get("profile_id") and status != "done") or params.get("ignore_previous", False):
            default_profile = fireEvent("profile.default", single=True)
        cat_id = params.get("category_id")

        try:
            db = get_db()

            media = {
                "_t": "media",
                "type": "movie",
                "title": def_title,
                "identifiers": {"imdb": params.get("identifier")},
                "status": status if status else "active",
                "profile_id": params.get("profile_id") or default_profile.get("_id"),
                "category_id": cat_id if cat_id is not None and len(cat_id) > 0 and cat_id != "-1" else None,
            }

            # Update movie info
            try:
                del info["in_wanted"]
            except:
                pass
            try:
                del info["in_library"]
            except:
                pass
            media["info"] = info

            new = False
            previous_profile = None
            try:
                m = db.get("media", "imdb-%s" % params.get("identifier"), with_doc=True)["doc"]

                try:
                    db.get("id", m.get("profile_id"))
                    previous_profile = m.get("profile_id")
                except RecordNotFound:
                    pass
                except:
                    log.error("Failed getting previous profile: %s", traceback.format_exc())
            except:
                new = True
                m = db.insert(media)

            # Update dict to be usable
            m.update(media)

            added = True
            do_search = False
            search_after = search_after and self.conf("search_on_add", section="moviesearcher")
            onComplete = None

            if new:
                if search_after:
                    onComplete = self.createOnComplete(m["_id"])
                search_after = False
            elif force_readd:

                # Clean snatched history
                for release in fireEvent("release.for_media", m["_id"], single=True):
                    if release.get("status") in ["downloaded", "snatched", "seeding", "done"]:
                        if params.get("ignore_previous", False):
                            release["status"] = "ignored"
                            db.update(release)
                        else:
                            fireEvent("release.delete", release["_id"], single=True)

                m["profile_id"] = (
                    (params.get("profile_id") or default_profile.get("_id"))
                    if not previous_profile
                    else previous_profile
                )
                m["category_id"] = cat_id if cat_id is not None and len(cat_id) > 0 else (m.get("category_id") or None)
                m["last_edit"] = int(time.time())
                m["tags"] = []

                do_search = True
                db.update(m)
            else:
                try:
                    del params["info"]
                except:
                    pass
                log.debug("Movie already exists, not updating: %s", params)
                added = False

            # Trigger update info
            if added and update_after:
                # Do full update to get images etc
                fireEventAsync("movie.update", m["_id"], default_title=params.get("title"), on_complete=onComplete)

            # Remove releases
            for rel in fireEvent("release.for_media", m["_id"], single=True):
                if rel["status"] is "available":
                    db.delete(rel)

            movie_dict = fireEvent("media.get", m["_id"], single=True)

            if do_search and search_after:
                onComplete = self.createOnComplete(m["_id"])
                onComplete()

            if added and notify_after:

                if params.get("title"):
                    message = 'Successfully added "%s" to your wanted list.' % params.get("title", "")
                else:
                    title = getTitle(m)
                    if title:
                        message = 'Successfully added "%s" to your wanted list.' % title
                    else:
                        message = "Successfully added to your wanted list."
                fireEvent("notify.frontend", type="movie.added", data=movie_dict, message=message)

            return movie_dict
        except:
            log.error("Failed adding media: %s", traceback.format_exc())
Example #52
0
    def _searchOnTitle(self, title, movie, quality, results):

        url = self.urls['search'] % tryUrlencode('%s %s' % (title.replace(':', ''), movie['info']['year']))
        log.debug('yourbittorrent search url: %s', url)

        data = self.getHTMLData(url)
        if data:
            html = BeautifulSoup(data)
            try:
                resultsTable = html.find_all('table', attrs = {'class': 'bordered'})[1]
                if resultsTable is None:
                    log.debug('yourbittorrent returned an empty result page')
                    return

                # Skip the header and footer rows
                entries = resultsTable.find_all('tr')[1:-1]

                for result in entries:
                    # Skip rows that don't contain a torrent cell
                    if result.find('td', attrs = {'class': 'v'}) is None:
                        continue

                    torrent_details = result.find_all('td')[0].find('a')['href']
                    torrent_id = torrent_details.split('/')[2]
                    torrent_title = result.find_all('td')[0].find('a').get_text()
                    torrent_seeders = tryInt(result.find('td', attrs = {'class': 'u'}).string)
                    torrent_leechers = tryInt(result.find('td', attrs = {'class': 'd'}).string)
                    torrent_size = self.parseSize(result.find('td', attrs = {'class': 's'}).contents[0])
                    imdb_id = getImdb(torrent_title, check_inside = True)

                    results.append({
                        'id': torrent_id,
                        'name': torrent_title,
                        'url': self.urls['download'] % torrent_id,
                        'detail_url': self.urls['detail'] % torrent_details,
                        'size': torrent_size,
                        'seeders': torrent_seeders if torrent_seeders else 0,
                        'leechers': torrent_leechers if torrent_leechers else 0,
                        'description': imdb_id if imdb_id else '',
                    })

            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
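
For context on the scraping pattern in the _searchOnTitle snippet above, a tiny self-contained BeautifulSoup illustration with made-up HTML (the real yourbittorrent markup may differ):

from bs4 import BeautifulSoup

html = BeautifulSoup(
    '<table class="bordered"><tr>'
    '<td class="v"><a href="/torrent/42/x">Movie tt0111161 1080p</a></td>'
    '<td class="u">12</td></tr></table>',
    'html.parser')

row = html.find('tr')
link = row.find('td', attrs={'class': 'v'}).find('a')
# Same id/title extraction as the provider code above
print(link['href'].split('/')[2], '-', link.get_text())  # 42 - Movie tt0111161 1080p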
Example #53
0
    def determineMovie(self, group):
        imdb_id = None

        files = group['files']

        # Check for CP(imdb_id) string in the file paths
        for cur_file in files['movie']:
            imdb_id = self.getCPImdb(cur_file)
            if imdb_id:
                log.debug('Found movie via CP tag: %s', cur_file)
                break

        # Check and see if nfo contains the imdb-id
        if not imdb_id:
            try:
                for nfo_file in files['nfo']:
                    imdb_id = getImdb(nfo_file)
                    if imdb_id:
                        log.debug('Found movie via nfo file: %s', nfo_file)
                        break
            except:
                pass

        # Check and see if filenames contains the imdb-id
        if not imdb_id:
            try:
                for filetype in files:
                    for filetype_file in files[filetype]:
                        imdb_id = getImdb(filetype_file, check_inside = False)
                        if imdb_id:
                            log.debug('Found movie via imdb in filename: %s', filetype_file)
                            break
            except:
                pass

        # Check if path is already in db
        if not imdb_id:
            db = get_session()
            for cur_file in files['movie']:
                f = db.query(File).filter_by(path = toUnicode(cur_file)).first()
                try:
                    imdb_id = f.library[0].identifier
                    log.debug('Found movie via database: %s', cur_file)
                    break
                except:
                    pass
            #db.close()

        # Search based on OpenSubtitleHash
        if not imdb_id and not group['is_dvd']:
            for cur_file in files['movie']:
                movie = fireEvent('movie.by_hash', file = cur_file, merge = True)

                if len(movie) > 0:
                    imdb_id = movie[0]['imdb']
                    if imdb_id:
                        log.debug('Found movie via OpenSubtitleHash: %s', cur_file)
                        break

        # Search based on identifiers
        if not imdb_id:
            for identifier in group['identifiers']:

                if len(identifier) > 2:
                    try: filename = list(group['files'].get('movie'))[0]
                    except: filename = None

                    name_year = self.getReleaseNameYear(identifier, file_name = filename if not group['is_dvd'] else None)
                    if name_year.get('name') and name_year.get('year'):
                        movie = fireEvent('movie.search', q = '%(name)s %(year)s' % name_year, merge = True, limit = 1)

                        if len(movie) > 0:
                            imdb_id = movie[0]['imdb']
                            log.debug('Found movie via search: %s', identifier)
                            if imdb_id: break
                else:
                    log.debug('Identifier too short to use for search: %s', identifier)

        if imdb_id:
            return fireEvent('library.add', attrs = {
                'identifier': imdb_id
            }, update_after = False, single = True)

        log.error('No imdb_id found for %s. Add an NFO file with the IMDB id or add the year to the filename.', group['identifiers'])
        return {}
Example #54
0
    def add(self,
            params=None,
            force_readd=True,
            search_after=True,
            update_after=True,
            notify_after=True,
            status=None):
        if not params: params = {}

        # Make sure it's a correct zero filled imdb id
        params['identifier'] = getImdb(params.get('identifier', ''))

        if not params.get('identifier'):
            msg = 'Can\'t add movie without imdb identifier.'
            log.error(msg)
            fireEvent('notify.frontend', type='movie.is_tvshow', message=msg)
            return False
        elif not params.get('info'):
            try:
                is_movie = fireEvent('movie.is_movie',
                                     identifier=params.get('identifier'),
                                     adding=True,
                                     single=True)
                if not is_movie:
                    msg = 'Can\'t add movie, seems to be a TV show.'
                    log.error(msg)
                    fireEvent('notify.frontend',
                              type='movie.is_tvshow',
                              message=msg)
                    return False
            except:
                pass

        info = params.get('info')
        if not info or (info and len(info.get('titles', [])) == 0):
            info = fireEvent('movie.info',
                             merge=True,
                             extended=False,
                             identifier=params.get('identifier'))

        # Allow force re-add overwrite from param
        if 'force_readd' in params:
            fra = params.get('force_readd')
            force_readd = fra.lower() not in [
                '0', '-1'
            ] if not isinstance(fra, bool) else fra

        # Set default title
        def_title = self.getDefaultTitle(info)

        # Default profile and category
        default_profile = {}
        if (not params.get('profile_id') and status != 'done') or params.get(
                'ignore_previous', False):
            default_profile = fireEvent('profile.default', single=True)
        cat_id = params.get('category_id')

        try:
            db = get_db()

            media = {
                '_t': 'media',
                'type': 'movie',
                'title': def_title,
                'identifiers': {
                    'imdb': params.get('identifier')
                },
                'status': status if status else 'active',
                'profile_id': params.get('profile_id') or default_profile.get('_id'),
                'category_id': cat_id if cat_id is not None and len(cat_id) > 0 and cat_id != '-1' else None,
            }

            # Update movie info
            try:
                del info['in_wanted']
            except:
                pass
            try:
                del info['in_library']
            except:
                pass
            media['info'] = info

            new = False
            previous_profile = None
            try:
                m = db.get('media',
                           'imdb-%s' % params.get('identifier'),
                           with_doc=True)['doc']

                try:
                    db.get('id', m.get('profile_id'))
                    previous_profile = m.get('profile_id')
                except RecordNotFound:
                    pass
                except:
                    log.error('Failed getting previous profile: %s',
                              traceback.format_exc())
            except:
                new = True
                m = db.insert(media)

            # Update dict to be usable
            m.update(media)

            added = True
            do_search = False
            search_after = search_after and self.conf('search_on_add',
                                                      section='moviesearcher')
            onComplete = None

            if new:
                if search_after:
                    onComplete = self.createOnComplete(m['_id'])
                search_after = False
            elif force_readd:

                # Clean snatched history
                for release in fireEvent('release.for_media',
                                         m['_id'],
                                         single=True):
                    if release.get('status') in [
                            'downloaded', 'snatched', 'seeding', 'done'
                    ]:
                        if params.get('ignore_previous', False):
                            fireEvent('release.update_status',
                                      release['_id'],
                                      status='ignored')
                        else:
                            fireEvent('release.delete',
                                      release['_id'],
                                      single=True)

                m['profile_id'] = (
                    params.get('profile_id') or default_profile.get('_id')
                ) if not previous_profile else previous_profile
                m['category_id'] = cat_id if cat_id is not None and len(
                    cat_id) > 0 else (m.get('category_id') or None)
                m['last_edit'] = int(time.time())
                m['tags'] = []

                do_search = True
                db.update(m)
            else:
                try:
                    del params['info']
                except:
                    pass
                log.debug('Movie already exists, not updating: %s', params)
                added = False

            # Trigger update info
            if added and update_after:
                # Do full update to get images etc
                fireEventAsync('movie.update',
                               m['_id'],
                               default_title=params.get('title'),
                               on_complete=onComplete)

            # Remove releases
            for rel in fireEvent('release.for_media', m['_id'], single=True):
                if rel['status'] == 'available':
                    db.delete(rel)

            movie_dict = fireEvent('media.get', m['_id'], single=True)
            if not movie_dict:
                log.debug('Failed adding media, can\'t find it anymore')
                return False

            if do_search and search_after:
                onComplete = self.createOnComplete(m['_id'])
                onComplete()

            if added and notify_after:

                if params.get('title'):
                    message = 'Successfully added "%s" to your wanted list.' % params.get(
                        'title', '')
                else:
                    title = getTitle(m)
                    if title:
                        message = 'Successfully added "%s" to your wanted list.' % title
                    else:
                        message = 'Successfully added to your wanted list.'
                fireEvent('notify.frontend',
                          type='movie.added',
                          data=movie_dict,
                          message=message)

            return movie_dict
        except:
            log.error('Failed adding media: %s', traceback.format_exc())
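
When force_readd hits an existing movie, the add() snippets either mark previously snatched/downloaded releases as ignored or delete them, depending on ignore_previous. A tiny sketch of that branching with stand-in release dicts (not the real release documents):

def clean_snatched(releases, ignore_previous):
    # Keep old releases but mark them 'ignored', or drop them entirely.
    kept = []
    for release in releases:
        if release.get('status') in ['downloaded', 'snatched', 'seeding', 'done']:
            if ignore_previous:
                release['status'] = 'ignored'
                kept.append(release)
            # else: drop the release (the real code fires 'release.delete')
        else:
            kept.append(release)
    return kept

print(clean_snatched([{'status': 'snatched'}, {'status': 'available'}], True))
# [{'status': 'ignored'}, {'status': 'available'}]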
Example #55
0
    def add(self, params = None, force_readd = True, search_after = True, update_after = True, notify_after = True, status = None):
        if not params: params = {}

        # Make sure it's a correct zero filled imdb id
        params['identifier'] = getImdb(params.get('identifier', ''))

        if not params.get('identifier'):
            msg = 'Can\'t add movie without imdb identifier.'
            log.error(msg)
            fireEvent('notify.frontend', type = 'movie.is_tvshow', message = msg)
            return False
        elif not params.get('info'):
            try:
                is_movie = fireEvent('movie.is_movie', identifier = params.get('identifier'), single = True)
                if not is_movie:
                    msg = 'Can\'t add movie, seems to be a TV show.'
                    log.error(msg)
                    fireEvent('notify.frontend', type = 'movie.is_tvshow', message = msg)
                    return False
            except:
                pass

        info = params.get('info')
        if not info or (info and len(info.get('titles', [])) == 0):
            info = fireEvent('movie.info', merge = True, extended = False, identifier = params.get('identifier'))

        # Set default title
        default_title = toUnicode(info.get('title'))
        titles = info.get('titles', [])
        counter = 0
        def_title = None
        for title in titles:
            if (len(default_title) == 0 and counter == 0) or len(titles) == 1 or title.lower() == toUnicode(default_title.lower()) or (toUnicode(default_title) == six.u('') and toUnicode(titles[0]) == title):
                def_title = toUnicode(title)
                break
            counter += 1

        if not def_title:
            def_title = toUnicode(titles[0])

        # Default profile and category
        default_profile = {}
        if (not params.get('profile_id') and status != 'done') or params.get('ignore_previous', False):
            default_profile = fireEvent('profile.default', single = True)
        cat_id = params.get('category_id')

        try:
            db = get_db()

            media = {
                '_t': 'media',
                'type': 'movie',
                'title': def_title,
                'identifiers': {
                    'imdb': params.get('identifier')
                },
                'status': status if status else 'active',
                'profile_id': params.get('profile_id') or default_profile.get('_id'),
                'category_id': cat_id if cat_id is not None and len(cat_id) > 0 and cat_id != '-1' else None,
            }

            # Update movie info
            try: del info['in_wanted']
            except: pass
            try: del info['in_library']
            except: pass
            media['info'] = info

            new = False
            previous_profile = None
            try:
                m = db.get('media', 'imdb-%s' % params.get('identifier'), with_doc = True)['doc']

                try:
                    db.get('id', m.get('profile_id'))
                    previous_profile = m.get('profile_id')
                except RecordNotFound:
                    pass
                except:
                    log.error('Failed getting previous profile: %s', traceback.format_exc())
            except:
                new = True
                m = db.insert(media)

            # Update dict to be usable
            m.update(media)

            added = True
            do_search = False
            search_after = search_after and self.conf('search_on_add', section = 'moviesearcher')
            onComplete = None

            if new:
                if search_after:
                    onComplete = self.createOnComplete(m['_id'])
                search_after = False
            elif force_readd:

                # Clean snatched history
                for release in fireEvent('release.for_media', m['_id'], single = True):
                    if release.get('status') in ['downloaded', 'snatched', 'seeding', 'done']:
                        if params.get('ignore_previous', False):
                            fireEvent('release.update_status', release['_id'], status = 'ignored')
                        else:
                            fireEvent('release.delete', release['_id'], single = True)

                m['profile_id'] = (params.get('profile_id') or default_profile.get('_id')) if not previous_profile else previous_profile
                m['category_id'] = cat_id if cat_id is not None and len(cat_id) > 0 else (m.get('category_id') or None)
                m['last_edit'] = int(time.time())
                m['tags'] = []

                do_search = True
                db.update(m)
            else:
                try: del params['info']
                except: pass
                log.debug('Movie already exists, not updating: %s', params)
                added = False

            # Trigger update info
            if added and update_after:
                # Do full update to get images etc
                fireEventAsync('movie.update', m['_id'], default_title = params.get('title'), on_complete = onComplete)

            # Remove releases
            for rel in fireEvent('release.for_media', m['_id'], single = True):
                if rel['status'] == 'available':
                    db.delete(rel)

            movie_dict = fireEvent('media.get', m['_id'], single = True)
            if not movie_dict:
                log.debug('Failed adding media, can\'t find it anymore')
                return False

            if do_search and search_after:
                onComplete = self.createOnComplete(m['_id'])
                onComplete()

            if added and notify_after:

                if params.get('title'):
                    message = 'Successfully added "%s" to your wanted list.' % params.get('title', '')
                else:
                    title = getTitle(m)
                    if title:
                        message = 'Successfully added "%s" to your wanted list.' % title
                    else:
                        message = 'Successfully added to your wanted list.'
                fireEvent('notify.frontend', type = 'movie.added', data = movie_dict, message = message)

            return movie_dict
        except:
            log.error('Failed adding media: %s', traceback.format_exc())
Example #56
0
    def getMovie(self, url):
        try:
            data = self.getUrl(url)
        except:
            data = ''
        return self.getInfo(getImdb(data))