Code example #1
File: pushover.py Project: michaelsexton/WhatPotato
    def notify(self, message = '', data = None, listener = None):
        if not data: data = {}

        api_data = {
            'user': self.conf('user_key'),
            'token': self.conf('api_token'),
            'message': toUnicode(message),
            'priority': self.conf('priority'),
            'sound': self.conf('sound'),
        }

        if data and getIdentifier(data):
            api_data.update({
                'url': toUnicode('http://www.imdb.com/title/%s/' % getIdentifier(data)),
                'url_title': toUnicode('%s on IMDb' % getTitle(data)),
            })

        try:
            data = self.urlopen('%s/%s' % (self.api_url, '1/messages.json'),
                headers = {'Content-type': 'application/x-www-form-urlencoded'},
                data = api_data)
            log.info2('Pushover responded with: %s', data)
            return True
        except:
            return False
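Note: every example on this page demonstrates getIdentifier(...). Judging from how it is used (the IMDb URL in example #1, the 'tt\d{7}' match in example #16, and the 'identifiers' dict built in example #9), it appears to return the media document's IMDb id. A minimal sketch of such a helper, given here as an assumption rather than the project's actual implementation:

    def getIdentifier(media):
        # Assumed behavior: return the IMDb id (e.g. 'tt0133093') stored on the
        # media document, or None when it is missing.
        return (media or {}).get('identifiers', {}).get('imdb')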
Code example #2
File: main.py Project: michaelsexton/WhatPotato
    def updateReleaseDate(self, media_id):
        """
        Update release_date (eta) info only

        @param media_id: document id
        @return: dict, with dates dvd, theater, bluray, expires
        """

        try:
            db = get_db()

            media = db.get('id', media_id)

            if not media.get('info'):
                media = self.update(media_id)
                dates = media.get('info', {}).get('release_date')
            else:
                dates = media.get('info').get('release_date')

            if not dates or dates.get('expires', 0) < time.time() or dates.get('expires', 0) > time.time() + (604800 * 4):
                dates = fireEvent('movie.info.release_date', identifier = getIdentifier(media), merge = True)
                media['info'].update({'release_date': dates})
                db.update(media)

            return dates
        except:
            log.error('Failed updating release dates: %s', traceback.format_exc())

        return {}
Code example #3
 def buildUrl(self, media, host):
     arguments = tryUrlencode({
         'user': host['name'],
         'passkey': host['pass_key'],
         'imdbid': getIdentifier(media),
     })
     return '%s?%s' % (host['host'], arguments)
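For illustration, a hypothetical host entry shows the kind of URL buildUrl would return; the values below are invented, only the parameter names (user, passkey, imdbid) come from the code above:

    host = {'host': 'https://tracker.example.com/api', 'name': 'someuser', 'pass_key': 'abc123'}
    # buildUrl(media, host) would then return roughly:
    # https://tracker.example.com/api?user=someuser&passkey=abc123&imdbid=tt0133093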
Code example #4
    def findViaAlternative(self, group):
        results = {"480p": [], "720p": [], "1080p": []}

        movie_name = getTitle(group)

        url = "%s?%s" % (self.urls["backup"], tryUrlencode({"s": movie_name}))
        try:
            data = self.getCache("hdtrailers.alt.%s" % getIdentifier(group), url, show_error=False)
        except HTTPError:
            log.debug("No alternative page found for: %s", movie_name)
            data = None

        if not data:
            return results

        try:
            html = BeautifulSoup(data, parse_only=self.only_tables_tags)
            result_table = html.find_all("h2", text=re.compile(movie_name))

            for h2 in result_table:
                if "trailer" in h2.get_text().lower():
                    parent = h2.parent.parent.parent
                    trailerLinks = parent.find_all("a", text=re.compile("480p|720p|1080p"))
                    try:
                        for trailer in trailerLinks:
                            results[trailer.get_text()].insert(0, trailer.parent["href"])
                    except:
                        pass

        except AttributeError:
            log.debug("No trailers found in via alternative.")

        return results
Code example #5
    def search(self, group):

        movie_name = getTitle(group)

        url = self.urls["api"] % self.movieUrlName(movie_name)
        try:
            data = self.getCache("hdtrailers.%s" % getIdentifier(group), url, show_error=False)
        except HTTPError:
            log.debug("No page found for: %s", movie_name)
            data = None

        result_data = {"480p": [], "720p": [], "1080p": []}

        if not data:
            return result_data

        did_alternative = False
        for provider in self.providers:
            results = self.findByProvider(data, provider)

            # Find alternative
            if results.get("404") and not did_alternative:
                results = self.findViaAlternative(group)
                did_alternative = True

            result_data = mergeDicts(result_data, results)

        return result_data
Code example #6
    def updateSuggestionCache(self, ignore_imdb = None, limit = 6, ignored = None, seen = None):

        # Combine with previous suggestion_cache
        cached_suggestion = self.getCache('suggestion_cached') or []
        new_suggestions = []
        ignored = [] if not ignored else ignored
        seen = [] if not seen else seen

        if ignore_imdb:
            suggested_imdbs = []
            for cs in cached_suggestion:
                if cs.get('imdb') != ignore_imdb and cs.get('imdb') not in suggested_imdbs:
                    suggested_imdbs.append(cs.get('imdb'))
                    new_suggestions.append(cs)

        # Get new results and add them
        if len(new_suggestions) - 1 < limit:
            active_movies = fireEvent('media.with_status', ['active', 'done'], single = True)
            movies = [getIdentifier(x) for x in active_movies]
            movies.extend(seen)

            ignored.extend([x.get('imdb') for x in cached_suggestion])
            suggestions = fireEvent('movie.suggest', movies = movies, ignore = removeDuplicate(ignored), single = True)

            if suggestions:
                new_suggestions.extend(suggestions)

        self.setCache('suggestion_cached', new_suggestions, timeout = 3024000)

        return new_suggestions
Code example #7
    def _search(self, media, quality, results):

        data = self.getHTMLData(self.urls['search'] % (self.getDomain(), 'm', getIdentifier(media).replace('tt', '')))

        if data:

            cat_ids = self.getCatId(quality)
            table_order = ['name', 'size', None, 'age', 'seeds', 'leechers']

            try:
                html = BeautifulSoup(data)
                resultdiv = html.find('div', attrs = {'class': 'tabs'})
                for result in resultdiv.find_all('div', recursive = False):
                    if result.get('id').lower().strip('tab-') not in cat_ids:
                        continue

                    try:
                        for temp in result.find_all('tr'):
                            if temp['class'] == 'firstr' or not temp.get('id'):
                                continue

                            new = {}

                            nr = 0
                            for td in temp.find_all('td'):
                                column_name = table_order[nr]
                                if column_name:

                                    if column_name == 'name':
                                        link = td.find('div', {'class': 'torrentname'}).find_all('a')[2]
                                        new['id'] = temp.get('id')[-7:]
                                        new['name'] = link.text
                                        new['url'] = td.find('a', {'href': re.compile('magnet:*')})['href']
                                        new['detail_url'] = self.urls['detail'] % (self.getDomain(), link['href'][1:])
                                        new['verified'] = True if td.find('i', {'class': re.compile('verify')}) else False
                                        new['score'] = 100 if new['verified'] else 0
                                    elif column_name == 'size':
                                        new['size'] = self.parseSize(td.text)
                                    elif column_name == 'age':
                                        new['age'] = self.ageToDays(td.text)
                                    elif column_name == 'seeds':
                                        new['seeders'] = tryInt(td.text)
                                    elif column_name == 'leechers':
                                        new['leechers'] = tryInt(td.text)

                                nr += 1

                            # Only store verified torrents
                            if self.conf('only_verified') and not new['verified']:
                                continue

                            results.append(new)
                    except:
                        log.error('Failed parsing KickAssTorrents: %s', traceback.format_exc())

            except AttributeError:
                log.debug('No search results found.')
Code example #8
File: base.py Project: michaelsexton/WhatPotato
    def create(self, message=None, group=None):
        if self.isDisabled():
            return
        if not group:
            group = {}

        log.info("Creating %s metadata.", self.getName())

        # Update library to get latest info
        try:
            group["media"] = fireEvent(
                "movie.update",
                group["media"].get("_id"),
                identifier=getIdentifier(group["media"]),
                extended=True,
                single=True,
            )
        except:
            log.error("Failed to update movie, before creating metadata: %s", traceback.format_exc())

        root_name = self.getRootName(group)
        meta_name = os.path.basename(root_name)
        root = os.path.dirname(root_name)

        movie_info = group["media"].get("info")

        for file_type in ["nfo"]:
            try:
                self._createType(meta_name, root, movie_info, group, file_type, 0)
            except:
                log.error("Unable to create %s file: %s", ("nfo", traceback.format_exc()))

        for file_type in [
            "thumbnail",
            "fanart",
            "banner",
            "disc_art",
            "logo",
            "clear_art",
            "landscape",
            "extra_thumbs",
            "extra_fanart",
        ]:
            try:
                if file_type == "thumbnail":
                    num_images = len(movie_info["images"]["poster_original"])
                elif file_type == "fanart":
                    num_images = len(movie_info["images"]["backdrop_original"])
                else:
                    num_images = len(movie_info["images"][file_type])

                for i in range(num_images):
                    self._createType(meta_name, root, movie_info, group, file_type, i)
            except:
                log.error("Unable to create %s file: %s", (file_type, traceback.format_exc()))
Code example #9
    def suggestView(self, limit = 6, **kwargs):
        if self.isDisabled():
            return {
                'success': True,
                'movies': []
            }

        movies = splitString(kwargs.get('movies', ''))
        ignored = splitString(kwargs.get('ignored', ''))
        seen = splitString(kwargs.get('seen', ''))

        cached_suggestion = self.getCache('suggestion_cached')
        if cached_suggestion:
            suggestions = cached_suggestion
        else:

            if not movies or len(movies) == 0:
                active_movies = fireEvent('media.with_status', ['active', 'done'], types = 'movie', single = True)
                movies = [getIdentifier(x) for x in active_movies]

            if not ignored or len(ignored) == 0:
                ignored = splitString(Env.prop('suggest_ignore', default = ''))
            if not seen or len(seen) == 0:
                movies.extend(splitString(Env.prop('suggest_seen', default = '')))

            suggestions = fireEvent('movie.suggest', movies = movies, ignore = ignored, single = True)
            self.setCache('suggestion_cached', suggestions, timeout = 6048000)  # Cache for 10 weeks

        medias = []
        for suggestion in suggestions[:int(limit)]:

            # Cache poster
            posters = suggestion.get('images', {}).get('poster', [])
            poster = [x for x in posters if 'tmdb' in x]
            posters = poster if len(poster) > 0 else posters

            cached_poster = fireEvent('file.download', url = posters[0], single = True) if len(posters) > 0 else False
            files = {'image_poster': [cached_poster] } if cached_poster else {}

            medias.append({
                'status': 'suggested',
                'title': getTitle(suggestion),
                'type': 'movie',
                'info': suggestion,
                'files': files,
                'identifiers': {
                    'imdb': suggestion.get('imdb')
                }
            })

        return {
            'success': True,
            'movies': medias
        }
Code example #10
File: webhook.py Project: michaelsexton/WhatPotato
    def notify(self, message = '', data = None, listener = None):
        if not data: data = {}

        post_data = {
            'message': toUnicode(message)
        }

        if getIdentifier(data):
            post_data.update({
                'imdb_id': getIdentifier(data)
            })

        headers = {
            'Content-type': 'application/x-www-form-urlencoded'
        }

        try:
            self.urlopen(self.conf('url'), headers = headers, data = post_data, show_error = False)
            return True
        except:
            log.error('Webhook notification failed: %s', traceback.format_exc())

        return False
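As a usage note, this notifier simply POSTs an urlencoded form to the configured URL. A small sketch of the resulting request body, with invented values; only the field names (message, imdb_id) come from post_data above:

    from urllib import urlencode  # Python 2, matching this codebase

    # Invented example values; only the field names come from post_data above
    print(urlencode({'message': 'Downloaded Inception (2010)', 'imdb_id': 'tt1375666'}))
    # e.g. message=Downloaded+Inception+%282010%29&imdb_id=tt1375666 (parameter order may vary)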
Code example #11
File: base.py Project: michaelsexton/WhatPotato
    def cpTag(self, media, unique_tag = False):

        tag = ''
        if Env.setting('enabled', 'renamer') or unique_tag:
            identifier = getIdentifier(media) or ''
            unique_tag = ', ' + randomString() if unique_tag else ''

            tag = '.cp('
            tag += identifier
            tag += ', ' if unique_tag and identifier else ''
            tag += randomString() if unique_tag else ''
            tag += ')'

        return tag if len(tag) > 7 else ''
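For reference, a few illustrative return values, assuming the renamer is enabled and getIdentifier(media) returns 'tt0133093' (the random suffix shown is made up):

    # cpTag(media)                     -> '.cp(tt0133093)'
    # cpTag(media, unique_tag = True)  -> '.cp(tt0133093, 4fz9skq2md)'
    # cpTag(media_without_identifier)  -> ''  (a bare '.cp()' fails the length check and is dropped)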
Code example #12
File: awesomehd.py Project: michaelsexton/WhatPotato
    def _search(self, movie, quality, results):

        data = self.getHTMLData(self.urls['search'] % (self.conf('passkey'), getIdentifier(movie), self.conf('only_internal')))

        if data:
            try:
                soup = BeautifulSoup(data)

                if soup.find('error'):
                    log.error(soup.find('error').get_text())
                    return

                authkey = soup.find('authkey').get_text()
                entries = soup.find_all('torrent')

                for entry in entries:

                    torrentscore = 0
                    torrent_id = entry.find('id').get_text()
                    name = entry.find('name').get_text()
                    year = entry.find('year').get_text()
                    releasegroup = entry.find('releasegroup').get_text()
                    resolution = entry.find('resolution').get_text()
                    encoding = entry.find('encoding').get_text()
                    freeleech = entry.find('freeleech').get_text()
                    torrent_desc = '/ %s / %s / %s ' % (releasegroup, resolution, encoding)

                    if freeleech == '0.25' and self.conf('prefer_internal'):
                        torrent_desc += '/ Internal'
                        torrentscore += 200

                    if encoding == 'x264' and self.conf('favor') in ['encode', 'both']:
                        torrentscore += 300
                    if re.search('Remux', encoding) and self.conf('favor') in ['remux', 'both']:
                        torrentscore += 200

                    results.append({
                        'id': torrent_id,
                        'name': re.sub('[^A-Za-z0-9\-_ \(\).]+', '', '%s (%s) %s' % (name, year, torrent_desc)),
                        'url': self.urls['download'] % (torrent_id, authkey, self.conf('passkey')),
                        'detail_url': self.urls['detail'] % torrent_id,
                        'size': tryInt(entry.find('size').get_text()) / 1048576,
                        'seeders': tryInt(entry.find('seeders').get_text()),
                        'leechers': tryInt(entry.find('leechers').get_text()),
                        'score': torrentscore
                    })

            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
Code example #13
File: binsearch.py Project: michaelsexton/WhatPotato
 def buildUrl(self, media, quality):
     query = tryUrlencode(
         {
             "q": getIdentifier(media),
             "m": "n",
             "max": 400,
             "adv_age": Env.setting("retention", "nzb"),
             "adv_sort": "date",
             "adv_col": "on",
             "adv_nfo": "on",
             "minsize": quality.get("size_min"),
             "maxsize": quality.get("size_max"),
         }
     )
     return query
Code example #14
File: searcher.py Project: michaelsexton/WhatPotato
    def searchAll(self, manual = False):

        if self.in_progress:
            log.info('Search already in progress')
            fireEvent('notify.frontend', type = 'movie.searcher.already_started', data = True, message = 'Full search already in progress')
            return

        self.in_progress = True
        fireEvent('notify.frontend', type = 'movie.searcher.started', data = True, message = 'Full search started')

        medias = [x['_id'] for x in fireEvent('media.with_status', 'active', types = 'movie', with_doc = False, single = True)]
        random.shuffle(medias)

        total = len(medias)
        self.in_progress = {
            'total': total,
            'to_go': total,
        }

        try:
            search_protocols = fireEvent('searcher.protocols', single = True)

            for media_id in medias:

                media = fireEvent('media.get', media_id, single = True)
                if not media: continue

                try:
                    self.single(media, search_protocols, manual = manual)
                except IndexError:
                    log.error('Forcing library update for %s, if you see this often, please report: %s', (getIdentifier(media), traceback.format_exc()))
                    fireEvent('movie.update', media_id)
                except:
                    log.error('Search failed for %s: %s', (getIdentifier(media), traceback.format_exc()))

                self.in_progress['to_go'] -= 1

                # Break if CP wants to shut down
                if self.shuttingDown():
                    break

        except SearchSetupError:
            pass

        self.in_progress = False
Code example #15
File: trakt.py Project: michaelsexton/WhatPotato
    def notify(self, message = '', data = None, listener = None):
        if not data: data = {}

        if listener == 'test':
            result = self.call((self.urls['test']))

            return result

        else:

            post_data = {
                'movies': [{'ids': {'imdb': getIdentifier(data)}}] if data else []
            }

            result = self.call((self.urls['library']), post_data)
            if self.conf('remove_watchlist_enabled'):
                result = result and self.call((self.urls['unwatchlist']), post_data)

            return result
Code example #16
File: hdbits.py Project: michaelsexton/WhatPotato
    def _search(self, movie, quality, results):

        match = re.match(r'tt(\d{7})', getIdentifier(movie))

        data = self._post_query(imdb = {'id': match.group(1)})

        if data:
            try:
                for result in data:
                    results.append({
                        'id': result['id'],
                        'name': result['name'],
                        'url': self.urls['download'] % (result['id'], self.conf('passkey')),
                        'detail_url': self.urls['detail'] % result['id'],
                        'size': tryInt(result['size']) / 1024 / 1024,
                        'seeders': tryInt(result['seeders']),
                        'leechers': tryInt(result['leechers'])
                    })
            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
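Note that this provider is queried with the bare numeric IMDb id, so the 'tt' prefix is stripped with a regex; as written the pattern assumes the classic 7-digit form. A standalone illustration:

    import re

    match = re.match(r'tt(\d{7})', 'tt0133093')  # illustrative id
    print(match.group(1))  # -> 0133093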
Code example #17
File: searcher.py Project: michaelsexton/WhatPotato
    def correctRelease(self, nzb = None, media = None, quality = None, **kwargs):

        if media.get('type') != 'movie': return

        media_title = fireEvent('searcher.get_search_title', media, single = True)

        imdb_results = kwargs.get('imdb_results', False)
        retention = Env.setting('retention', section = 'nzb')

        if nzb.get('seeders') is None and 0 < retention < nzb.get('age', 0):
            log.info2('Wrong: Outside retention, age is %s, needs %s or lower: %s', (nzb['age'], retention, nzb['name']))
            return False

        # Check for required and ignored words
        if not fireEvent('searcher.correct_words', nzb['name'], media, single = True):
            return False

        preferred_quality = quality if quality else fireEvent('quality.single', identifier = quality['identifier'], single = True)

        # Contains lower quality string
        contains_other = fireEvent('searcher.contains_other_quality', nzb, movie_year = media['info']['year'], preferred_quality = preferred_quality, single = True)
        if contains_other and isinstance(contains_other, dict):
            log.info2('Wrong: %s, looking for %s, found %s', (nzb['name'], quality['label'], [x for x in contains_other] if contains_other else 'no quality'))
            return False

        # Check that the 3D preference matches
        if not fireEvent('searcher.correct_3d', nzb, preferred_quality = preferred_quality, single = True):
            log.info2('Wrong: %s, %slooking for %s in 3D', (nzb['name'], ('' if preferred_quality['custom'].get('3d') else 'NOT '), quality['label']))
            return False

        # File too small
        if nzb['size'] and tryInt(preferred_quality['size_min']) > tryInt(nzb['size']):
            log.info2('Wrong: "%s" is too small to be %s. %sMB instead of the minimal of %sMB.', (nzb['name'], preferred_quality['label'], nzb['size'], preferred_quality['size_min']))
            return False

        # File too large
        if nzb['size'] and tryInt(preferred_quality['size_max']) < tryInt(nzb['size']):
            log.info2('Wrong: "%s" is too large to be %s. %sMB instead of the maximum of %sMB.', (nzb['name'], preferred_quality['label'], nzb['size'], preferred_quality['size_max']))
            return False

        # Provider specific functions
        get_more = nzb.get('get_more_info')
        if get_more:
            get_more(nzb)

        extra_check = nzb.get('extra_check')
        if extra_check and not extra_check(nzb):
            return False


        if imdb_results:
            return True

        # Check if nzb contains imdb link
        if getImdb(nzb.get('description', '')) == getIdentifier(media):
            return True

        for raw_title in media['info']['titles']:
            for movie_title in possibleTitles(raw_title):
                movie_words = re.split('\W+', simplifyString(movie_title))

                if fireEvent('searcher.correct_name', nzb['name'], movie_title, single = True):
                    # if no IMDB link, at least check year range 1
                    if len(movie_words) > 2 and fireEvent('searcher.correct_year', nzb['name'], media['info']['year'], 1, single = True):
                        return True

                    # if no IMDB link, at least check year
                    if len(movie_words) <= 2 and fireEvent('searcher.correct_year', nzb['name'], media['info']['year'], 0, single = True):
                        return True

        log.info("Wrong: %s, undetermined naming. Looking for '%s (%s)'", (nzb['name'], media_title, media['info']['year']))
        return False
Code example #18
File: scanner.py Project: michaelsexton/WhatPotato
    def scan(self, folder = None, files = None, release_download = None, simple = False, newer_than = 0, return_ignored = True, check_file_date = True, on_found = None):

        folder = sp(folder)

        if not folder or not os.path.isdir(folder):
            log.error('Folder doesn\'t exist: %s', folder)
            return {}

        # Get movie "master" files
        movie_files = {}
        leftovers = []

        # Scan all files of the folder if no files are set
        if not files:
            try:
                files = []
                for root, dirs, walk_files in os.walk(folder, followlinks=True):
                    files.extend([sp(os.path.join(sp(root), ss(filename))) for filename in walk_files])

                    # Break if CP wants to shut down
                    if self.shuttingDown():
                        break

            except:
                log.error('Failed getting files from %s: %s', (folder, traceback.format_exc()))

            log.debug('Found %s files to scan and group in %s', (len(files), folder))
        else:
            check_file_date = False
            files = [sp(x) for x in files]

        for file_path in files:

            if not os.path.exists(file_path):
                continue

            # Remove ignored files
            if self.isSampleFile(file_path):
                leftovers.append(file_path)
                continue
            elif not self.keepFile(file_path):
                continue

            is_dvd_file = self.isDVDFile(file_path)
            if self.filesizeBetween(file_path, self.file_sizes['movie']) or is_dvd_file: # Minimal 300MB files or is DVD file

                # Normal identifier
                identifier = self.createStringIdentifier(file_path, folder, exclude_filename = is_dvd_file)
                identifiers = [identifier]

                # Identifier with quality
                quality = fireEvent('quality.guess', files = [file_path], size = self.getFileSize(file_path), single = True) if not is_dvd_file else {'identifier':'dvdr'}
                if quality:
                    identifier_with_quality = '%s %s' % (identifier, quality.get('identifier', ''))
                    identifiers = [identifier_with_quality, identifier]

                if not movie_files.get(identifier):
                    movie_files[identifier] = {
                        'unsorted_files': [],
                        'identifiers': identifiers,
                        'is_dvd': is_dvd_file,
                    }

                movie_files[identifier]['unsorted_files'].append(file_path)
            else:
                leftovers.append(file_path)

            # Break if CP wants to shut down
            if self.shuttingDown():
                break

        # Cleanup
        del files

        # Sort reverse, this prevents "Iron man 2" from getting grouped with "Iron man" as the "Iron Man 2"
        # files will be grouped first.
        leftovers = set(sorted(leftovers, reverse = True))

        # Group files minus extension
        ignored_identifiers = []
        for identifier, group in movie_files.items():
            if identifier not in group['identifiers'] and len(identifier) > 0: group['identifiers'].append(identifier)

            log.debug('Grouping files: %s', identifier)

            has_ignored = 0
            for file_path in list(group['unsorted_files']):
                ext = getExt(file_path)
                wo_ext = file_path[:-(len(ext) + 1)]
                found_files = set([i for i in leftovers if wo_ext in i])
                group['unsorted_files'].extend(found_files)
                leftovers = leftovers - found_files

                has_ignored += 1 if ext == 'ignore' else 0

            if has_ignored == 0:
                for file_path in list(group['unsorted_files']):
                    ext = getExt(file_path)
                    has_ignored += 1 if ext == 'ignore' else 0

            if has_ignored > 0:
                ignored_identifiers.append(identifier)

            # Break if CP wants to shut down
            if self.shuttingDown():
                break


        # Create identifiers for all leftover files
        path_identifiers = {}
        for file_path in leftovers:
            identifier = self.createStringIdentifier(file_path, folder)

            if not path_identifiers.get(identifier):
                path_identifiers[identifier] = []

            path_identifiers[identifier].append(file_path)


        # Group the files based on the identifier
        delete_identifiers = []
        for identifier, found_files in path_identifiers.items():
            log.debug('Grouping files on identifier: %s', identifier)

            group = movie_files.get(identifier)
            if group:
                group['unsorted_files'].extend(found_files)
                delete_identifiers.append(identifier)

                # Remove the found files from the leftover stack
                leftovers = leftovers - set(found_files)

            # Break if CP wants to shut down
            if self.shuttingDown():
                break

        # Cleaning up used
        for identifier in delete_identifiers:
            if path_identifiers.get(identifier):
                del path_identifiers[identifier]
        del delete_identifiers

        # Group based on folder
        delete_identifiers = []
        for identifier, found_files in path_identifiers.items():
            log.debug('Grouping files on foldername: %s', identifier)

            for ff in found_files:
                new_identifier = self.createStringIdentifier(os.path.dirname(ff), folder)

                group = movie_files.get(new_identifier)
                if group:
                    group['unsorted_files'].extend([ff])
                    delete_identifiers.append(identifier)

                    # Remove the found files from the leftover stack
                    leftovers = leftovers - set([ff])

            # Break if CP wants to shut down
            if self.shuttingDown():
                break

        # leftovers should be empty
        if leftovers:
            log.debug('Some files are still left over: %s', leftovers)

        # Cleaning up used
        for identifier in delete_identifiers:
            if path_identifiers.get(identifier):
                del path_identifiers[identifier]
        del delete_identifiers

        # Make sure we remove older / still extracting files
        valid_files = {}
        while not self.shuttingDown():
            try:
                identifier, group = movie_files.popitem()
            except:
                break

            # Check if movie is fresh and maybe still unpacking, ignore files newer than 1 minute
            if check_file_date:
                files_too_new, time_string = self.checkFilesChanged(group['unsorted_files'])
                if files_too_new:
                    log.info('Files seem to be still unpacking or just unpacked (created on %s), ignoring for now: %s', (time_string, identifier))

                    # Delete the unsorted list
                    del group['unsorted_files']

                    continue

            # Only process movies newer than x
            if newer_than and newer_than > 0:
                has_new_files = False
                for cur_file in group['unsorted_files']:
                    file_time = self.getFileTimes(cur_file)
                    if file_time[0] > newer_than or file_time[1] > newer_than:
                        has_new_files = True
                        break

                if not has_new_files:
                    log.debug('None of the files have changed since %s for %s, skipping.', (time.ctime(newer_than), identifier))

                    # Delete the unsorted list
                    del group['unsorted_files']

                    continue

            valid_files[identifier] = group

        del movie_files

        total_found = len(valid_files)

        # Make sure only one movie was found if a download ID is provided
        if release_download and total_found == 0:
            log.info('Download ID provided (%s), but no groups found! Make sure the download contains valid media files (fully extracted).', release_download.get('imdb_id'))
        elif release_download and total_found > 1:
            log.info('Download ID provided (%s), but more than one group found (%s). Ignoring Download ID...', (release_download.get('imdb_id'), len(valid_files)))
            release_download = None

        # Determine file types
        processed_movies = {}
        while not self.shuttingDown():
            try:
                identifier, group = valid_files.popitem()
            except:
                break

            if return_ignored is False and identifier in ignored_identifiers:
                log.debug('Ignore file found, ignoring release: %s', identifier)
                total_found -= 1
                continue

            # Group extra (and easy) files first
            group['files'] = {
                'movie_extra': self.getMovieExtras(group['unsorted_files']),
                'subtitle': self.getSubtitles(group['unsorted_files']),
                'subtitle_extra': self.getSubtitlesExtras(group['unsorted_files']),
                'nfo': self.getNfo(group['unsorted_files']),
                'trailer': self.getTrailers(group['unsorted_files']),
                'leftover': set(group['unsorted_files']),
            }

            # Media files
            if group['is_dvd']:
                group['files']['movie'] = self.getDVDFiles(group['unsorted_files'])
            else:
                group['files']['movie'] = self.getMediaFiles(group['unsorted_files'])

            if len(group['files']['movie']) == 0:
                log.error('Couldn\'t find any movie files for %s', identifier)
                total_found -= 1
                continue

            log.debug('Getting metadata for %s', identifier)
            group['meta_data'] = self.getMetaData(group, folder = folder, release_download = release_download)

            # Subtitle meta
            group['subtitle_language'] = self.getSubtitleLanguage(group) if not simple else {}

            # Get parent dir from movie files
            for movie_file in group['files']['movie']:
                group['parentdir'] = os.path.dirname(movie_file)
                group['dirname'] = None

                folder_names = group['parentdir'].replace(folder, '').split(os.path.sep)
                folder_names.reverse()

                # Try and get a proper dirname, so no "A", "Movie", "Download" etc
                for folder_name in folder_names:
                    if folder_name.lower() not in self.ignore_names and len(folder_name) > 2:
                        group['dirname'] = folder_name
                        break

                break

            # Leftover "sorted" files
            for file_type in group['files']:
                if file_type != 'leftover':
                    group['files']['leftover'] -= set(group['files'][file_type])
                    group['files'][file_type] = list(group['files'][file_type])
            group['files']['leftover'] = list(group['files']['leftover'])

            # Delete the unsorted list
            del group['unsorted_files']

            # Determine movie
            group['media'] = self.determineMedia(group, release_download = release_download)
            if not group['media']:
                log.error('Unable to determine media: %s', group['identifiers'])
            else:
                group['identifier'] = getIdentifier(group['media']) or group['media']['info'].get('imdb')

            processed_movies[identifier] = group

            # Notify parent & progress on something found
            if on_found:
                on_found(group, total_found, len(valid_files))

            # Wait for all the async events calm down a bit
            while threading.activeCount() > 100 and not self.shuttingDown():
                log.debug('Too many threads active, waiting a few seconds')
                time.sleep(10)

        if len(processed_movies) > 0:
            log.info('Found %s movies in the folder %s', (len(processed_movies), folder))
        else:
            log.debug('Found no movies in the folder %s', folder)

        return processed_movies
Code example #19
File: manage.py Project: michaelsexton/WhatPotato
    def updateLibrary(self, full = True):
        last_update_key = 'manage.last_update%s' % ('_full' if full else '')
        last_update = float(Env.prop(last_update_key, default = 0))

        if self.in_progress:
            log.info('Already updating library: %s', self.in_progress)
            return
        elif self.isDisabled() or (last_update > time.time() - 20):
            return

        self.in_progress = {}
        fireEvent('notify.frontend', type = 'manage.updating', data = True)

        try:

            directories = self.directories()
            directories.sort()
            added_identifiers = []

            # Add some progress
            for directory in directories:
                self.in_progress[os.path.normpath(directory)] = {
                    'started': False,
                    'eta': -1,
                    'total': None,
                    'to_go': None,
                }

            for directory in directories:
                folder = os.path.normpath(directory)
                self.in_progress[os.path.normpath(directory)]['started'] = tryInt(time.time())

                if not os.path.isdir(folder):
                    if len(directory) > 0:
                        log.error('Directory doesn\'t exist: %s', folder)
                    continue

                log.info('Updating manage library: %s', folder)
                fireEvent('notify.frontend', type = 'manage.update', data = True, message = 'Scanning for movies in "%s"' % folder)

                onFound = self.createAddToLibrary(folder, added_identifiers)
                fireEvent('scanner.scan', folder = folder, simple = True, newer_than = last_update if not full else 0, check_file_date = False, on_found = onFound, single = True)

                # Break if CP wants to shut down
                if self.shuttingDown():
                    break

            # If cleanup option is enabled, remove offline files from database
            if self.conf('cleanup') and full and not self.shuttingDown():

                # Get movies with done status
                total_movies, done_movies = fireEvent('media.list', types = 'movie', status = 'done', release_status = 'done', status_or = True, single = True)

                deleted_releases = []
                for done_movie in done_movies:
                    if getIdentifier(done_movie) not in added_identifiers:
                        fireEvent('media.delete', media_id = done_movie['_id'], delete_from = 'all')
                    else:

                        releases = done_movie.get('releases', [])

                        for release in releases:
                            if release.get('files'):
                                brk = False
                                for file_type in release.get('files', {}):
                                    for release_file in release['files'][file_type]:
                                        # Remove release not available anymore
                                        if not os.path.isfile(sp(release_file)):
                                            fireEvent('release.clean', release['_id'])
                                            brk = True
                                            break
                                    if brk:
                                        break

                        # Check if there are duplicate releases (different quality) use the last one, delete the rest
                        if len(releases) > 1:
                            used_files = {}
                            for release in releases:
                                for file_type in release.get('files', {}):
                                    for release_file in release['files'][file_type]:
                                        already_used = used_files.get(release_file)

                                        if already_used:
                                            release_id = release['_id'] if already_used.get('last_edit', 0) > release.get('last_edit', 0) else already_used['_id']
                                            if release_id not in deleted_releases:
                                                fireEvent('release.delete', release_id, single = True)
                                                deleted_releases.append(release_id)
                                            break
                                        else:
                                            used_files[release_file] = release
                            del used_files

                    # Break if CP wants to shut down
                    if self.shuttingDown():
                        break

                if not self.shuttingDown():
                    db = get_db()
                    db.reindex()

            Env.prop(last_update_key, time.time())
        except:
            log.error('Failed updating library: %s', (traceback.format_exc()))

        while self.in_progress and len(self.in_progress) > 0 and not self.shuttingDown():

            delete_me = {}

            # noinspection PyTypeChecker
            for folder in self.in_progress:
                if self.in_progress[folder]['to_go'] <= 0:
                    delete_me[folder] = True

            for delete in delete_me:
                del self.in_progress[delete]

            time.sleep(1)

        fireEvent('notify.frontend', type = 'manage.updating', data = False)
        self.in_progress = False
Code example #20
File: rarbg.py Project: michaelsexton/WhatPotato
    def _search(self, movie, quality, results):
        hasresults = 0
        curryear = datetime.now().year
        movieid = getIdentifier(movie)

        try:
            movieyear = movie['info']['year']
        except:
            log.error('RARBG: Couldn\'t get movie year')
            movieyear = 0

        self.getToken()

        if (self._token != 0) and (movieyear == 0 or movieyear <= curryear):
            data = self.getJsonData(self.urls['search'] % (self._token, movieid, self.conf('min_seeders'),
                                                           self.conf('min_leechers'), self.conf('ranked_only')), headers = self.getRequestHeaders())

            if data:
                if 'error_code' in data:
                    if data['error'] == 'No results found':
                        log.debug('RARBG: No results returned from Rarbg')
                    else:
                        if data['error_code'] == 10:
                            log.error(data['error'], movieid)
                        else:
                            log.error('RARBG: There is an error in the returned JSON: %s', data['error'])
                else:
                    hasresults = 1

                try:
                    if hasresults:
                        for result in data['torrent_results']:
                            name = result['title']
                            titlesplit = re.split('-', name)
                            releasegroup = titlesplit[len(titlesplit)-1]

                            xtrainfo = self.find_info(name)
                            encoding = xtrainfo[0]
                            resolution = xtrainfo[1]
                            # source = xtrainfo[2]
                            pubdate = result['pubdate']  # .strip(' +0000')
                            try:
                                pubdate = datetime.strptime(pubdate, '%Y-%m-%d %H:%M:%S +0000')
                                now = datetime.utcnow()
                                age = (now - pubdate).days
                            except ValueError:
                                log.debug('RARBG: Bad pubdate')
                                age = 0

                            torrentscore = self.conf('extra_score')
                            seeders = tryInt(result['seeders'])
                            torrent_desc = '/ %s / %s / %s / %s seeders' % (releasegroup, resolution, encoding, seeders)

                            if seeders == 0:
                                torrentscore = 0

                            sliceyear = result['pubdate'][0:4]
                            year = tryInt(sliceyear)

                            results.append({
                                'id': random.randint(100, 9999),
                                'name': re.sub('[^A-Za-z0-9\-_ \(\).]+', '', '%s (%s) %s' % (name, year, torrent_desc)),
                                'url': result['download'],
                                'detail_url': result['info_page'],
                                'size': tryInt(result['size']/1048576),  # rarbg sends in bytes
                                'seeders': tryInt(result['seeders']),
                                'leechers': tryInt(result['leechers']),
                                'age': tryInt(age),
                                'score': torrentscore
                            })

                except RuntimeError:
                    log.error('RARBG: Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
Code example #21
File: hd4free.py Project: michaelsexton/WhatPotato
    def _search(self, movie, quality, results):
        data = self.getJsonData(self.urls['search'] % (self.conf('apikey'), self.conf('username'), getIdentifier(movie), self.conf('internal_only')))

        if data:
            try:
                #for result in data[]:
                for key, result in data.iteritems():
                    if tryInt(result['total_results']) == 0:
                        return
                    torrentscore = self.conf('extra_score')
                    releasegroup = result['releasegroup']
                    resolution = result['resolution']
                    encoding = result['encoding']
                    freeleech = tryInt(result['freeleech'])
                    seeders = tryInt(result['seeders'])
                    torrent_desc = '/ %s / %s / %s / %s seeders' % (releasegroup, resolution, encoding, seeders)

                    if freeleech > 0 and self.conf('prefer_internal'):
                        torrent_desc += '/ Internal'
                        torrentscore += 200

                    if seeders == 0:
                        torrentscore = 0

                    name = result['release_name']
                    year = tryInt(result['year'])

                    results.append({
                        'id': tryInt(result['torrentid']),
                        'name': re.sub('[^A-Za-z0-9\-_ \(\).]+', '', '%s (%s) %s' % (name, year, torrent_desc)),
                        'url': self.urls['download'] % (result['torrentid'], result['torrentpass']),
                        'detail_url': self.urls['detail'] % result['torrentid'],
                        'size': tryInt(result['size']),
                        'seeders': tryInt(result['seeders']),
                        'leechers': tryInt(result['leechers']),
                        'age': tryInt(result['age']),
                        'score': torrentscore
                    })
            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
Code example #22
    def _search(self, media, quality, results):

        movie_title = getTitle(media)
        quality_id = quality['identifier']

        params = mergeDicts(self.quality_search_params[quality_id].copy(), {
            'order_by': 'relevance',
            'order_way': 'descending',
            'searchstr': getIdentifier(media)
        })

        url = '%s?json=noredirect&%s' % (self.urls['torrent'], tryUrlencode(params))
        res = self.getJsonData(url)

        try:
            if 'Movies' not in res:
                return

            authkey = res['AuthKey']
            passkey = res['PassKey']

            for ptpmovie in res['Movies']:
                if 'Torrents' not in ptpmovie:
                    log.debug('Movie %s (%s) has NO torrents', (ptpmovie['Title'], ptpmovie['Year']))
                    continue

                log.debug('Movie %s (%s) has %d torrents', (ptpmovie['Title'], ptpmovie['Year'], len(ptpmovie['Torrents'])))
                for torrent in ptpmovie['Torrents']:
                    torrent_id = tryInt(torrent['Id'])
                    torrentdesc = '%s %s %s' % (torrent['Resolution'], torrent['Source'], torrent['Codec'])
                    torrentscore = 0

                    if 'GoldenPopcorn' in torrent and torrent['GoldenPopcorn']:
                        torrentdesc += ' HQ'
                        if self.conf('prefer_golden'):
                            torrentscore += 5000
                    if 'FreeleechType' in torrent:
                        torrentdesc += ' Freeleech'
                        if self.conf('prefer_freeleech'):
                            torrentscore += 7000
                    if 'Scene' in torrent and torrent['Scene']:
                        torrentdesc += ' Scene'
                        if self.conf('prefer_scene'):
                            torrentscore += 2000
                    if 'RemasterTitle' in torrent and torrent['RemasterTitle']:
                        torrentdesc += self.htmlToASCII(' %s' % torrent['RemasterTitle'])

                    torrentdesc += ' (%s)' % quality_id
                    torrent_name = re.sub('[^A-Za-z0-9\-_ \(\).]+', '', '%s (%s) - %s' % (movie_title, ptpmovie['Year'], torrentdesc))

                    def extra_check(item):
                        return self.torrentMeetsQualitySpec(item, quality_id)

                    results.append({
                        'id': torrent_id,
                        'name': torrent_name,
                        'Source': torrent['Source'],
                        'Checked': 'true' if torrent['Checked'] else 'false',
                        'Resolution': torrent['Resolution'],
                        'url': '%s?action=download&id=%d&authkey=%s&torrent_pass=%s' % (self.urls['torrent'], torrent_id, authkey, passkey),
                        'detail_url': self.urls['detail'] % torrent_id,
                        'date': tryInt(time.mktime(parse(torrent['UploadTime']).timetuple())),
                        'size': tryInt(torrent['Size']) / (1024 * 1024),
                        'seeders': tryInt(torrent['Seeders']),
                        'leechers': tryInt(torrent['Leechers']),
                        'score': torrentscore,
                        'extra_check': extra_check,
                    })

        except:
            log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
Code example #23
File: main.py Project: michaelsexton/WhatPotato
    def update(self, media_id = None, identifier = None, default_title = None, extended = False):
        """
        Update movie information inside media['doc']['info']

        @param media_id: document id
        @param default_title: default title, if empty, use first one or existing one
        @param extended: update with extended info (parses more info, actors, images from some info providers)
        @return: dict, with media
        """

        if self.shuttingDown():
            return

        lock_key = 'media.get.%s' % media_id if media_id else identifier
        self.acquireLock(lock_key)

        media = {}
        try:
            db = get_db()

            if media_id:
                media = db.get('id', media_id)
            else:
                media = db.get('media', 'imdb-%s' % identifier, with_doc = True)['doc']

            info = fireEvent('movie.info', merge = True, extended = extended, identifier = getIdentifier(media))

            # Don't need those here
            try: del info['in_wanted']
            except: pass
            try: del info['in_library']
            except: pass

            if not info or len(info) == 0:
                log.error('Could not update, no movie info to work with: %s', identifier)
                return False

            # Update basic info
            media['info'] = info

            titles = info.get('titles', [])
            log.debug('Adding titles: %s', titles)

            # Define default title
            if default_title or media.get('title') == 'UNKNOWN' or len(media.get('title', '')) == 0:
                media['title'] = self.getDefaultTitle(info, default_title)

            # Files
            image_urls = info.get('images', [])

            self.getPoster(media, image_urls)

            db.update(media)
        except:
            log.error('Failed update media: %s', traceback.format_exc())

        self.releaseLock(lock_key)
        return media