Example #1
    def notify(self, message = '', data = None, listener = None):
        if not data: data = {}

        api_data = {
            'user': self.conf('user_key'),
            'token': self.conf('api_token'),
            'message': toUnicode(message),
            'priority': self.conf('priority'),
            'sound': self.conf('sound'),
        }

        if data and getIdentifier(data):
            api_data.update({
                'url': toUnicode('http://www.imdb.com/title/%s/' % getIdentifier(data)),
                'url_title': toUnicode('%s on IMDb' % getTitle(data)),
            })

        try:
            data = self.urlopen('%s/%s' % (self.api_url, '1/messages.json'),
                headers = {'Content-type': 'application/x-www-form-urlencoded'},
                data = api_data)
            log.info2('Pushover responded with: %s', data)
            return True
        except:
            return False
Example #2
    def searchAll(self, manual=False):

        if self.in_progress:
            log.info("Search already in progress")
            fireEvent(
                "notify.frontend",
                type="movie.searcher.already_started",
                data=True,
                message="Full search already in progress",
            )
            return

        self.in_progress = True
        fireEvent("notify.frontend", type="movie.searcher.started", data=True, message="Full search started")

        medias = [x["_id"] for x in fireEvent("media.with_status", "active", with_doc=False, single=True)]
        random.shuffle(medias)

        total = len(medias)
        self.in_progress = {"total": total, "to_go": total}

        try:
            search_protocols = fireEvent("searcher.protocols", single=True)

            for media_id in medias:

                media = fireEvent("media.get", media_id, single=True)

                try:
                    self.single(media, search_protocols, manual=manual)
                except IndexError:
                    log.error(
                        "Forcing library update for %s, if you see this often, please report: %s",
                        (getIdentifier(media), traceback.format_exc()),
                    )
                    fireEvent("movie.update_info", media_id)
                except:
                    log.error("Search failed for %s: %s", (getIdentifier(media), traceback.format_exc()))

                self.in_progress["to_go"] -= 1

                # Break if CP wants to shut down
                if self.shuttingDown():
                    break

        except SearchSetupError:
            pass

        self.in_progress = False
Example #3
    def notify(self, message = '', data = None, listener = None):
        if not data: data = {}

        # Get configuration data
        token = self.conf('bot_token')
        usr_id = self.conf('receiver_user_id')

        # Add IMDB url to message:
        if data:
            imdb_id = getIdentifier(data)
            if imdb_id:
                url = 'http://www.imdb.com/title/{0}/'.format(imdb_id)
                message = '{0}\n{1}'.format(message, url)

        # Construct the message payload
        payload = {'chat_id': usr_id, 'text': message, 'parse_mode': 'Markdown'}

        # Send the message via Telegram's Bot API
        response = requests.post(self.TELEGRAM_API % (token, "sendMessage"), data=payload)

        # Error logging
        sent_successfully = True
        if response.status_code != 200:
            log.error('Could not send notification to TelegramBot (token=%s). Response: [%s]', (token, response.text))
            sent_successfully = False

        return sent_successfully
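
The snippet above relies on a TELEGRAM_API format string defined elsewhere in the class. As a hedged sketch based on Telegram's documented Bot API URL scheme (the token below is a made-up placeholder), that template presumably looks like:

    # Assumed template, not shown in the snippet itself; Telegram's Bot API
    # addresses methods as https://api.telegram.org/bot<token>/<method>.
    TELEGRAM_API = 'https://api.telegram.org/bot%s/%s'

    # TELEGRAM_API % ('123456:ABC-DEF', 'sendMessage')
    # -> 'https://api.telegram.org/bot123456:ABC-DEF/sendMessage'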
Example #4
    def updateReleaseDate(self, media_id):
        """
        Update release_date (eta) info only

        @param media_id: document id
        @return: dict, with dates dvd, theater, bluray, expires
        """

        try:
            db = get_db()

            media = db.get('id', media_id)

            if not media.get('info'):
                media = self.update(media_id)
                dates = media.get('info', {}).get('release_date')
            else:
                dates = media.get('info').get('release_date')

            if not dates or dates.get('expires', 0) < time.time() or dates.get('expires', 0) > time.time() + (604800 * 4):
                dates = fireEvent('movie.info.release_date', identifier = getIdentifier(media), merge = True)
                media['info'].update({'release_date': dates})
                db.update(media)

            return dates
        except:
            log.error('Failed updating release dates: %s', traceback.format_exc())

        return {}
Example #5
    def search(self, group):

        movie_name = getTitle(group)

        url = self.urls['api'] % self.movieUrlName(movie_name)
        try:
            data = self.getCache('hdtrailers.%s' % getIdentifier(group), url, show_error = False)
        except HTTPError:
            log.debug('No page found for: %s', movie_name)
            data = None

        result_data = {'480p': [], '720p': [], '1080p': []}

        if not data:
            return result_data

        did_alternative = False
        for provider in self.providers:
            results = self.findByProvider(data, provider)

            # Find alternative
            if results.get('404') and not did_alternative:
                results = self.findViaAlternative(group)
                did_alternative = True

            result_data = mergeDicts(result_data, results)

        return result_data
Example #6
    def _search(self, movie, quality, results):

        search_url = self.urls['search'] % (self.getDomain(), getIdentifier(movie), quality['identifier'])

        data = self.getJsonData(search_url)

        if data and data.get('MovieList'):
            try:
                for result in data.get('MovieList'):

                    try:
                        title = result['TorrentUrl'].split('/')[-1][:-8].replace('_', '.').strip('._')
                        title = title.replace('.-.', '-')
                        title = title.replace('..', '.')
                    except:
                        continue

                    results.append({
                        'id': result['MovieID'],
                        'name': title,
                        'url': result['TorrentMagnetUrl'],
                        'detail_url': self.urls['detail'] % (self.getDomain(), result['MovieID']),
                        'size': self.parseSize(result['Size']),
                        'seeders': tryInt(result['TorrentSeeds']),
                        'leechers': tryInt(result['TorrentPeers'])
                    })

            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
Example #7
    def _search(self, movie, quality, results):

        domain = self.getDomain()
        if not domain:
            return

        search_url = self.urls['search'] % (domain, getIdentifier(movie), quality['identifier'])

        data = self.getJsonData(search_url)

        if data and data.get('MovieList'):
            try:
                for result in data.get('MovieList'):

                    if result['Quality'] and result['Quality'] not in result['MovieTitle']:
                        title = result['MovieTitle'] + ' BrRip ' + result['Quality']
                    else:
                        title = result['MovieTitle'] + ' BrRip'

                    results.append({
                        'id': result['MovieID'],
                        'name': title,
                        'url': result['TorrentMagnetUrl'],
                        'detail_url': self.urls['detail'] % (domain, result['MovieID']),
                        'size': self.parseSize(result['Size']),
                        'seeders': tryInt(result['TorrentSeeds']),
                        'leechers': tryInt(result['TorrentPeers']),
                    })

            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
Example #8
    def suggestView(self, limit = 6, **kwargs):

        movies = splitString(kwargs.get('movies', ''))
        ignored = splitString(kwargs.get('ignored', ''))
        seen = splitString(kwargs.get('seen', ''))

        cached_suggestion = self.getCache('suggestion_cached')
        if cached_suggestion:
            suggestions = cached_suggestion
        else:

            if not movies or len(movies) == 0:
                active_movies = fireEvent('media.with_status', ['active', 'done'], single = True)
                movies = [getIdentifier(x) for x in active_movies]

            if not ignored or len(ignored) == 0:
                ignored = splitString(Env.prop('suggest_ignore', default = ''))
            if not seen or len(seen) == 0:
                movies.extend(splitString(Env.prop('suggest_seen', default = '')))

            suggestions = fireEvent('movie.suggest', movies = movies, ignore = ignored, single = True)
            self.setCache('suggestion_cached', suggestions, timeout = 6048000)  # Cache for 10 weeks

        return {
            'success': True,
            'count': len(suggestions),
            'suggestions': suggestions[:int(limit)]
        }
Example #9
    def notify(self, message = '', data = None, listener = None):
        if not data: data = {}

        if listener == 'test':

            post_data = {
                'username': self.conf('automation_username'),
                'password': self.conf('automation_password'),
            }

            result = self.call((self.urls['test'] % self.conf('automation_api_key')), post_data)

            return result

        else:

            post_data = {
                'username': self.conf('automation_username'),
                'password': self.conf('automation_password'),
                'movies': [{
                    'imdb_id': getIdentifier(data),
                    'title': getTitle(data),
                    'year': data['info']['year']
                }] if data else []
            }

            result = self.call((self.urls['library'] % self.conf('automation_api_key')), post_data)
            if self.conf('remove_watchlist_enabled'):
                result = result and self.call((self.urls['unwatchlist'] % self.conf('automation_api_key')), post_data)

            return result
Example #10
 def buildUrl(self, media, host):
     arguments = tryUrlencode({
         'user': host['name'],
         'passkey': host['pass_key'],
         'imdbid': getIdentifier(media),
     })
     return '%s?%s' % (host['host'], arguments)
Example #11
    def updateSuggestionCache(self, ignore_imdb = None, limit = 6, ignored = None, seen = None):

        # Combine with previous suggestion_cache
        cached_suggestion = self.getCache('suggestion_cached') or []
        new_suggestions = []
        ignored = [] if not ignored else ignored
        seen = [] if not seen else seen

        if ignore_imdb:
            suggested_imdbs = []
            for cs in cached_suggestion:
                if cs.get('imdb') != ignore_imdb and cs.get('imdb') not in suggested_imdbs:
                    suggested_imdbs.append(cs.get('imdb'))
                    new_suggestions.append(cs)

        # Get new results and add them
        if len(new_suggestions) - 1 < limit:
            db = get_db()
            active_movies = fireEvent('media.with_status', ['active', 'done'], single = True)
            movies = [getIdentifier(x) for x in active_movies]
            movies.extend(seen)

            ignored.extend([x.get('imdb') for x in cached_suggestion])
            suggestions = fireEvent('movie.suggest', movies = movies, ignore = removeDuplicate(ignored), single = True)

            if suggestions:
                new_suggestions.extend(suggestions)

        self.setCache('suggestion_cached', new_suggestions, timeout = 3024000)

        return new_suggestions
Example #12
    def updateReleaseDate(self, media_id):
        """
        Update release_date (eta) info only

        @param media_id: document id
        @return: dict, with dates dvd, theater, bluray, expires
        """

        try:
            db = get_db()

            media = db.get("id", media_id)

            if not media.get("info"):
                media = self.update(media_id)
                dates = media.get("info", {}).get("release_date")
            else:
                dates = media.get("info").get("release_date")

            if (
                dates
                and (dates.get("expires", 0) < time.time() or dates.get("expires", 0) > time.time() + (604800 * 4))
                or not dates
            ):
                dates = fireEvent("movie.info.release_date", identifier=getIdentifier(media), merge=True)
                media["info"].update({"release_date": dates})
                db.update(media)

            return dates
        except:
            log.error("Failed updating release dates: %s", traceback.format_exc())

        return {}
Example #13
 def _search(self, movie, quality, results):
     title = getIdentifier(movie)
     data = self._post_query(title, self.getNorbitsQuality(quality.get('custom').get('quality')))
     if data:
         log.info('We got data: %s' % data)
         try:
             for result in data:
                 log.info('We got result: %s' % result)
                 download_url = self.getDownloadUrl(result['id'])
                 details_url = self.urls['detail'] % result['id']
                 log.info('Download url: %s' % download_url)
                 log.info('Details url: %s' % details_url)
                 append_data = {
                     'id': result['id'],
                     'name': result['name'],
                     'detail_url': details_url,
                     'size': tryInt(int(result['size']) / 1024 / 1024),
                     'seeders': tryInt(result['seeders']),
                     'leechers': tryInt(result['leechers']),
                     'url': download_url
                 }
                 log.info('Appending data: %s' % json.dumps(append_data))
                 results.append(append_data)
         except:
             log.error('Failed getting results from %s: %s' % (self.getName(), traceback.format_exc()))
         finally:
             log.info('Final results: %s' % results)
     return results
Example #14
    def _search(self, movie, quality, results):

        domain = self.getDomain()
        if not domain:
            return

        search_url = self.urls['search'] % (domain, getIdentifier(movie))

        data = self.getJsonData(search_url) or {}
        data = data.get('data')

        if isinstance(data, dict) and data.get('movies'):
            try:
                for result in data.get('movies'):

                    for release in result.get('torrents', []):

                        if release['quality'] and release['quality'] not in result['title_long']:
                            title = result['title_long'] + ' BRRip ' + release['quality']
                        else:
                            title = result['title_long'] + ' BRRip'

                        results.append({
                            'id': release['hash'],
                            'name': title,
                            'url': release['url'],
                            'detail_url': result['url'],
                            'size': self.parseSize(release['size']),
                            'seeders': tryInt(release['seeds']),
                            'leechers': tryInt(release['peers']),
                        })

            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
Example #15
    def findViaAlternative(self, group):
        results = {'480p': [], '720p': [], '1080p': []}

        movie_name = getTitle(group)

        url = "%s?%s" % (self.urls['backup'], tryUrlencode({'s':movie_name}))
        try:
            data = self.getCache('hdtrailers.alt.%s' % getIdentifier(group), url, show_error = False)
        except HTTPError:
            log.debug('No alternative page found for: %s', movie_name)
            data = None

        if not data:
            return results

        try:
            html = BeautifulSoup(data, parse_only = self.only_tables_tags)
            result_table = html.find_all('h2', text = re.compile(movie_name))

            for h2 in result_table:
                if 'trailer' in h2.get_text().lower():
                    parent = h2.parent.parent.parent
                    trailerLinks = parent.find_all('a', text = re.compile('480p|720p|1080p'))
                    try:
                        for trailer in trailerLinks:
                            results[trailer.text].insert(0, trailer.parent['href'])
                    except:
                        pass

        except AttributeError:
            log.debug('No trailers found via the alternative search.')

        return results
Example #16
 def buildUrl(self, media, host):
     arguments = tryUrlencode({
         'user': host['name'],
         'passkey': host['pass_key'],
         'imdbid': getIdentifier(media),
         'search' : getTitle(media) + ' ' + str(media['info']['year']),
     })
     return '%s?%s' % (host['host'], arguments)
Example #17
 def buildUrl(self, media, api_key):
     query = tryUrlencode({
         't': 'movie',
         'imdbid': getIdentifier(media).replace('tt', ''),
         'apikey': api_key,
         'extended': 1
     })
     return query
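
Note that buildUrl here returns only the query string; the caller prepends the host. Assuming tryUrlencode behaves like the standard library's urlencode for a flat dict (its definition is outside this snippet, so this is an assumption), the output has this shape; the API key is made up:

    try:
        from urllib.parse import urlencode  # Python 3
    except ImportError:
        from urllib import urlencode  # Python 2

    print(urlencode({'t': 'movie', 'imdbid': '0133093', 'apikey': 'secret', 'extended': 1}))
    # t=movie&imdbid=0133093&apikey=secret&extended=1  (parameter order may vary)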
Example #18
    def _search(self, media, quality, results):

        data = self.getHTMLData(self.urls['search'] % (self.getDomain(), 'm', getIdentifier(media).replace('tt', '')))

        if data:

            cat_ids = self.getCatId(quality)
            table_order = ['name', 'size', None, 'age', 'seeds', 'leechers']

            try:
                html = BeautifulSoup(data)
                resultdiv = html.find('div', attrs = {'class': 'tabs'})
                for result in resultdiv.find_all('div', recursive = False):
                    if result.get('id').lower().strip('tab-') not in cat_ids:
                        continue

                    try:
                        for temp in result.find_all('tr'):
                            if temp['class'] == 'firstr' or not temp.get('id'):
                                continue

                            new = {}

                            nr = 0
                            for td in temp.find_all('td'):
                                column_name = table_order[nr]
                                if column_name:

                                    if column_name == 'name':
                                        link = td.find('div', {'class': 'torrentname'}).find_all('a')[2]
                                        new['id'] = temp.get('id')[-7:]
                                        new['name'] = link.text
                                        new['url'] = td.find('a', 'imagnet')['href']
                                        new['detail_url'] = self.urls['detail'] % (self.getDomain(), link['href'][1:])
                                        new['verified'] = True if td.find('a', 'iverify') else False
                                        new['score'] = 100 if new['verified'] else 0
                                    elif column_name == 'size':
                                        new['size'] = self.parseSize(td.text)
                                    elif column_name == 'age':
                                        new['age'] = self.ageToDays(td.text)
                                    elif column_name == 'seeds':
                                        new['seeders'] = tryInt(td.text)
                                    elif column_name == 'leechers':
                                        new['leechers'] = tryInt(td.text)

                                nr += 1

                            # Only store verified torrents
                            if self.conf('only_verified') and not new['verified']:
                                continue

                            results.append(new)
                    except:
                        log.error('Failed parsing KickAssTorrents: %s', traceback.format_exc())

            except AttributeError:
                log.debug('No search results found.')
Example #19
    def _search(self, movie, quality, results):

        data = self.getHTMLData(self.urls['search'] % (self.conf('passkey'), getIdentifier(movie), self.conf('only_internal')))

        if data:
            try:
                soup = BeautifulSoup(data)

                if soup.find('error'):
                    log.info(soup.find('error').get_text())
                    return

                authkey = soup.find('authkey').get_text()
                entries = soup.find_all('torrent')

                for entry in entries:

                    torrentscore = 0
                    torrent_id = entry.find('id').get_text()
                    name = entry.find('name').get_text()
                    year = entry.find('year').get_text()
                    releasegroup = entry.find('releasegroup').get_text()
                    resolution = entry.find('resolution').get_text()
                    encoding = entry.find('encoding').get_text()
                    freeleech = entry.find('freeleech').get_text()
                    media = entry.find('media').get_text()
                    audioformat = entry.find('audioformat').get_text()

                    # skip audio channel only releases
                    if resolution == '':
                        continue

                    torrent_desc = '%s.%s.%s.%s-%s' % (resolution, media, audioformat, encoding, releasegroup)

                    if self.conf('prefer_internal') and freeleech in ['0.25', '0.50']:
                        torrentscore += 200

                    if encoding == 'x264' and self.conf('favor') in ['encode', 'both']:
                        torrentscore += 200
                    elif re.search('Remux', encoding) and self.conf('favor') in ['remux', 'both']:
                        torrentscore += 200

                    name = re.sub(r'\W', '.', name)
                    name = re.sub(r'\.+', '.', name)
                    results.append({
                        'id': torrent_id,
                        'name': '%s.%s.%s' % (name, year, torrent_desc),
                        'url': self.urls['download'] % (torrent_id, authkey, self.conf('passkey')),
                        'detail_url': self.urls['detail'] % torrent_id,
                        'size': tryInt(entry.find('size').get_text()) / 1048576,
                        'seeders': tryInt(entry.find('seeders').get_text()),
                        'leechers': tryInt(entry.find('leechers').get_text()),
                        'score': torrentscore
                    })

            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
Example #20
    def notify(self, message = '', data = None, listener = None):
        if not data: data = {}

        script_data = {
            'message': toUnicode(message)
        }

        if getIdentifier(data):
            script_data.update({
                'imdb_id': getIdentifier(data)
            })

        try:
            subprocess.call([self.conf('path'), message])
            return True
        except:
            log.error('Script notification failed: %s', traceback.format_exc())

        return False
Example #21
    def suggestView(self, limit = 6, **kwargs):
        if self.isDisabled():
            return {
                'success': True,
                'movies': []
            }

        movies = splitString(kwargs.get('movies', ''))
        ignored = splitString(kwargs.get('ignored', ''))
        seen = splitString(kwargs.get('seen', ''))

        cached_suggestion = self.getCache('suggestion_cached')
        if cached_suggestion:
            suggestions = cached_suggestion
        else:

            if not movies or len(movies) == 0:
                active_movies = fireEvent('media.with_status', ['active', 'done'], types = 'movie', single = True)
                movies = [getIdentifier(x) for x in active_movies]

            if not ignored or len(ignored) == 0:
                ignored = splitString(Env.prop('suggest_ignore', default = ''))
            if not seen or len(seen) == 0:
                movies.extend(splitString(Env.prop('suggest_seen', default = '')))

            suggestions = fireEvent('movie.suggest', movies = movies, ignore = ignored, single = True)
            self.setCache('suggestion_cached', suggestions, timeout = 6048000)  # Cache for 10 weeks

        medias = []
        for suggestion in suggestions[:int(limit)]:

            # Cache poster
            posters = suggestion.get('images', {}).get('poster', [])
            poster = [x for x in posters if 'tmdb' in x]
            posters = poster if len(poster) > 0 else posters

            cached_poster = fireEvent('file.download', url = posters[0], single = True) if len(posters) > 0 else False
            files = {'image_poster': [cached_poster] } if cached_poster else {}

            medias.append({
                'status': 'suggested',
                'title': getTitle(suggestion),
                'type': 'movie',
                'info': suggestion,
                'files': files,
                'identifiers': {
                    'imdb': suggestion.get('imdb')
                }
            })

        return {
            'success': True,
            'movies': medias
        }
Example #22
    def _search(self, movie, quality, results):
        imdbId = getIdentifier(movie).replace('tt', '')
        url = self.urls['search'] % imdbId
        data = self.getHTMLData(url)

        if data:

            # Remove HDSpace NEW list
            split_data = data.partition('<form name="tcategories" action="index.php" method="post">')
            data = split_data[2]

            html = BeautifulSoup(data)
            try:
                # Now attempt to get any others
                result_table = html.find('table', attrs = {'class': 'lista'})
                if not result_table:
                    return

                entries = result_table.find_all('tr')
                log.info("entries length: %s", len(entries))

                if not entries:
                    return

                for result in entries:
                    block2 = result.find_all('td', attrs = {'class': 'header'})
                    # Ignore header
                    if block2:
                        continue
                    cells = result.find_all('td')
                    log.info("cells length: %s", len(cells))

                    extend = 0
                    detail = cells[1 + extend].find('a')['href']
                    torrent_id = detail.replace('index.php?page=torrent-details&id=', '')
                    try:
                        torrent_age = datetime.now() - datetime.strptime(cells[4 + extend].get_text().encode('ascii', 'ignore'), '%B %d, %Y,%H:%M:%S')
                    except:
                        torrent_age = timedelta(1)

                    results.append({
                        'id': torrent_id,
                        'name': cells[9 + extend].find('a')['title'].strip('History - ').replace('Blu-ray', 'bd50'),
                        'url': self.urls['home'] % cells[3 + extend].find('a')['href'],
                        'detail_url': self.urls['home'] % cells[1 + extend].find('a')['href'],
                        'size': self.parseSize(cells[5 + extend].get_text()),
                        'age': torrent_age.days,
                        'seeders': tryInt(cells[7 + extend].find('a').get_text()),
                        'leechers': tryInt(cells[8 + extend].find('a').get_text()),
                        'get_more_info': self.getMoreInfo,
                    })

            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
Example #23
    def notify(self, message="", data=None, listener=None):
        if not data:
            data = {}

        http_handler = HTTPSConnection("api.pushover.net:443")

        api_data = {
            "user": self.conf("user_key"),
            "token": self.conf("api_token"),
            "message": toUnicode(message),
            "priority": self.conf("priority"),
            "sound": self.conf("sound"),
        }

        if data and getIdentifier(data):
            api_data.update(
                {
                    "url": toUnicode("http://www.imdb.com/title/%s/" % getIdentifier(data)),
                    "url_title": toUnicode("%s on IMDb" % getTitle(data)),
                }
            )

        http_handler.request(
            "POST",
            "/1/messages.json",
            headers={"Content-type": "application/x-www-form-urlencoded"},
            body=tryUrlencode(api_data),
        )

        response = http_handler.getresponse()
        request_status = response.status

        if request_status == 200:
            log.info("Pushover notifications sent.")
            return True
        elif request_status == 401:
            log.error("Pushover auth failed: %s", response.reason)
            return False
        else:
            log.error("Pushover notification failed: %s", request_status)
            return False
Example #24
 def buildUrl(self, media, quality):
     query = tryUrlencode({
         'q': getIdentifier(media),
         'm': 'n',
         'max': 400,
         'adv_age': Env.setting('retention', 'nzb'),
         'adv_sort': 'date',
         'adv_col': 'on',
         'adv_nfo': 'on',
         'xminsize': quality.get('size_min'),
         'xmaxsize': quality.get('size_max'),
     })
     return query
Example #25
    def buildUrl(self, media, host):

        query = tryUrlencode({
            't': 'movie',
            'imdbid': getIdentifier(media).replace('tt', ''),
            'apikey': host['api_key'],
            'extended': 1
        })

        if len(host.get('custom_tag', '')) > 0:
            query = '%s&%s' % (query, host.get('custom_tag'))

        return query
Example #26
    def notify(self, message = '', data = None, listener = None):
        if not data: data = {}

        post_data = {
            'message': toUnicode(message)
        }

        if getIdentifier(data):
            post_data.update({
                'imdb_id': getIdentifier(data)
            })

        headers = {
            'Content-type': 'application/x-www-form-urlencoded'
        }

        try:
            self.urlopen(self.conf('url'), headers = headers, data = post_data, show_error = False)
            return True
        except:
            log.error('Webhook notification failed: %s', traceback.format_exc())

        return False
Example #27
    def cpTag(self, media, unique_tag = False):

        tag = ''
        if Env.setting('enabled', 'renamer') or unique_tag:
            identifier = getIdentifier(media) or ''
            unique_tag = ', ' + randomString() if unique_tag else ''

            tag = '.cp('
            tag += identifier
            tag += ', ' if unique_tag and identifier else ''
            tag += randomString() if unique_tag else ''
            tag += ')'

        return tag if len(tag) > 7 else ''
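
For illustration, a minimal standalone sketch (not CouchPotato code) of the tag shape the method above produces; the identifier and random part are made-up values:

    def cp_tag_sketch(identifier, random_part=None):
        # Mirrors the structure built above: '.cp(<identifier>[, <random>])'
        tag = '.cp(' + identifier
        if random_part:
            tag += (', ' if identifier else '') + random_part
        tag += ')'
        # Same final guard as above: drop tags that carry no real content
        return tag if len(tag) > 7 else ''

    print(cp_tag_sketch('tt0133093'))            # .cp(tt0133093)
    print(cp_tag_sketch('tt0133093', 'a1b2c3'))  # .cp(tt0133093, a1b2c3)
    print(cp_tag_sketch(''))                     # '' (only 5 chars, dropped)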
Example #28
    def _search(self, movie, quality, results):

        data = self.getHTMLData(self.urls['search'] % (self.conf('passkey'), getIdentifier(movie), self.conf('only_internal')))

        if data:
            try:
                soup = BeautifulSoup(data)

                if soup.find('error'):
                    log.error(soup.find('error').get_text())
                    return

                authkey = soup.find('authkey').get_text()
                entries = soup.find_all('torrent')

                for entry in entries:

                    torrentscore = 0
                    torrent_id = entry.find('id').get_text()
                    name = entry.find('name').get_text()
                    year = entry.find('year').get_text()
                    releasegroup = entry.find('releasegroup').get_text()
                    resolution = entry.find('resolution').get_text()
                    encoding = entry.find('encoding').get_text()
                    freeleech = entry.find('freeleech').get_text()
                    torrent_desc = '/ %s / %s / %s ' % (releasegroup, resolution, encoding)

                    if freeleech == '0.25' and self.conf('prefer_internal'):
                        torrent_desc += '/ Internal'
                        torrentscore += 200

                    if encoding == 'x264' and self.conf('favor') in ['encode', 'both']:
                        torrentscore += 300
                    if re.search('Remux', encoding) and self.conf('favor') in ['remux', 'both']:
                        torrentscore += 200

                    results.append({
                        'id': torrent_id,
                        'name': re.sub('[^A-Za-z0-9\-_ \(\).]+', '', '%s (%s) %s' % (name, year, torrent_desc)),
                        'url': self.urls['download'] % (torrent_id, authkey, self.conf('passkey')),
                        'detail_url': self.urls['detail'] % torrent_id,
                        'size': tryInt(entry.find('size').get_text()) / 1048576,
                        'seeders': tryInt(entry.find('seeders').get_text()),
                        'leechers': tryInt(entry.find('leechers').get_text()),
                        'score': torrentscore
                    })

            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
Example #29
    def notify(self, message = '', data = None, listener = None):
        if not data: data = {}

        http_handler = HTTPSConnection("api.pushover.net:443")

        api_data = {
            'user': self.conf('user_key'),
            'token': self.conf('api_token'),
            'message': toUnicode(message),
            'priority': self.conf('priority'),
            'sound': self.conf('sound'),
        }

        if data and getIdentifier(data):
            api_data.update({
                'url': toUnicode('http://www.imdb.com/title/%s/' % getIdentifier(data)),
                'url_title': toUnicode('%s on IMDb' % getTitle(data)),
            })

        http_handler.request('POST', '/1/messages.json',
                             headers = {'Content-type': 'application/x-www-form-urlencoded'},
                             body = tryUrlencode(api_data)
        )

        response = http_handler.getresponse()
        request_status = response.status

        if request_status == 200:
            log.info('Pushover notifications sent.')
            return True
        elif request_status == 401:
            log.error('Pushover auth failed: %s', response.reason)
            return False
        else:
            log.error('Pushover notification failed: %s', request_status)
            return False
Example #30
    def searchAll(self, manual = False):

        if self.in_progress:
            log.info('Search already in progress')
            fireEvent('notify.frontend', type = 'movie.searcher.already_started', data = True, message = 'Full search already in progress')
            return

        self.in_progress = True
        fireEvent('notify.frontend', type = 'movie.searcher.started', data = True, message = 'Full search started')

        medias = [x['_id'] for x in fireEvent('media.with_status', 'active', types = 'movie', with_doc = False, single = True)]
        random.shuffle(medias)

        total = len(medias)
        self.in_progress = {
            'total': total,
            'to_go': total,
        }

        try:
            search_protocols = fireEvent('searcher.protocols', single = True)

            for media_id in medias:

                media = fireEvent('media.get', media_id, single = True)
                if not media: continue

                try:
                    self.single(media, search_protocols, manual = manual)
                except IndexError:
                    log.error('Forcing library update for %s, if you see this often, please report: %s', (getIdentifier(media), traceback.format_exc()))
                    fireEvent('movie.update', media_id)
                except:
                    log.error('Search failed for %s: %s', (getIdentifier(media), traceback.format_exc()))

                self.in_progress['to_go'] -= 1

                # Break if CP wants to shut down
                if self.shuttingDown():
                    break

        except SearchSetupError:
            pass

        self.in_progress = False
Example #31
    def cpTag(self, media):
        if Env.setting('enabled', 'renamer'):
            identifier = getIdentifier(media)
            return '.cp(' + identifier + ')' if identifier else ''

        return ''
Example #32
    def _search(self, media, quality, results):

        movie_title = getTitle(media)
        quality_id = quality['identifier']

        params = mergeDicts(
            self.quality_search_params[quality_id].copy(), {
                'order_by': 'relevance',
                'order_way': 'descending',
                'searchstr': getIdentifier(media)
            })

        url = '%s?json=noredirect&%s' % (self.urls['torrent'],
                                         tryUrlencode(params))
        res = self.getJsonData(url)

        try:
            if 'Movies' not in res:
                return

            authkey = res['AuthKey']
            passkey = res['PassKey']

            for ptpmovie in res['Movies']:
                if 'Torrents' not in ptpmovie:
                    log.debug('Movie %s (%s) has NO torrents',
                              (ptpmovie['Title'], ptpmovie['Year']))
                    continue

                log.debug('Movie %s (%s) has %d torrents',
                          (ptpmovie['Title'], ptpmovie['Year'],
                           len(ptpmovie['Torrents'])))
                for torrent in ptpmovie['Torrents']:
                    torrent_id = tryInt(torrent['Id'])
                    torrentdesc = '%s %s %s' % (torrent['Resolution'],
                                                torrent['Source'],
                                                torrent['Codec'])
                    torrentscore = 0

                    if 'GoldenPopcorn' in torrent and torrent['GoldenPopcorn']:
                        torrentdesc += ' HQ'
                        if self.conf('prefer_golden'):
                            torrentscore += 5000
                    if 'FreeleechType' in torrent:
                        torrentdesc += ' Freeleech'
                        if self.conf('prefer_freeleech'):
                            torrentscore += 7000
                    if 'Scene' in torrent and torrent['Scene']:
                        torrentdesc += ' Scene'
                        if self.conf('prefer_scene'):
                            torrentscore += 2000
                    if 'RemasterTitle' in torrent and torrent['RemasterTitle']:
                        torrentdesc += self.htmlToASCII(
                            ' %s' % torrent['RemasterTitle'])

                    torrentdesc += ' (%s)' % quality_id
                    torrent_name = re.sub(
                        '[^A-Za-z0-9\-_ \(\).]+', '', '%s (%s) - %s' %
                        (movie_title, ptpmovie['Year'], torrentdesc))

                    def extra_check(item):
                        return self.torrentMeetsQualitySpec(item, quality_id)

                    results.append({
                        'id': torrent_id,
                        'name': torrent_name,
                        'Source': torrent['Source'],
                        'Checked': 'true' if torrent['Checked'] else 'false',
                        'Resolution': torrent['Resolution'],
                        'url': '%s?action=download&id=%d&authkey=%s&torrent_pass=%s' % (self.urls['torrent'], torrent_id, authkey, passkey),
                        'detail_url': self.urls['detail'] % torrent_id,
                        'date': tryInt(time.mktime(parse(torrent['UploadTime']).timetuple())),
                        'size': tryInt(torrent['Size']) / (1024 * 1024),
                        'seeders': tryInt(torrent['Seeders']),
                        'leechers': tryInt(torrent['Leechers']),
                        'score': torrentscore,
                        'extra_check': extra_check,
                    })

        except:
            log.error('Failed getting results from %s: %s',
                      (self.getName(), traceback.format_exc()))
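
The parse() call feeding the 'date' field above is presumably dateutil.parser.parse (an assumption; the import sits outside the snippet). A small sketch of the conversion, with a made-up timestamp:

    # Turn an upload-time string into Unix time, as the 'date' field does.
    # Requires python-dateutil.
    import time
    from dateutil.parser import parse

    print(int(time.mktime(parse('2015-07-30 12:06:59').timetuple())))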
Example #33
    def _search(self, media, quality, results):

        data = self.getHTMLData(
            self.urls['search'] %
            (self.getDomain(), 'm', getIdentifier(media).replace('tt', '')))

        if data:

            cat_ids = self.getCatId(quality)
            table_order = ['name', 'size', None, 'age', 'seeds', 'leechers']

            try:
                html = BeautifulSoup(data)
                resultdiv = html.find('div', attrs={'class': 'tabs'})
                for result in resultdiv.find_all('div', recursive=False):
                    if result.get('id').lower().strip('tab-') not in cat_ids:
                        continue

                    try:
                        for temp in result.find_all('tr'):
                            if temp['class'] == 'firstr' or not temp.get('id'):
                                continue

                            new = {}

                            nr = 0
                            for td in temp.find_all('td'):
                                column_name = table_order[nr]
                                if column_name:

                                    if column_name == 'name':
                                        link = td.find('div', {'class': 'torrentname'}).find_all('a')[2]
                                        new['id'] = temp.get('id')[-7:]
                                        new['name'] = link.text
                                        new['url'] = td.find('a', 'imagnet')['href']
                                        new['detail_url'] = self.urls['detail'] % (self.getDomain(), link['href'][1:])
                                        new['verified'] = True if td.find('a', 'iverify') else False
                                        new['score'] = 100 if new['verified'] else 0
                                    elif column_name == 'size':
                                        new['size'] = self.parseSize(td.text)
                                    elif column_name == 'age':
                                        new['age'] = self.ageToDays(td.text)
                                    elif column_name == 'seeds':
                                        new['seeders'] = tryInt(td.text)
                                    elif column_name == 'leechers':
                                        new['leechers'] = tryInt(td.text)

                                nr += 1

                            # Only store verified torrents
                            if self.conf('only_verified') and not new['verified']:
                                continue

                            results.append(new)
                    except:
                        log.error('Failed parsing KickAssTorrents: %s',
                                  traceback.format_exc())

            except AttributeError:
                log.debug('No search results found.')
Example #34
    def updateInfo(self,
                   media_id=None,
                   identifier=None,
                   default_title=None,
                   extended=False):
        """
        Update movie information inside media['doc']['info']

        @param media_id: document id
        @param default_title: default title, if empty, use first one or existing one
        @param extended: update with extended info (parses more info, actors, images from some info providers)
        @return: dict, with media
        """

        if self.shuttingDown():
            return

        try:
            db = get_db()

            if media_id:
                media = db.get('id', media_id)
            else:
                media = db.get('media', 'imdb-%s' % identifier,
                               with_doc=True)['doc']

            info = fireEvent('movie.info',
                             merge=True,
                             extended=extended,
                             identifier=getIdentifier(media))

            # Don't need those here
            try:
                del info['in_wanted']
            except:
                pass
            try:
                del info['in_library']
            except:
                pass

            if not info or len(info) == 0:
                log.error('Could not update, no movie info to work with: %s',
                          identifier)
                return False

            # Update basic info
            media['info'] = info

            titles = info.get('titles', [])
            log.debug('Adding titles: %s', titles)

            # Define default title
            if default_title:
                def_title = None
                if default_title:
                    counter = 0
                    for title in titles:
                        if title.lower() == toUnicode(
                                default_title.lower()) or (
                                    toUnicode(default_title) == six.u('')
                                    and toUnicode(titles[0]) == title):
                            def_title = toUnicode(title)
                            break
                        counter += 1

                if not def_title:
                    def_title = toUnicode(titles[0])

                media['title'] = def_title

            # Files
            images = info.get('images', [])
            media['files'] = media.get('files', {})
            for image_type in ['poster']:

                # Remove non-existing files
                file_type = 'image_%s' % image_type
                existing_files = list(set(media['files'].get(file_type, [])))
                for ef in media['files'].get(file_type, []):
                    if not os.path.isfile(ef):
                        existing_files.remove(ef)

                # Replace new files list
                media['files'][file_type] = existing_files
                if len(existing_files) == 0:
                    del media['files'][file_type]

                # Loop over type
                for image in images.get(image_type, []):
                    if not isinstance(image, (str, unicode)):
                        continue

                    if file_type not in media['files'] or len(
                            media['files'].get(file_type, [])) == 0:
                        file_path = fireEvent('file.download',
                                              url=image,
                                              single=True)
                        if file_path:
                            media['files'][file_type] = [file_path]
                            break
                    else:
                        break

            db.update(media)

            return media
        except:
            log.error('Failed update media: %s', traceback.format_exc())

        return {}
Example #35
    def correctRelease(self, nzb=None, media=None, quality=None, **kwargs):

        if media.get('type') != 'movie': return

        media_title = fireEvent('searcher.get_search_title',
                                media,
                                single=True)

        imdb_results = kwargs.get('imdb_results', False)
        retention = Env.setting('retention', section='nzb')

        if nzb.get('seeders') is None and 0 < retention < nzb.get('age', 0):
            log.info2(
                'Wrong: Outside retention, age is %s, needs %s or lower: %s',
                (nzb['age'], retention, nzb['name']))
            return False

        # Check for required and ignored words
        if not fireEvent(
                'searcher.correct_words', nzb['name'], media, single=True):
            return False

        preferred_quality = quality if quality else fireEvent(
            'quality.single', identifier=quality['identifier'], single=True)

        # Contains lower quality string
        contains_other = fireEvent('searcher.contains_other_quality',
                                   nzb,
                                   movie_year=media['info']['year'],
                                   preferred_quality=preferred_quality,
                                   single=True)
        if contains_other != False:
            log.info2(
                'Wrong: %s, looking for %s, found %s',
                (nzb['name'], quality['label'], [x for x in contains_other]
                 if contains_other else 'no quality'))
            return False

        # Check that the 3D format matches the preferred quality
        if not fireEvent('searcher.correct_3d',
                         nzb,
                         preferred_quality=preferred_quality,
                         single=True):
            log.info2(
                'Wrong: %s, %slooking for %s in 3D',
                (nzb['name'],
                 ('' if preferred_quality['custom'].get('3d') else 'NOT '),
                 quality['label']))
            return False

        # File too small
        if nzb['size'] and tryInt(preferred_quality['size_min']) > tryInt(
                nzb['size']):
            log.info2(
                'Wrong: "%s" is too small to be %s. %sMB instead of the minimal of %sMB.',
                (nzb['name'], preferred_quality['label'], nzb['size'],
                 preferred_quality['size_min']))
            return False

        # File too large
        if nzb['size'] and tryInt(preferred_quality['size_max']) < tryInt(
                nzb['size']):
            log.info2(
                'Wrong: "%s" is too large to be %s. %sMB instead of the maximum of %sMB.',
                (nzb['name'], preferred_quality['label'], nzb['size'],
                 preferred_quality['size_max']))
            return False

        # Provider specific functions
        get_more = nzb.get('get_more_info')
        if get_more:
            get_more(nzb)

        extra_check = nzb.get('extra_check')
        if extra_check and not extra_check(nzb):
            return False

        if imdb_results:
            return True

        # Check if nzb contains imdb link
        if getImdb(nzb.get('description', '')) == getIdentifier(media):
            return True

        for raw_title in media['info']['titles']:
            for movie_title in possibleTitles(raw_title):
                movie_words = re.split('\W+', simplifyString(movie_title))

                if fireEvent('searcher.correct_name',
                             nzb['name'],
                             movie_title,
                             single=True):
                    # if no IMDB link, at least check year range 1
                    if len(movie_words) > 2 and fireEvent(
                            'searcher.correct_year',
                            nzb['name'],
                            media['info']['year'],
                            1,
                            single=True):
                        return True

                    # if no IMDB link, at least check year
                    if len(movie_words) <= 2 and fireEvent(
                            'searcher.correct_year',
                            nzb['name'],
                            media['info']['year'],
                            0,
                            single=True):
                        return True

        log.info("Wrong: %s, undetermined naming. Looking for '%s (%s)'",
                 (nzb['name'], media_title, media['info']['year']))
        return False
Example #36
    def _search(self, movie, quality, results):

        url = self.urls['search'] % getIdentifier(movie)
        data = self.getHTMLData(url)

        if data:

            # Remove HDTorrents NEW list
            split_data = data.partition('<!-- Show New Torrents After Last Visit -->\n\n\n\n')
            data = split_data[2]

            html = BeautifulSoup(data)
            try:
                # Get first entry in table
                entries = html.find_all('td', attrs = {'align': 'center'})

                if len(entries) < 21:
                    return

                base = 21
                extend = 0

                try:
                    torrent_id = entries[base].find('div')['id']
                except:
                    extend = 2
                    torrent_id = entries[base + extend].find('div')['id']

                torrent_age = datetime.now() - datetime.strptime(entries[15 + extend].get_text()[:8] + ' ' + entries[15 + extend].get_text()[-10:], '%H:%M:%S %d/%m/%Y')

                results.append({
                    'id': torrent_id,
                    'name': entries[20 + extend].find('a')['title'].strip('History - ').replace('Blu-ray', 'bd50'),
                    'url': self.urls['home'] % entries[13 + extend].find('a')['href'],
                    'detail_url': self.urls['detail'] % torrent_id,
                    'size': self.parseSize(entries[16 + extend].get_text()),
                    'age': torrent_age.days,
                    'seeders': tryInt(entries[18 + extend].get_text()),
                    'leechers': tryInt(entries[19 + extend].get_text()),
                    'get_more_info': self.getMoreInfo,
                })

                # Now attempt to get any others
                result_table = html.find('table', attrs = {'class': 'mainblockcontenttt'})

                if not result_table:
                    return

                entries = result_table.find_all('td', attrs = {'align': 'center', 'class': 'listas'})

                if not entries:
                    return

                for result in entries:
                    block2 = result.find_parent('tr').find_next_sibling('tr')
                    if not block2:
                        continue
                    cells = block2.find_all('td')
                    try:
                        extend = 0
                        detail = cells[1 + extend].find('a')['href']
                    except:
                        extend = 1
                        detail = cells[1 + extend].find('a')['href']
                    torrent_id = detail.replace('details.php?id=', '')
                    torrent_age = datetime.now() - datetime.strptime(cells[5 + extend].get_text(), '%H:%M:%S %d/%m/%Y')

                    results.append({
                        'id': torrent_id,
                        'name': cells[1 + extend].find('b').get_text().strip('\t ').replace('Blu-ray', 'bd50'),
                        'url': self.urls['home'] % cells[3 + extend].find('a')['href'],
                        'detail_url': self.urls['home'] % cells[1 + extend].find('a')['href'],
                        'size': self.parseSize(cells[6 + extend].get_text()),
                        'age': torrent_age.days,
                        'seeders': tryInt(cells[8 + extend].get_text()),
                        'leechers': tryInt(cells[9 + extend].get_text()),
                        'get_more_info': self.getMoreInfo,
                    })

            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
Example #37
    def update(self,
               media_id=None,
               identifier=None,
               default_title=None,
               extended=False):
        """
        Update movie information inside media['doc']['info']

        @param media_id: document id
        @param default_title: default title, if empty, use first one or existing one
        @param extended: update with extended info (parses more info, actors, images from some info providers)
        @return: dict, with media
        """

        if self.shuttingDown():
            return

        lock_key = 'media.get.%s' % media_id if media_id else identifier
        self.acquireLock(lock_key)

        media = {}
        try:
            db = get_db()

            if media_id:
                media = db.get('id', media_id)
            else:
                media = db.get('media', 'imdb-%s' % identifier,
                               with_doc=True)['doc']

            info = fireEvent('movie.info',
                             merge=True,
                             extended=extended,
                             identifier=getIdentifier(media))

            # Don't need those here
            try:
                del info['in_wanted']
            except:
                pass
            try:
                del info['in_library']
            except:
                pass

            if not info or len(info) == 0:
                log.error('Could not update, no movie info to work with: %s',
                          identifier)
                self.releaseLock(lock_key)
                return False

            # Update basic info
            media['info'] = info

            titles = info.get('titles', [])
            log.debug('Adding titles: %s', titles)

            # Define default title
            if default_title:
                def_title = None
                for title in titles:
                    if title.lower() == toUnicode(default_title.lower()) or (
                            toUnicode(default_title) == six.u('')
                            and toUnicode(titles[0]) == title):
                        def_title = toUnicode(title)
                        break

                if not def_title:
                    def_title = toUnicode(titles[0])

                media['title'] = def_title

            # Files
            image_urls = info.get('images', [])

            self.getPoster(media, image_urls)

            db.update(media)
        except:
            log.error('Failed update media: %s', traceback.format_exc())

        self.releaseLock(lock_key)
        return media
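
Because the lock release sits at the end of the method, every early exit has to remember to release it; a try/finally keeps that in one place. A minimal standalone sketch of that pattern (hypothetical helpers standing in for the plugin's acquireLock/releaseLock):

import threading

_locks = {}

def acquire_lock(key):
    _locks.setdefault(key, threading.Lock()).acquire()

def release_lock(key):
    _locks[key].release()

def update_safe(media_id):
    lock_key = 'media.get.%s' % media_id
    acquire_lock(lock_key)
    try:
        # ... fetch info and update the document here; an early return
        # or an exception still reaches the finally below
        return {'_id': media_id}
    finally:
        release_lock(lock_key)

print(update_safe('abc123'))
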
Example #38
    def updateLibrary(self, full=True):
        last_update = float(Env.prop('manage.last_update', default=0))

        if self.in_progress:
            log.info('Already updating library: %s', self.in_progress)
            return
        elif self.isDisabled() or (last_update > time.time() - 20):
            return

        self.in_progress = {}
        fireEvent('notify.frontend', type='manage.updating', data=True)

        try:

            directories = self.directories()
            directories.sort()
            added_identifiers = []

            # Add some progress
            for directory in directories:
                self.in_progress[os.path.normpath(directory)] = {
                    'started': False,
                    'eta': -1,
                    'total': None,
                    'to_go': None,
                }

            for directory in directories:
                folder = os.path.normpath(directory)
                self.in_progress[folder]['started'] = tryInt(time.time())

                if not os.path.isdir(folder):
                    if len(directory) > 0:
                        log.error('Directory doesn\'t exist: %s', folder)
                    continue

                log.info('Updating manage library: %s', folder)
                fireEvent('notify.frontend',
                          type='manage.update',
                          data=True,
                          message='Scanning for movies in "%s"' % folder)

                onFound = self.createAddToLibrary(folder, added_identifiers)
                fireEvent('scanner.scan',
                          folder=folder,
                          simple=True,
                          newer_than=last_update if not full else 0,
                          on_found=onFound,
                          single=True)

                # Break if CP wants to shut down
                if self.shuttingDown():
                    break

            # If cleanup option is enabled, remove offline files from database
            if self.conf('cleanup') and full and not self.shuttingDown():

                # Get movies with done status
                total_movies, done_movies = fireEvent('media.list',
                                                      types='movie',
                                                      status='done',
                                                      release_status='done',
                                                      status_or=True,
                                                      single=True)

                for done_movie in done_movies:
                    if getIdentifier(done_movie) not in added_identifiers:
                        fireEvent('media.delete',
                                  media_id=done_movie['_id'],
                                  delete_from='all')
                    else:

                        releases = done_movie.get('releases', [])

                        for release in releases:
                            if release.get('files'):
                                brk = False
                                for file_type in release.get('files', {}):
                                    for release_file in release['files'][
                                            file_type]:
                                        # Remove release not available anymore
                                        if not os.path.isfile(
                                                sp(release_file)):
                                            fireEvent('release.clean',
                                                      release['_id'])
                                            brk = True
                                            break
                                    if brk:
                                        break

                        # Check if there are duplicate releases (different quality) use the last one, delete the rest
                        if len(releases) > 1:
                            used_files = {}
                            for release in releases:
                                for file_type in release.get('files', {}):
                                    for release_file in release['files'][
                                            file_type]:
                                        already_used = used_files.get(
                                            release_file)

                                        if already_used:
                                            # Keep the most recently edited release, delete the older one
                                            if already_used.get('last_edit', 0) > release.get('last_edit', 0):
                                                # current one is older
                                                fireEvent('release.delete',
                                                          release['_id'],
                                                          single=True)
                                            else:
                                                # previous one is older
                                                fireEvent('release.delete',
                                                          already_used['_id'],
                                                          single=True)
                                            break
                                        else:
                                            used_files[release_file] = release
                            del used_files

            Env.prop('manage.last_update', time.time())
        except:
            log.error('Failed updating library: %s', (traceback.format_exc()))

        while self.in_progress and not self.shuttingDown():

            delete_me = {}

            for folder in self.in_progress:
                if self.in_progress[folder]['to_go'] <= 0:
                    delete_me[folder] = True

            for delete in delete_me:
                del self.in_progress[delete]

            time.sleep(1)

        fireEvent('notify.frontend', type='manage.updating', data=False)
        self.in_progress = False
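
The duplicate cleanup above keeps the release with the newest last_edit when two releases claim the same file. A simplified standalone sketch of that rule (hypothetical data and helper name, not CouchPotato's API; the fireEvent plumbing is omitted):

def find_duplicate_deletes(releases):
    # Map each file to the release that claimed it first; when a second
    # release claims the same file, mark the older of the two for deletion
    used_files = {}
    deletes = []
    for release in releases:
        for files in release.get('files', {}).values():
            for release_file in files:
                already_used = used_files.get(release_file)
                if already_used:
                    if already_used.get('last_edit', 0) >= release.get('last_edit', 0):
                        deletes.append(release['_id'])       # current one is older
                    else:
                        deletes.append(already_used['_id'])  # previous one is older
                        used_files[release_file] = release
                else:
                    used_files[release_file] = release
    return deletes

releases = [
    {'_id': 'a', 'last_edit': 10, 'files': {'movie': ['/m/film.mkv']}},
    {'_id': 'b', 'last_edit': 20, 'files': {'movie': ['/m/film.mkv']}},
]
print(find_duplicate_deletes(releases))  # ['a']
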
Example #39
    def _search(self, movie, quality, results):

        data = self.getHTMLData(self.urls['search'] %
                                (self.conf('passkey'), getIdentifier(movie),
                                 self.conf('only_internal')))

        if data:
            try:
                soup = BeautifulSoup(data)

                if soup.find('error'):
                    log.error(soup.find('error').get_text())
                    return

                authkey = soup.find('authkey').get_text()
                entries = soup.find_all('torrent')

                for entry in entries:

                    torrentscore = 0
                    torrent_id = entry.find('id').get_text()
                    name = entry.find('name').get_text()
                    year = entry.find('year').get_text()
                    releasegroup = entry.find('releasegroup').get_text()
                    resolution = entry.find('resolution').get_text()
                    encoding = entry.find('encoding').get_text()
                    freeleech = entry.find('freeleech').get_text()
                    torrent_desc = '/ %s / %s / %s ' % (releasegroup,
                                                        resolution, encoding)

                    if freeleech == '0.25' and self.conf('prefer_internal'):
                        torrent_desc += '/ Internal'
                        torrentscore += 200

                    if encoding == 'x264' and self.conf('favor') in ['encode', 'both']:
                        torrentscore += 300
                    if re.search('Remux', encoding) and self.conf('favor') in ['remux', 'both']:
                        torrentscore += 200

                    results.append({
                        'id': torrent_id,
                        'name': re.sub(r'[^A-Za-z0-9\-_ ().]+', '',
                                       '%s (%s) %s' % (name, year, torrent_desc)),
                        'url': self.urls['download'] % (torrent_id, authkey, self.conf('passkey')),
                        'detail_url': self.urls['detail'] % torrent_id,
                        'size': self.parseSize(entry.find('size').get_text()),
                        'seeders': tryInt(entry.find('seeders').get_text()),
                        'leechers': tryInt(entry.find('leechers').get_text()),
                        'score': torrentscore
                    })

            except:
                log.error('Failed getting results from %s: %s',
                          (self.getName(), traceback.format_exc()))
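
The scoring above adds flat bonuses for internal releases (freeleech value '0.25') and for the configured encode/remux preference. A standalone sketch of the same arithmetic (hypothetical function name; configuration passed as plain arguments):

import re

def score_torrent(freeleech, encoding, prefer_internal, favor):
    # Flat bonuses mirroring the provider above
    score = 0
    if freeleech == '0.25' and prefer_internal:
        score += 200  # internal release
    if encoding == 'x264' and favor in ('encode', 'both'):
        score += 300
    if re.search('Remux', encoding) and favor in ('remux', 'both'):
        score += 200
    return score

print(score_torrent('0.25', 'x264', True, 'both'))  # 500
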
Example #40
    def scan(self,
             folder=None,
             files=None,
             release_download=None,
             simple=False,
             newer_than=0,
             return_ignored=True,
             check_file_date=True,
             on_found=None):

        folder = sp(folder)

        if not folder or not os.path.isdir(folder):
            log.error('Folder doesn\'t exist: %s', folder)
            return {}

        # Get movie "master" files
        movie_files = {}
        leftovers = []

        # Scan all files of the folder if no files are set
        if not files:
            try:
                files = []
                for root, dirs, walk_files in os.walk(folder,
                                                      followlinks=True):
                    files.extend([
                        sp(os.path.join(sp(root), ss(filename)))
                        for filename in walk_files
                    ])

                    # Break if CP wants to shut down
                    if self.shuttingDown():
                        break

            except:
                log.error('Failed getting files from %s: %s',
                          (folder, traceback.format_exc()))

            log.debug('Found %s files to scan and group in %s',
                      (len(files), folder))
        else:
            check_file_date = False
            files = [sp(x) for x in files]

        for file_path in files:

            if not os.path.exists(file_path):
                continue

            # Remove ignored files
            if self.isSampleFile(file_path):
                leftovers.append(file_path)
                continue
            elif not self.keepFile(file_path):
                continue

            is_dvd_file = self.isDVDFile(file_path)
            # Minimal 300MB files, or a DVD file
            if self.filesizeBetween(file_path, self.file_sizes['movie']) or is_dvd_file:

                # Normal identifier
                identifier = self.createStringIdentifier(
                    file_path, folder, exclude_filename=is_dvd_file)
                identifiers = [identifier]

                # Identifier with quality
                quality = fireEvent('quality.guess',
                                    files=[file_path],
                                    size=self.getFileSize(file_path),
                                    single=True) if not is_dvd_file else {'identifier': 'dvdr'}
                if quality:
                    identifier_with_quality = '%s %s' % (
                        identifier, quality.get('identifier', ''))
                    identifiers = [identifier_with_quality, identifier]

                if not movie_files.get(identifier):
                    movie_files[identifier] = {
                        'unsorted_files': [],
                        'identifiers': identifiers,
                        'is_dvd': is_dvd_file,
                    }

                movie_files[identifier]['unsorted_files'].append(file_path)
            else:
                leftovers.append(file_path)

            # Break if CP wants to shut down
            if self.shuttingDown():
                break

        # Cleanup
        del files

        # Sort reverse, this prevents "Iron man 2" from getting grouped with "Iron man" as the "Iron Man 2"
        # files will be grouped first.
        leftovers = set(sorted(leftovers, reverse=True))

        # Group files minus extension
        ignored_identifiers = []
        for identifier, group in movie_files.items():
            if identifier not in group['identifiers'] and len(identifier) > 0:
                group['identifiers'].append(identifier)

            log.debug('Grouping files: %s', identifier)

            has_ignored = 0
            for file_path in list(group['unsorted_files']):
                ext = getExt(file_path)
                wo_ext = file_path[:-(len(ext) + 1)]
                found_files = set([i for i in leftovers if wo_ext in i])
                group['unsorted_files'].extend(found_files)
                leftovers = leftovers - found_files

                has_ignored += 1 if ext in self.ignored_extensions else 0

            if has_ignored == 0:
                for file_path in list(group['unsorted_files']):
                    ext = getExt(file_path)
                    has_ignored += 1 if ext in self.ignored_extensions else 0

            if has_ignored > 0:
                ignored_identifiers.append(identifier)

            # Break if CP wants to shut down
            if self.shuttingDown():
                break

        # Create identifiers for all leftover files
        path_identifiers = {}
        for file_path in leftovers:
            identifier = self.createStringIdentifier(file_path, folder)

            if not path_identifiers.get(identifier):
                path_identifiers[identifier] = []

            path_identifiers[identifier].append(file_path)

        # Group the files based on the identifier
        delete_identifiers = []
        for identifier, found_files in path_identifiers.items():
            log.debug('Grouping files on identifier: %s', identifier)

            group = movie_files.get(identifier)
            if group:
                group['unsorted_files'].extend(found_files)
                delete_identifiers.append(identifier)

                # Remove the found files from the leftover stack
                leftovers = leftovers - set(found_files)

            # Break if CP wants to shut down
            if self.shuttingDown():
                break

        # Cleaning up used
        for identifier in delete_identifiers:
            if path_identifiers.get(identifier):
                del path_identifiers[identifier]
        del delete_identifiers

        # Group based on folder
        delete_identifiers = []
        for identifier, found_files in path_identifiers.items():
            log.debug('Grouping files on foldername: %s', identifier)

            for ff in found_files:
                new_identifier = self.createStringIdentifier(
                    os.path.dirname(ff), folder)

                group = movie_files.get(new_identifier)
                if group:
                    group['unsorted_files'].extend([ff])
                    delete_identifiers.append(identifier)

                    # Remove the found files from the leftover stack
                    leftovers = leftovers - set([ff])

            # Break if CP wants to shut down
            if self.shuttingDown():
                break

        # leftovers should be empty
        if leftovers:
            log.debug('Some files are still left over: %s', leftovers)

        # Cleaning up used
        for identifier in delete_identifiers:
            if path_identifiers.get(identifier):
                del path_identifiers[identifier]
        del delete_identifiers

        # Make sure we remove older / still extracting files
        valid_files = {}
        while not self.shuttingDown():
            try:
                identifier, group = movie_files.popitem()
            except:
                break

            # Check if movie is fresh and maybe still unpacking, ignore files newer than 1 minute
            if check_file_date:
                files_too_new, time_string = self.checkFilesChanged(
                    group['unsorted_files'])
                if files_too_new:
                    log.info(
                        'Files seem to be still unpacking or just unpacked (created on %s), ignoring for now: %s',
                        (time_string, identifier))

                    # Delete the unsorted list
                    del group['unsorted_files']

                    continue

            # Only process movies newer than x
            if newer_than and newer_than > 0:
                has_new_files = False
                for cur_file in group['unsorted_files']:
                    file_time = self.getFileTimes(cur_file)
                    if file_time[0] > newer_than or file_time[1] > newer_than:
                        has_new_files = True
                        break

                if not has_new_files:
                    log.debug(
                        'None of the files have changed since %s for %s, skipping.',
                        (time.ctime(newer_than), identifier))

                    # Delete the unsorted list
                    del group['unsorted_files']

                    continue

            valid_files[identifier] = group

        del movie_files

        total_found = len(valid_files)

        # Make sure only one movie was found if a download ID is provided
        if release_download and total_found == 0:
            log.info(
                'Download ID provided (%s), but no groups found! Make sure the download contains valid media files (fully extracted).',
                release_download.get('imdb_id'))
        elif release_download and total_found > 1:
            log.info(
                'Download ID provided (%s), but more than one group found (%s). Ignoring Download ID...',
                (release_download.get('imdb_id'), len(valid_files)))
            release_download = None

        # Determine file types
        processed_movies = {}
        while not self.shuttingDown():
            try:
                identifier, group = valid_files.popitem()
            except:
                break

            if return_ignored is False and identifier in ignored_identifiers:
                log.debug('Ignore file found, ignoring release: %s',
                          identifier)
                total_found -= 1
                continue

            # Group extra (and easy) files first
            group['files'] = {
                'movie_extra': self.getMovieExtras(group['unsorted_files']),
                'subtitle': self.getSubtitles(group['unsorted_files']),
                'subtitle_extra': self.getSubtitlesExtras(group['unsorted_files']),
                'nfo': self.getNfo(group['unsorted_files']),
                'trailer': self.getTrailers(group['unsorted_files']),
                'leftover': set(group['unsorted_files']),
            }

            # Media files
            if group['is_dvd']:
                group['files']['movie'] = self.getDVDFiles(
                    group['unsorted_files'])
            else:
                group['files']['movie'] = self.getMediaFiles(
                    group['unsorted_files'])

            if len(group['files']['movie']) == 0:
                log.error('Couldn\'t find any movie files for %s', identifier)
                total_found -= 1
                continue

            log.debug('Getting metadata for %s', identifier)
            group['meta_data'] = self.getMetaData(
                group, folder=folder, release_download=release_download)

            # Subtitle meta
            group['subtitle_language'] = self.getSubtitleLanguage(
                group) if not simple else {}

            # Get parent dir from movie files
            for movie_file in group['files']['movie']:
                group['parentdir'] = os.path.dirname(movie_file)
                group['dirname'] = None

                folder_names = group['parentdir'].replace(folder, '').split(
                    os.path.sep)
                folder_names.reverse()

                # Try and get a proper dirname, so no "A", "Movie", "Download" etc
                for folder_name in folder_names:
                    if folder_name.lower() not in self.ignore_names and len(folder_name) > 2:
                        group['dirname'] = folder_name
                        break

                break

            # Leftover "sorted" files
            for file_type in group['files']:
                if not file_type is 'leftover':
                    group['files']['leftover'] -= set(
                        group['files'][file_type])
                    group['files'][file_type] = list(group['files'][file_type])
            group['files']['leftover'] = list(group['files']['leftover'])

            # Delete the unsorted list
            del group['unsorted_files']

            # Determine movie
            group['media'] = self.determineMedia(
                group, release_download=release_download)
            if not group['media']:
                log.error('Unable to determine media: %s',
                          group['identifiers'])
            else:
                group['identifier'] = getIdentifier(
                    group['media']) or group['media']['info'].get('imdb')

            processed_movies[identifier] = group

            # Notify parent & progress on something found
            if on_found:
                on_found(group, total_found, len(valid_files))

            # Wait for all the async events calm down a bit
            while threading.activeCount() > 100 and not self.shuttingDown():
                log.debug('Too many threads active, waiting a few seconds')
                time.sleep(10)

        if len(processed_movies) > 0:
            log.info('Found %s movies in the folder %s',
                     (len(processed_movies), folder))
        else:
            log.debug('Found no movies in the folder %s', folder)

        return processed_movies
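
A core step in the scanner above is pulling leftovers (subtitles, nfo files) into a movie group when they share the master file's path minus its extension. A minimal standalone sketch of that matching (hypothetical helper; the real code uses its own getExt instead of os.path.splitext):

import os

def group_with_master(master_file, leftovers):
    # Strip the extension, then claim every leftover whose path contains
    # the remaining prefix
    wo_ext = os.path.splitext(master_file)[0]
    found = set(f for f in leftovers if wo_ext in f)
    return found, leftovers - found

leftovers = {'/m/Movie.2010.srt', '/m/Movie.2010.nfo', '/m/Other.srt'}
found, rest = group_with_master('/m/Movie.2010.mkv', leftovers)
print(sorted(found))  # ['/m/Movie.2010.nfo', '/m/Movie.2010.srt']
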
Example #41
    def _searchOnTitle(self, title, movie, quality, results):

        torrentlist = []

        onlyFreelech = bool(self.conf('only_freeleech'))
        onlyVerified = bool(self.conf('only_verified'))

        if '/logout.php' not in self.urlopen(self.urls['login'], data = self.getLoginParams()).lower():
            log.info('Problems logging into tehconnection.eu')
            return []

        data = self.getHTMLData(self.urls['search'] % tryUrlencode(getIdentifier(movie)))
        if data:
            try:
                resultsTable = BeautifulSoup(data).find('table', attrs = {'id' : 'browse_torrent_table'})
                if resultsTable is None:
                    log.info('movie not found on TehConnection')
                    return []

                pagelinkdata = resultsTable.find("a", { "title" : "View Torrent" })
                torrentpage = (pagelinkdata.attrs['href']).strip()
                indivTorrData = self.getHTMLData(self.urls['baseurl'] + (torrentpage))

                soup = BeautifulSoup(indivTorrData)
                items = soup.findAll("div", { "class" : "torrent_widget box pad" })
                for item in items:

                    torrentData = TorrentDetails(0, 0, '', '', 0, '', '', False, False)


                    detailstats = item.find("div", { "class" : "details_stats" })

                    #seeders
                    seed = detailstats.find("img", { "title" : "Seeders" }).parent
                    torrentData.seeders = ((seed.text).strip())

                    #leechers
                    leech = detailstats.find("img", { "title" : "Leechers" }).parent
                    torrentData.leechers = ((leech.text).strip())

                    #permalink
                    perma = detailstats.find("a", { "title" : "Permalink" })
                    torrentData.permalink = self.urls['baseurl'] + perma.attrs['href']

                    #download link
                    downlo = detailstats.find("a", { "title" : "Download" })
                    torrentData.downlink = self.urls['baseurl'] + downlo.attrs['href']

                    #Torrent ID
                    m = re.search(r'\d+$', torrentData.permalink)
                    torrentData.torrentID = (int(m.group()) if m else None)

                    #TorrentName
                    namedata = item.find("div", { "id" : "desc_%s" % torrentData.torrentID })
                    torrentData.torrentName = ((namedata.text).splitlines()[1]).strip()

                    #FileSize
                    sizedata = item.find("div", { "class" : "details_title" })
                    sizefile = ((sizedata.text).splitlines()[3]).replace("(","").replace(")","").strip()
                    torrentData.filesize = sizefile

                    #FreeLeech
                    freeleechdata = item.find("span", { "class" : "freeleech" })
                    torrentData.freeleech = freeleechdata is not None

                    #QualityEncode
                    qualityenc = detailstats.find("img", { "class" : "approved" })
                    if qualityenc is None:
                        torrentData.qualityEncode = False
                    else:
                        torrentData.torrentName += " HQ"
                        torrentData.qualityEncode = True

                    # Add to the results only when the Freeleech / Verified filters pass
                    # (each filter only constrains results when it is switched on)
                    if (not onlyFreelech or torrentData.freeleech) and \
                            (not onlyVerified or torrentData.qualityEncode):
                        torrentlist.append(torrentData)


                for torrentFind in torrentlist:
                    log.info('TehConnection found %s', torrentFind.torrentName)
                    results.append({
                        'leechers': tryInt(torrentFind.leechers),
                        'seeders': tryInt(torrentFind.seeders),
                        'name': torrentFind.torrentName,
                        'url': torrentFind.downlink,
                        'detail_url': torrentFind.permalink,
                        'id': torrentFind.torrentID,
                        'size': self.parseSize(torrentFind.filesize)
                    })

            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
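
The freeleech/verified gate above collapses to a single predicate; a hypothetical standalone version:

def passes_filters(is_freeleech, is_verified, only_freeleech, only_verified):
    # Each filter only constrains results when it is switched on
    return (not only_freeleech or is_freeleech) and \
           (not only_verified or is_verified)

print(passes_filters(True, False, only_freeleech=True, only_verified=False))   # True
print(passes_filters(False, True, only_freeleech=True, only_verified=False))  # False
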
Example #42
    def _search(self, movie, quality, results):
        imdbId = getIdentifier(movie).replace("t", "")
        url = self.urls['search'] % (imdbId)  #, cats[0])
        data = self.getHTMLData(url)

        if data:

            # Remove HDSpace NEW list
            split_data = data.partition(
                '<form name="tcategories" action="index.php" method="post">')
            data = split_data[2]

            html = BeautifulSoup(data)
            try:
                #Now attempt to get any others
                result_table = html.find('table', attrs={'class': 'lista'})
                if not result_table:
                    return

                entries = result_table.find_all('tr')
                log.info("entries length: %s", len(entries))

                if not entries:
                    return

                for result in entries:
                    block2 = result.find_all('td', attrs={'class': 'header'})
                    # Ignore header
                    if block2:
                        continue
                    cells = result.find_all('td')
                    log.info("cells length: %s", len(cells))

                    extend = 0
                    detail = cells[1 + extend].find('a')['href']
                    torrent_id = detail.replace(
                        'index.php?page=torrent-details&id=', '')
                    try:
                        torrent_age = datetime.now() - datetime.strptime(
                            cells[4 + extend].get_text().encode(
                                'ascii', 'ignore'), '%B %d, %Y,%H:%M:%S')
                    except:
                        torrent_age = timedelta(1)

                    results.append({
                        'id': torrent_id,
                        # replace() removes the literal 'History - ' prefix; strip() would eat any of those characters from the title edges
                        'name': cells[9 + extend].find('a')['title'].replace('History - ', '').replace('Blu-ray', 'bd50'),
                        'url': self.urls['home'] % cells[3 + extend].find('a')['href'],
                        'detail_url': self.urls['home'] % cells[1 + extend].find('a')['href'],
                        'size': self.parseSize(cells[5 + extend].get_text()),
                        'age': torrent_age.days,
                        'seeders': tryInt(cells[7 + extend].find('a').get_text()),
                        'leechers': tryInt(cells[8 + extend].find('a').get_text()),
                        'get_more_info': self.getMoreInfo,
                    })

            except:
                log.error('Failed getting results from %s: %s',
                          (self.getName(), traceback.format_exc()))
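
The age parsing above expects HDSpace-style dates such as 'March 01, 2015,14:30:00' and falls back to a one-day age when the cell doesn't parse. A standalone sketch of that behaviour (hypothetical helper name):

from datetime import datetime, timedelta

def torrent_age(cell_text):
    try:
        posted = datetime.strptime(cell_text, '%B %d, %Y,%H:%M:%S')
        return datetime.now() - posted
    except ValueError:
        # Unparseable date cell: assume the torrent is a day old
        return timedelta(1)

print(torrent_age('March 01, 2015,14:30:00').days)
print(torrent_age('garbage').days)  # 1
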