Ejemplo n.º 1
0
    def notify(self, message='', data=None, listener=None):
        """Send a notification through the Pushover API.

        @param message: text to send
        @param data: media info dict; when it carries an identifier an
            IMDb link is attached to the notification
        @param listener: unused here, part of the notifier interface
        @return: bool, True when the API call succeeded
        """
        if not data: data = {}

        api_data = {
            'user': self.conf('user_key'),
            'token': self.conf('api_token'),
            'message': to_unicode(message),
            'priority': self.conf('priority'),
            'sound': self.conf('sound'),
        }

        # Attach an IMDb link when the media has an identifier
        if data and get_identifier(data):
            api_data.update({
                'url':
                to_unicode('http://www.imdb.com/title/%s/' %
                           get_identifier(data)),
                'url_title':
                to_unicode('%s on IMDb' % get_title(data)),
            })

        try:
            data = self.urlopen(
                '%s/%s' % (self.api_url, '1/messages.json'),
                headers={'Content-type': 'application/x-www-form-urlencoded'},
                data=api_data)
            log.info2('Pushover responded with: %s', data)
            return True
        except Exception:
            # Was a bare "except:", which also swallowed SystemExit /
            # KeyboardInterrupt and hid the failure reason entirely.
            log.error('Pushover notification failed: %s', traceback.format_exc())
            return False
Ejemplo n.º 2
0
    def searchAll(self, manual=False):
        """Run a full search over all active movies.

        @param manual: True when user-triggered; passed through to the
            per-movie search
        """

        # Guard against concurrent full searches
        if self.in_progress:
            log.info('Search already in progress')
            fire_event('notify.frontend',
                       type='movie.searcher.already_started',
                       data=True,
                       message='Full search already in progress')
            return

        self.in_progress = True
        fire_event('notify.frontend',
                   type='movie.searcher.started',
                   data=True,
                   message='Full search started')

        medias = [
            x['_id'] for x in fire_event('media.with_status',
                                         'active',
                                         types='movie',
                                         with_doc=False,
                                         single=True)
        ]
        # Randomize so the same titles don't always get searched first
        random.shuffle(medias)

        total = len(medias)
        self.in_progress = {
            'total': total,
            'to_go': total,
        }

        try:
            search_protocols = fire_event('searcher.protocols', single=True)

            for media_id in medias:

                media = fire_event('media.get', media_id, single=True)
                if not media: continue

                try:
                    self.single(media, search_protocols, manual=manual)
                except IndexError:
                    log.error(
                        'Forcing library update for %s, if you see this often, please report: %s',
                        (get_identifier(media), traceback.format_exc()))
                    fire_event('movie.update', media_id)
                except Exception:
                    log.error('Search failed for %s: %s',
                              (get_identifier(media), traceback.format_exc()))

                self.in_progress['to_go'] -= 1

                # Break if CP wants to shut down
                if self.shuttingDown():
                    break

        except SearchSetupError:
            pass
        finally:
            # Always clear the flag; previously an unexpected exception left
            # in_progress set and blocked every future full search.
            self.in_progress = False
Ejemplo n.º 3
0
    def _search(self, movie, quality, results):
        """Search the tracker for a movie by IMDb id and append results.

        @param movie: media dict with an IMDb identifier
        @param quality: requested quality (unused by this provider)
        @param results: list, matching releases are appended in place
        """

        match = re.match(r'tt(\d{7})', get_identifier(movie))
        # Identifier isn't a tt-style IMDb id; previously this crashed with
        # AttributeError on match.group(1) since re.match returns None.
        if not match:
            return

        data = self._post_query(imdb={'id': match.group(1)})

        if data:
            try:
                for result in data:
                    results.append({
                        'id': result['id'],
                        'name': result['name'],
                        'url': self.urls['download'] %
                               (result['id'], self.conf('passkey')),
                        'detail_url': self.urls['detail'] % result['id'],
                        # presumably bytes -> MB; verify against the API
                        'size': try_int(result['size']) / 1024 / 1024,
                        'seeders': try_int(result['seeders']),
                        'leechers': try_int(result['leechers'])
                    })
            except Exception:
                log.error('Failed getting results from %s: %s',
                          (self.getName(), traceback.format_exc()))
Ejemplo n.º 4
0
    def updateReleaseDate(self, media_id):
        """
        Update release_date (eta) info only

        @param media_id: document id
        @return: dict, with dates dvd, theater, bluray, expires
        """

        try:
            db = get_db()

            media = db.get('id', media_id)

            # Fetch full info first when the document has none yet
            if not media.get('info'):
                media = self.update(media_id)
                dates = media.get('info', {}).get('release_date')
            else:
                dates = media.get('info').get('release_date')

            # Refresh when dates are missing, expired, or the expiry looks
            # bogus (more than ~4 weeks in the future). Equivalent to the
            # old precedence-ambiguous `a and (x or y) or not a` condition.
            if not dates or dates.get('expires', 0) < time.time() or dates.get('expires', 0) > time.time() + (604800 * 4):
                dates = fire_event('movie.info.release_date', identifier=get_identifier(media), merge=True)
                media['info'].update({'release_date': dates})
                db.update(media)

            return dates
        except Exception:
            log.error('Failed updating release dates: %s', traceback.format_exc())

        return {}
Ejemplo n.º 5
0
    def search(self, group):
        """Collect trailer links for a media group, keyed by resolution."""

        movie_name = get_title(group)

        request_url = self.urls['api'] % self.movieUrlName(movie_name)
        try:
            page = self.getCache('hdtrailers.%s' % get_identifier(group), request_url, show_error=False)
        except HTTPError:
            log.debug('No page found for: %s', movie_name)
            page = None

        result_data = {'480p': [], '720p': [], '1080p': []}

        if not page:
            return result_data

        used_alternative = False
        for provider in self.providers:
            found = self.findByProvider(page, provider)

            # On the provider's 404 marker, fall back to the alternative
            # lookup -- but only once per search
            if found.get('404') and not used_alternative:
                found = self.findViaAlternative(group)
                used_alternative = True

            result_data = merge_dictionaries(result_data, found)

        return result_data
Ejemplo n.º 6
0
    def findViaAlternative(self, group):
        """Fallback trailer lookup through the backup search endpoint.

        @param group: media group dict; title/identifier drive the query
        @return: dict mapping '480p'/'720p'/'1080p' to lists of trailer links
        """
        results = {'480p': [], '720p': [], '1080p': []}

        movie_name = get_title(group)

        url = "%s?%s" % (self.urls['backup'], try_url_encode({'s': movie_name}))
        try:
            data = self.getCache('hdtrailers.alt.%s' % get_identifier(group), url, show_error=False)
        except HTTPError:
            log.debug('No alternative page found for: %s', movie_name)
            data = None

        if not data:
            return results

        try:
            # Only parse table tags to keep the soup small
            html = BeautifulSoup(data, parse_only = self.only_tables_tags)
            result_table = html.find_all('h2', text = re.compile(movie_name))

            for h2 in result_table:
                # NOTE(review): h2 looks like a Tag, which has no .lower();
                # this likely raises AttributeError and falls through to the
                # handler below -- confirm whether h2 is actually a string here.
                if 'trailer' in h2.lower():
                    parent = h2.parent.parent.parent
                    trailerLinks = parent.find_all('a', text = re.compile('480p|720p|1080p'))
                    try:
                        for trailer in trailerLinks:
                            # NOTE(review): indexing `results` by the tag object
                            # itself (not its text) looks suspect; any KeyError
                            # is silently swallowed by the except below.
                            results[trailer].insert(0, trailer.parent['href'])
                    except:
                        pass

        except AttributeError:
            log.debug('No trailers found in via alternative.')

        return results
Ejemplo n.º 7
0
    def notify(self, message='', data=None, listener=None):
        """Send a notification via Telegram's Bot API.

        @param message: text to send
        @param data: media info dict; when it carries an identifier an
            IMDb link is appended to the message
        @param listener: unused here, part of the notifier interface
        @return: bool, True when Telegram accepted the message
        """
        if not data: data = {}

        # Get configuration data
        token = self.conf('bot_token')
        usr_id = self.conf('receiver_user_id')

        # Add IMDB url to message:
        if data:
            imdb_id = get_identifier(data)
            if imdb_id:
                url = 'http://www.imdb.com/title/{0}/'.format(imdb_id)
                message = '{0}\n{1}'.format(message, url)

        # Construct message
        payload = {
            'chat_id': usr_id,
            'text': message,
            'parse_mode': 'Markdown'
        }

        # Send message using Telegram's Bot API; previously a network error
        # here crashed the notifier instead of reporting failure.
        try:
            response = requests.post(self.TELEGRAM_API % (token, "sendMessage"),
                                     data=payload)
        except Exception:
            log.error('Could not reach TelegramBot: %s', traceback.format_exc())
            return False

        # Error logging
        sent_successfully = True
        if not response.status_code == 200:
            log.error(
                'Could not send notification to TelegramBot (token=%s). Response: [%s]',
                (token, response.text))
            sent_successfully = False

        return sent_successfully
Ejemplo n.º 8
0
    def suggestView(self, limit=6, **kwargs):
        """Return cached (or freshly generated) movie suggestions.

        @param limit: maximum number of suggestions returned
        @kwarg movies: comma separated imdb ids to base suggestions on
        @kwarg ignored: comma separated imdb ids to exclude
        @kwarg seen: comma separated imdb ids already seen
        @return: dict with a 'success' flag and a 'movies' list
        """
        if self.is_disabled():
            return {'success': True, 'movies': []}

        movies = split_string(kwargs.get('movies', ''))
        ignored = split_string(kwargs.get('ignored', ''))
        seen = split_string(kwargs.get('seen', ''))

        cached_suggestion = self.getCache('suggestion_cached')
        if cached_suggestion:
            suggestions = cached_suggestion
        else:

            # Fall back to the library's active/done movies
            # ("not x or len(x) == 0" collapsed to the equivalent "not x")
            if not movies:
                active_movies = fire_event('media.with_status',
                                           ['active', 'done'],
                                           types='movie',
                                           single=True)
                movies = [get_identifier(x) for x in active_movies]

            if not ignored:
                ignored = split_string(Env.prop('suggest_ignore', default=''))
            if not seen:
                # NOTE(review): seen ids are merged into `movies`, not `seen`,
                # mirroring the original behavior -- confirm this is intended.
                movies.extend(
                    split_string(Env.prop('suggest_seen', default='')))

            suggestions = fire_event('movie.suggest',
                                     movies=movies,
                                     ignore=ignored,
                                     single=True)
            self.setCache('suggestion_cached', suggestions,
                          timeout=6048000)  # Cache for 10 weeks

        medias = []
        for suggestion in suggestions[:int(limit)]:

            # Cache poster, preferring tmdb-hosted images when available
            posters = suggestion.get('images', {}).get('poster', [])
            poster = [x for x in posters if 'tmdb' in x]
            posters = poster if len(poster) > 0 else posters

            cached_poster = fire_event(
                'file.download', url=posters[0],
                single=True) if len(posters) > 0 else False
            files = {'image_poster': [cached_poster]} if cached_poster else {}

            medias.append({
                'status': 'suggested',
                'title': get_title(suggestion),
                'type': 'movie',
                'info': suggestion,
                'files': files,
                'identifiers': {
                    'imdb': suggestion.get('imdb')
                }
            })

        return {'success': True, 'movies': medias}
Ejemplo n.º 9
0
    def _search(self, movie, quality, results):
        """Query the YTS API for a movie and append matching torrents.

        @param movie: media dict with an IMDb identifier
        @param quality: quality dict; torrents filtered on its 'label'
        @param results: list, matching releases are appended in place
        """
        limit = 10
        page = 1
        data = self.getJsonData(self.urls['search'] %
                                (get_identifier(movie), limit, page))

        if not data:
            return

        movie_count = try_int(data['data']['movie_count'])
        if movie_count == 0:
            log.debug('%s - found no results', (self.getName()))
            return

        # Iterate the movies directly instead of indexing by range(len(...))
        for result in data['data']['movies']:
            name = result['title']
            year = result['year']
            detail_url = result['url']

            for torrent in result['torrents']:
                t_quality = torrent['quality']
                if t_quality not in quality['label']:
                    continue

                # Renamed from `hash` to avoid shadowing the builtin
                torrent_hash = torrent['hash']
                size = try_int(torrent['size_bytes'] / 1048576)
                seeders = try_int(torrent['seeds'])
                leechers = try_int(torrent['peers'])
                # format: 2017-02-17 18:40:03
                pubdate = datetime.strptime(torrent['date_uploaded'],
                                            '%Y-%m-%d %H:%M:%S')
                age = (datetime.now() - pubdate).days

                results.append({
                    # YTS exposes no stable release id, so fake one
                    'id': random.randint(100, 9999),
                    'name': '%s (%s) %s %s %s' %
                            (name, year, 'YTS', t_quality, 'BR-Rip'),
                    'url': self.make_magnet(torrent_hash, name),
                    'size': size,
                    'seeders': seeders,
                    'leechers': leechers,
                    'age': age,
                    'detail_url': detail_url,
                    'score': 1
                })
Ejemplo n.º 10
0
 def buildUrl(self, media, host):
     """Build the search url for the configured host."""
     year = media['info']['year']
     params = {
         'user': host['name'],
         'passkey': host['pass_key'],
         'imdbid': get_identifier(media),
         'search': '%s %s' % (get_title(media), year),
     }
     return '%s?%s' % (host['host'], try_url_encode(params))
Ejemplo n.º 11
0
    def notify(self, message='', data=None, listener=None):
        """POST the notification to the configured webhook url.

        @param message: text to send
        @param data: media info dict; its imdb id is included when present
        @param listener: unused here, part of the notifier interface
        @return: bool, True when the request succeeded
        """
        if not data: data = {}

        post_data = {'message': to_unicode(message)}

        # Compute the identifier once instead of twice
        identifier = get_identifier(data)
        if identifier:
            post_data.update({'imdb_id': identifier})

        headers = {'Content-type': 'application/x-www-form-urlencoded'}

        try:
            self.urlopen(self.conf('url'),
                         headers=headers,
                         data=post_data,
                         show_error=False)
            return True
        except Exception:
            log.error('Webhook notification failed: %s',
                      traceback.format_exc())

        return False
Ejemplo n.º 12
0
 def buildUrl(self, media, quality):
     """Build the url-encoded search query for this media and quality."""
     search_params = {
         'q': get_identifier(media),
         'm': 'n',
         'max': 400,
         'adv_age': Env.setting('retention', 'nzb'),
         'adv_sort': 'date',
         'adv_col': 'on',
         'adv_nfo': 'on',
         'xminsize': quality.get('size_min'),
         'xmaxsize': quality.get('size_max'),
     }
     return try_url_encode(search_params)
Ejemplo n.º 13
0
    def cpTag(self, media, unique_tag=False):
        """Build the '.cp(tt...)' identifier tag appended to file names.

        @param media: media dict to take the imdb identifier from
        @param unique_tag: when True, append a random string so the tag is
            unique even without an identifier
        @return: the tag, or '' when it would carry no information
        """

        tag = ''
        if Env.setting('enabled', 'renamer') or unique_tag:
            identifier = get_identifier(media) or ''

            tag = '.cp('
            tag += identifier
            if unique_tag:
                # The old code built ', ' + random_string(), threw that string
                # away, and appended a *second* random_string() -- the dead
                # computation is removed; output shape is unchanged.
                if identifier:
                    tag += ', '
                tag += random_string()
            tag += ')'

        # '.cp()' alone (7 chars or fewer) carries no information
        return tag if len(tag) > 7 else ''
Ejemplo n.º 14
0
    def create(self, message=None, group=None):
        """Create metadata (nfo plus image files) for a media group.

        @param message: unused, part of the event interface
        @param group: media group dict holding the movie and its files
        """
        if self.is_disabled(): return
        if not group: group = {}

        log.info('Creating %s metadata.', self.getName())

        # Update library to get latest info
        try:
            group['media'] = fire_event('movie.update',
                                        group['media'].get('_id'),
                                        identifier=get_identifier(
                                            group['media']),
                                        extended=True,
                                        single=True)
        except Exception:
            log.error('Failed to update movie, before creating metadata: %s',
                      traceback.format_exc())

        root_name = to_unicode(self.getRootName(group))
        meta_name = to_unicode(os.path.basename(root_name))
        root = to_unicode(os.path.dirname(root_name))

        movie_info = group['media'].get('info')

        for file_type in ['nfo']:
            try:
                self._createType(meta_name, root, movie_info, group, file_type,
                                 0)
            except Exception:
                # Log file_type instead of the previously hard-coded 'nfo'
                log.error('Unable to create %s file: %s',
                          (file_type, traceback.format_exc()))

        for file_type in [
                'thumbnail', 'fanart', 'banner', 'disc_art', 'logo',
                'clear_art', 'landscape', 'extra_thumbs', 'extra_fanart'
        ]:
            try:
                # Poster and backdrop images live under different info keys
                if file_type == 'thumbnail':
                    num_images = len(movie_info['images']['poster_original'])
                elif file_type == 'fanart':
                    num_images = len(movie_info['images']['backdrop_original'])
                else:
                    num_images = len(movie_info['images'][file_type])

                for i in range(num_images):
                    self._createType(meta_name, root, movie_info, group,
                                     file_type, i)
            except Exception:
                log.error('Unable to create %s file: %s',
                          (file_type, traceback.format_exc()))
Ejemplo n.º 15
0
    def _search(self, movie, quality, results):
        """Query the tracker's JSON API and append matching torrents.

        @param movie: media dict with an IMDb identifier
        @param quality: requested quality (unused by this provider)
        @param results: list, matching releases are appended in place
        """
        data = self.getJsonData(self.urls['search'] % (
        self.conf('apikey'), self.conf('username'), get_identifier(movie), self.conf('internal_only')))

        if data:
            if 'error' in data:
                if self.login_fail_msg in data['error']: # Check for login failure
                    self.disableAccount()
                else:
                    log.error('%s returned an error (possible rate limit): %s', (self.getName(), data['error']))
                return

            try:
                for key, result in list(data.items()):
                    # Any entry reporting zero results means an empty set
                    if try_int(result['total_results']) == 0:
                        return
                    torrentscore = self.conf('extra_score')
                    releasegroup = result['releasegroup']
                    resolution = result['resolution']
                    encoding = result['encoding']
                    freeleech = try_int(result['freeleech'])
                    seeders = try_int(result['seeders'])
                    torrent_desc = '/ %s / %s / %s / %s seeders' % (releasegroup, resolution, encoding, seeders)

                    # Prefer internal releases when configured
                    if freeleech > 0 and self.conf('prefer_internal'):
                        torrent_desc += '/ Internal'
                        torrentscore += 200

                    # Dead torrents score nothing
                    if seeders == 0:
                        torrentscore = 0

                    name = result['release_name']
                    year = try_int(result['year'])

                    results.append({
                        'id': try_int(result['torrentid']),
                        # Raw string for the regex (was a plain string with escapes)
                        'name': re.sub(r'[^A-Za-z0-9\-_ \(\).]+', '', '%s (%s) %s' % (name, year, torrent_desc)),
                        'url': self.urls['download'] % (result['torrentid'], result['torrentpass']),
                        'detail_url': self.urls['detail'] % result['torrentid'],
                        'size': try_int(result['size']),
                        'seeders': try_int(result['seeders']),
                        'leechers': try_int(result['leechers']),
                        'age': try_int(result['age']),
                        'score': torrentscore
                    })
            except Exception:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
Ejemplo n.º 16
0
    def buildUrl(self, media, host):
        """Build the newznab-style movie-search query for the given host."""

        query = try_url_encode({
            't': 'movie',
            'imdbid': get_identifier(media).replace('tt', ''),
            'apikey': host['api_key'],
            'extended': 1
        })

        # Append any user-supplied extras configured for this host
        custom_tag = host.get('custom_tag', '')
        if len(custom_tag) > 0:
            query = '%s&%s' % (query, host.get('custom_tag'))

        if len(host['custom_category']) > 0:
            query = '%s&cat=%s' % (query, host['custom_category'])

        return query
Ejemplo n.º 17
0
    def updateSuggestionCache(self,
                              ignore_imdb=None,
                              limit=6,
                              ignored=None,
                              seen=None):
        """Refresh the cached movie suggestions.

        Drops ignore_imdb (and duplicates) from the cached list, tops it up
        with fresh suggestions when below limit, and stores the result.
        """

        # Combine with previous suggestion_cache
        previous = self.getCache('suggestion_cached') or []
        new_suggestions = []
        ignored = ignored if ignored else []
        seen = seen if seen else []

        if ignore_imdb:
            kept_imdbs = []
            for suggestion in previous:
                imdb = suggestion.get('imdb')
                if imdb != ignore_imdb and imdb not in kept_imdbs:
                    kept_imdbs.append(imdb)
                    new_suggestions.append(suggestion)

        # Get new results and add them
        if len(new_suggestions) - 1 < limit:
            active_movies = fire_event('media.with_status', ['active', 'done'],
                                       single=True)
            known = [get_identifier(x) for x in active_movies]
            known.extend(seen)

            ignored.extend([x.get('imdb') for x in previous])
            fresh = fire_event('movie.suggest',
                               movies=known,
                               ignore=remove_duplicate(ignored),
                               single=True)

            if fresh:
                new_suggestions.extend(fresh)

        self.setCache('suggestion_cached', new_suggestions, timeout=3024000)

        return new_suggestions
Ejemplo n.º 18
0
    def notify(self, message='', data=None, listener=None):
        """Add the media to the remote library (or run a test call)."""
        if not data: data = {}

        # Test calls only hit the test endpoint
        if listener == 'test':
            return self.call((self.urls['test']))

        movie_payload = []
        if data:
            movie_payload = [{'ids': {'imdb': get_identifier(data)}}]
        post_data = {'movies': movie_payload}

        result = self.call((self.urls['library']), post_data)
        # Optionally also drop the movie from the remote watchlist
        if self.conf('remove_watchlist_enabled'):
            result = result and self.call(
                (self.urls['unwatchlist']), post_data)

        return result
Ejemplo n.º 19
0
    def _search(self, media, quality, results):
        """Search the tracker's JSON endpoint and append matching torrents.

        @param media: media dict with title and imdb identifier
        @param quality: quality dict; 'identifier' selects the search params
        @param results: list, matching releases are appended in place
        """

        # NOTE(review): movie_title is never used below -- confirm it can go
        movie_title = get_title(media)
        quality_id = quality['identifier']

        # .copy() so the shared per-quality params are not mutated
        params = merge_dictionaries(self.quality_search_params[quality_id].copy(), {
            'order_by': 'relevance',
            'order_way': 'descending',
            'searchstr': get_identifier(media)
        })

        url = '%s?json=noredirect&%s' % (self.urls['torrent'], try_url_encode(params))
        res = self.getJsonData(url)

        try:
            if not 'Movies' in res:
                return

            # Needed to build authenticated download links below
            authkey = res['AuthKey']
            passkey = res['PassKey']

            for ptpmovie in res['Movies']:
                if not 'Torrents' in ptpmovie:
                    log.debug('Movie %s (%s) has NO torrents', (ptpmovie['Title'], ptpmovie['Year']))
                    continue

                log.debug('Movie %s (%s) has %d torrents', (ptpmovie['Title'], ptpmovie['Year'], len(ptpmovie['Torrents'])))
                for torrent in ptpmovie['Torrents']:
                    torrent_id = try_int(torrent['Id'])
                    torrentdesc = ''
                    torrentscore = 0

                    # Score boosts/penalties driven by user preferences
                    if 'GoldenPopcorn' in torrent and torrent['GoldenPopcorn']:
                        torrentdesc += ' HQ'
                        if self.conf('prefer_golden'):
                            torrentscore += 5000
                    if 'FreeleechType' in torrent:
                        torrentdesc += ' Freeleech'
                        if self.conf('prefer_freeleech'):
                            torrentscore += 7000
                    if 'Scene' in torrent and torrent['Scene']:
                        torrentdesc += ' Scene'
                        if self.conf('prefer_scene'):
                            torrentscore += 2000
                        if self.conf('no_scene'):
                            torrentscore -= 2000
                    if 'RemasterTitle' in torrent and torrent['RemasterTitle']:
                        torrentdesc += self.htmlToASCII(' %s' % torrent['RemasterTitle'])

                    torrent_name = torrent['ReleaseName'] + ' - %s' % torrentdesc

                    # Deferred quality check; stored with the result and run
                    # later by the searcher
                    def extra_check(item):
                        return self.torrentMeetsQualitySpec(item, quality_id)

                    results.append({
                        'id': torrent_id,
                        'name': torrent_name,
                        'Source': torrent['Source'],
                        'Checked': 'true' if torrent['Checked'] else 'false',
                        'Resolution': torrent['Resolution'],
                        'url': '%s?action=download&id=%d&authkey=%s&torrent_pass=%s' % (self.urls['torrent'], torrent_id, authkey, passkey),
                        'detail_url': self.urls['detail'] % torrent_id,
                        # UploadTime parsed to a unix timestamp
                        'date': try_int(time.mktime(parse(torrent['UploadTime']).timetuple())),
                        # presumably bytes -> MB; verify against the API
                        'size': try_int(torrent['Size']) / (1024 * 1024),
                        'seeders': try_int(torrent['Seeders']),
                        'leechers': try_int(torrent['Leechers']),
                        'score': torrentscore,
                        'extra_check': extra_check,
                    })

        except:
            log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
Ejemplo n.º 20
0
    def _search(self, movie, quality, results):
        """Search the tracker's XML-ish endpoint and append matching torrents.

        @param movie: media dict with an IMDb identifier
        @param quality: requested quality (unused by this provider)
        @param results: list, matching releases are appended in place
        """

        data = self.getHTMLData(self.urls['search'] %
                                (self.conf('passkey'), get_identifier(movie),
                                 self.conf('only_internal')))

        if data:
            # Detect a failed login and disable the account
            if self.login_fail_msg in data:
                self.disableAccount()
                return

            try:
                soup = BeautifulSoup(data)

                # API-level error reported inside the response body
                if soup.find('error'):
                    log.info(soup.find('error').get_text())
                    return

                # Needed to build authenticated download links below
                authkey = soup.find('authkey').get_text()
                entries = soup.find_all('torrent')

                for entry in entries:

                    torrentscore = 0
                    torrent_id = entry.find('id').get_text()
                    name = entry.find('name').get_text()
                    year = entry.find('year').get_text()
                    releasegroup = entry.find('releasegroup').get_text()
                    resolution = entry.find('resolution').get_text()
                    encoding = entry.find('encoding').get_text()
                    freeleech = entry.find('freeleech').get_text()
                    media = entry.find('media').get_text()
                    audioformat = entry.find('audioformat').get_text()

                    # skip audio channel only releases
                    if resolution == '':
                        continue

                    torrent_desc = '%s.%s.%s.%s-%s' % (
                        resolution, media, audioformat, encoding, releasegroup)

                    # NOTE(review): freeleech values '0.25'/'0.50' are treated
                    # as internal releases here -- confirm against the API
                    if self.conf('prefer_internal') and freeleech in [
                            '0.25', '0.50'
                    ]:
                        torrentscore += 200

                    # Boost encodes and/or remuxes per user preference
                    if encoding == 'x264' and self.conf('favor') in [
                            'encode', 'both'
                    ]:
                        torrentscore += 200
                    elif re.search('Remux',
                                   encoding) and self.conf('favor') in [
                                       'remux', 'both'
                                   ]:
                        torrentscore += 200

                    # Normalize the release name to dot-separated tokens
                    name = re.sub(r'\W', '.', name)
                    name = re.sub(r'\.+', '.', name)
                    results.append({
                        'id':
                        torrent_id,
                        'name':
                        '%s.%s.%s' % (name, year, torrent_desc),
                        'url':
                        self.urls['download'] %
                        (torrent_id, authkey, self.conf('passkey')),
                        'detail_url':
                        self.urls['detail'] % torrent_id,
                        'size':
                        try_int(entry.find('size').get_text()) / 1048576,
                        'seeders':
                        try_int(entry.find('seeders').get_text()),
                        'leechers':
                        try_int(entry.find('leechers').get_text()),
                        'score':
                        torrentscore
                    })

            except:
                log.error('Failed getting results from %s: %s',
                          (self.getName(), traceback.format_exc()))
Ejemplo n.º 21
0
    def update(self, media_id = None, identifier = None, default_title = None, extended = False):
        """
        Update movie information inside media['doc']['info']

        @param media_id: document id
        @param identifier: imdb id, used to look the media up when no media_id is given
        @param default_title: default title, if empty, use first one or existing one
        @param extended: update with extended info (parses more info, actors, images from some info providers)
        @return: dict, with media (False when no movie info could be fetched)
        """

        if self.shuttingDown():
            return

        # Lock per media so concurrent updates of the same movie serialize.
        # NOTE(review): the ternary binds looser than %, so this is
        # 'media.get.<id>' when media_id is set, else the bare identifier --
        # kept as-is; confirm the unprefixed key is intended.
        lock_key = 'media.get.%s' % media_id if media_id else identifier
        self.acquireLock(lock_key)

        media = {}
        try:
            db = get_db()

            if media_id:
                media = db.get('id', media_id)
            else:
                media = db.get('media', 'imdb-%s' % identifier, with_doc = True)['doc']

            info = fire_event('movie.info', merge=True, extended=extended, identifier=get_identifier(media))

            # Don't need those here
            try: del info['in_wanted']
            except Exception: pass
            try: del info['in_library']
            except Exception: pass

            if not info or len(info) == 0:
                log.error('Could not update, no movie info to work with: %s', identifier)
                return False

            # Update basic info
            media['info'] = info

            titles = info.get('titles', [])
            log.debug('Adding titles: %s', titles)

            # Define default title
            if default_title or media.get('title') == 'UNKNOWN' or len(media.get('title', '')) == 0:
                media['title'] = self.getDefaultTitle(info, default_title)

            # Files
            image_urls = info.get('images', [])

            self.getPoster(media, image_urls)

            db.update(media)
        except Exception:
            log.error('Failed update media: %s', traceback.format_exc())
        finally:
            # Previously the early 'return False' above skipped this call
            # and leaked the lock permanently for this media.
            self.releaseLock(lock_key)

        return media
Ejemplo n.º 22
0
    def updateLibrary(self, full=True):
        """Scan all managed directories and sync found movies into the database.

        full: when True every folder is fully rescanned and, if the 'cleanup'
        option is set, database entries whose files are gone are removed; when
        False only files changed since the previous run are scanned.
        Per-folder progress is tracked in self.in_progress and pushed to the
        frontend via 'notify.frontend' events.
        """
        # Full and incremental runs keep separate "last run" timestamps
        last_update_key = 'manage.last_update%s' % ('_full' if full else '')
        last_update = float(Env.prop(last_update_key, default=0))

        if self.in_progress:
            log.info('Already updating library: %s', self.in_progress)
            return
        elif self.is_disabled() or (last_update > time.time() - 20):
            # Disabled, or last run was less than 20 seconds ago
            return

        # in_progress doubles as the "busy" flag and the per-folder progress map
        self.in_progress = {}
        fire_event('notify.frontend', type='manage.updating', data=True)

        try:

            directories = self.directories()
            directories.sort()
            added_identifiers = []

            # Add some progress
            for directory in directories:
                self.in_progress[os.path.normpath(directory)] = {
                    'started': False,
                    'eta': -1,
                    'total': None,
                    'to_go': None,
                }

            for directory in directories:
                folder = os.path.normpath(directory)
                self.in_progress[os.path.normpath(
                    directory)]['started'] = try_int(time.time())

                if not os.path.isdir(folder):
                    if len(directory) > 0:
                        log.error('Directory doesn\'t exist: %s', folder)
                    continue

                log.info('Updating manage library: %s', folder)
                fire_event('notify.frontend',
                           type='manage.update',
                           data=True,
                           message='Scanning for movies in "%s"' % folder)

                # Callback records found identifiers and updates in_progress
                onFound = self.createAddToLibrary(folder, added_identifiers)
                fire_event('scanner.scan',
                           folder=folder,
                           simple=True,
                           newer_than=last_update if not full else 0,
                           check_file_date=False,
                           on_found=onFound,
                           single=True)

                # Break if CP wants to shut down
                if self.shuttingDown():
                    break

            # If cleanup option is enabled, remove offline files from database
            if self.conf('cleanup') and full and not self.shuttingDown():

                # Get movies with done status
                total_movies, done_movies = fire_event('media.list',
                                                       types='movie',
                                                       status='done',
                                                       release_status='done',
                                                       status_or=True,
                                                       single=True)

                deleted_releases = []
                for done_movie in done_movies:
                    # Movie no longer found on disk: remove it entirely
                    if get_identifier(done_movie) not in added_identifiers:
                        fire_event('media.delete',
                                   media_id=done_movie['_id'],
                                   delete_from='all')
                    else:

                        releases = done_movie.get('releases', [])

                        # Clean releases whose files have vanished from disk
                        for release in releases:
                            if release.get('files'):
                                brk = False
                                for file_type in release.get('files', {}):
                                    for release_file in release['files'][
                                            file_type]:
                                        # Remove release not available anymore
                                        if not os.path.isfile(
                                                sp(release_file)):
                                            fire_event('release.clean',
                                                       release['_id'])
                                            brk = True
                                            break
                                    if brk:
                                        break

                        # Check if there are duplicate releases (different quality) use the last one, delete the rest
                        if len(releases) > 1:
                            used_files = {}
                            for release in releases:
                                for file_type in release.get('files', {}):
                                    for release_file in release['files'][
                                            file_type]:
                                        already_used = used_files.get(
                                            release_file)

                                        if already_used:
                                            # Two releases share a file: delete the older one
                                            release_id = release[
                                                '_id'] if already_used.get(
                                                    'last_edit',
                                                    0) > release.get(
                                                        'last_edit', 0
                                                    ) else already_used['_id']
                                            if release_id not in deleted_releases:
                                                fire_event('release.delete',
                                                           release_id,
                                                           single=True)
                                                deleted_releases.append(
                                                    release_id)
                                            break
                                        else:
                                            used_files[release_file] = release
                            del used_files

                    # Break if CP wants to shut down
                    if self.shuttingDown():
                        break

                if not self.shuttingDown():
                    db = get_db()
                    db.reindex()

            Env.prop(last_update_key, time.time())
        except:
            log.error('Failed updating library: %s', (traceback.format_exc()))

        # Wait until every folder's scan reports completion.
        # NOTE(review): 'to_go' is initialised to None above; this assumes the
        # scanner's on_found callback replaces it with a number before the
        # comparison — on Python 3 "None <= 0" would raise TypeError. TODO
        # confirm createAddToLibrary always sets 'to_go'.
        while self.in_progress and len(
                self.in_progress) > 0 and not self.shuttingDown():

            delete_me = {}

            # noinspection PyTypeChecker
            for folder in self.in_progress:
                if self.in_progress[folder]['to_go'] <= 0:
                    delete_me[folder] = True

            for delete in delete_me:
                del self.in_progress[delete]

            time.sleep(1)

        fire_event('notify.frontend', type='manage.updating', data=False)
        self.in_progress = False
Ejemplo n.º 23
0
    def scan(self,
             folder=None,
             files=None,
             release_download=None,
             simple=False,
             newer_than=0,
             return_ignored=True,
             check_file_date=True,
             on_found=None):
        """Scan a folder (or explicit file list) and group files into movie releases.

        folder: base directory to scan; must exist.
        files: optional pre-collected file list; when given, the folder walk
            and the "recently changed" check are skipped.
        release_download: download info dict; only honored when exactly one
            group is found, otherwise discarded.
        simple: skip expensive extras (subtitle language detection).
        newer_than: unix timestamp; only keep groups that contain files changed
            after it (0 = keep everything).
        return_ignored: when False, drop groups containing ignored files.
        check_file_date: skip groups with files created less than a minute ago
            (probably still unpacking).
        on_found: callback fired as on_found(group, total_found, to_go) for
            every processed group.

        Returns a dict mapping identifier -> group dict (files, meta_data,
        media, etc.).
        """

        folder = sp(folder)

        if not folder or not os.path.isdir(folder):
            log.error('Folder doesn\'t exists: %s', folder)
            return {}

        # Movie "master" files keyed on identifier; everything else is a leftover
        movie_files = {}
        leftovers = []

        # Scan all files of the folder if no files are set
        if not files:
            try:
                files = []
                for root, dirs, walk_files in os.walk(folder, followlinks=True):
                    files.extend([
                        sp(os.path.join(sp(root), ss(filename)))
                        for filename in walk_files
                    ])

                    # Break if CP wants to shut down
                    if self.shuttingDown():
                        break

            except:
                log.error('Failed getting files from %s: %s',
                          (folder, traceback.format_exc()))

            log.debug('Found %s files to scan and group in %s',
                      (len(files), folder))
        else:
            # Caller supplied the files, so don't second-guess their age
            check_file_date = False
            files = [sp(x) for x in files]

        # First pass: split into potential movie files and leftovers
        for file_path in files:

            if not os.path.exists(file_path):
                continue

            # Samples go to the leftover pile; ignored files are dropped entirely
            if self.isSampleFile(file_path):
                leftovers.append(file_path)
                continue
            elif not self.keepFile(file_path):
                continue

            is_dvd_file = self.isDVDFile(file_path)

            # Minimal 300MB files or is DVD file
            if self.filesizeBetween(
                    file_path, self.file_sizes['movie']) or is_dvd_file:

                # Normal identifier
                identifier = self.createStringIdentifier(
                    file_path, folder, exclude_filename=is_dvd_file)
                identifiers = [identifier]

                # Identifier with quality
                quality = fire_event(
                    'quality.guess',
                    files=[file_path],
                    size=self.getFileSize(file_path),
                    single=True) if not is_dvd_file else {'identifier': 'dvdr'}
                if quality:
                    identifier_with_quality = '%s %s' % (
                        identifier, quality.get('identifier', ''))
                    identifiers = [identifier_with_quality, identifier]

                if not movie_files.get(identifier):
                    movie_files[identifier] = {
                        'unsorted_files': [],
                        'identifiers': identifiers,
                        'is_dvd': is_dvd_file,
                    }

                movie_files[identifier]['unsorted_files'].append(file_path)
            else:
                leftovers.append(file_path)

            # Break if CP wants to shut down
            if self.shuttingDown():
                break

        # Cleanup
        del files

        # Sort reverse, this prevents "Iron man 2" from getting grouped with
        # "Iron man" as the "Iron Man 2" files will be grouped first.
        # NOTE(review): a set does not preserve the sorted order, so the sort
        # only helps if set iteration happens to follow it — TODO confirm.
        leftovers = set(sorted(leftovers, reverse=True))

        # Group files minus extension
        ignored_identifiers = []
        for identifier, group in list(movie_files.items()):
            if identifier not in group['identifiers'] and len(identifier) > 0:
                group['identifiers'].append(identifier)

            log.debug('Grouping files: %s', identifier)

            # Pull in leftovers sharing the movie file's name (minus extension)
            has_ignored = 0
            for file_path in list(group['unsorted_files']):
                ext = get_extension(file_path)
                wo_ext = file_path[:-(len(ext) + 1)]
                found_files = set([i for i in leftovers if wo_ext in i])
                group['unsorted_files'].extend(found_files)
                leftovers = leftovers - found_files

                has_ignored += 1 if ext in self.ignored_extensions else 0

            # Re-check all grouped files for ignore markers if none seen yet
            if has_ignored == 0:
                for file_path in list(group['unsorted_files']):
                    ext = get_extension(file_path)
                    has_ignored += 1 if ext in self.ignored_extensions else 0

            if has_ignored > 0:
                ignored_identifiers.append(identifier)

            # Break if CP wants to shut down
            if self.shuttingDown():
                break

        # Create identifiers for all leftover files
        path_identifiers = {}
        for file_path in leftovers:
            identifier = self.createStringIdentifier(file_path, folder)

            if not path_identifiers.get(identifier):
                path_identifiers[identifier] = []

            path_identifiers[identifier].append(file_path)

        # Group the files based on the identifier
        delete_identifiers = []
        for identifier, found_files in list(path_identifiers.items()):
            log.debug('Grouping files on identifier: %s', identifier)

            group = movie_files.get(identifier)
            if group:
                group['unsorted_files'].extend(found_files)
                delete_identifiers.append(identifier)

                # Remove the found files from the leftover stack
                leftovers = leftovers - set(found_files)

            # Break if CP wants to shut down
            if self.shuttingDown():
                break

        # Cleaning up used
        for identifier in delete_identifiers:
            if path_identifiers.get(identifier):
                del path_identifiers[identifier]
        del delete_identifiers

        # Group based on folder
        delete_identifiers = []
        for identifier, found_files in list(path_identifiers.items()):
            log.debug('Grouping files on foldername: %s', identifier)

            for ff in found_files:
                new_identifier = self.createStringIdentifier(
                    os.path.dirname(ff), folder)

                group = movie_files.get(new_identifier)
                if group:
                    group['unsorted_files'].extend([ff])
                    delete_identifiers.append(identifier)

                    # Remove the found file from the leftover stack.
                    # Fix: was "leftovers -= leftovers - set([ff])", which keeps
                    # only ff (a set intersection) and discards every other
                    # leftover instead of just removing ff.
                    leftovers -= {ff}

            # Break if CP wants to shut down
            if self.shuttingDown():
                break

        # leftovers should be empty
        if leftovers:
            log.debug('Some files are still left over: %s', leftovers)

        # Cleaning up used
        for identifier in delete_identifiers:
            if path_identifiers.get(identifier):
                del path_identifiers[identifier]
        del delete_identifiers

        # Make sure we remove older / still extracting files
        valid_files = {}
        while not self.shuttingDown():
            try:
                identifier, group = movie_files.popitem()
            except KeyError:
                # Dict exhausted
                break

            # Check if movie is fresh and maybe still unpacking, ignore files
            # newer than 1 minute
            if check_file_date:
                files_too_new, time_string = self.checkFilesChanged(
                    group['unsorted_files'])
                if files_too_new:
                    log.info(
                        'Files seem to be still unpacking or just unpacked (created on %s), ignoring for now: %s',
                        (time_string, identifier))

                    # Delete the unsorted list
                    del group['unsorted_files']

                    continue

            # Only process movies newer than x
            if newer_than and newer_than > 0:
                has_new_files = False
                for cur_file in group['unsorted_files']:
                    file_time = self.getFileTimes(cur_file)
                    if file_time[0] > newer_than or file_time[1] > newer_than:
                        has_new_files = True
                        break

                if not has_new_files:
                    log.debug(
                        'None of the files have changed since %s for %s, skipping.',
                        (time.ctime(newer_than), identifier))

                    # Delete the unsorted list
                    del group['unsorted_files']

                    continue

            valid_files[identifier] = group

        del movie_files

        total_found = len(valid_files)

        # Make sure only one movie was found if a download ID is provided
        if release_download and total_found == 0:
            log.info(
                'Download ID provided (%s), but no groups found! Make sure the download contains valid media files (fully extracted).',
                release_download.get('imdb_id'))
        elif release_download and total_found > 1:
            log.info(
                'Download ID provided (%s), but more than one group found (%s). Ignoring Download ID...',
                (release_download.get('imdb_id'), len(valid_files)))
            release_download = None

        # Determine file types
        processed_movies = {}
        while not self.shuttingDown():
            try:
                identifier, group = valid_files.popitem()
            except KeyError:
                # Dict exhausted
                break

            if return_ignored is False and identifier in ignored_identifiers:
                log.debug('Ignore file found, ignoring release: %s',
                          identifier)
                total_found -= 1
                continue

            # Group extra (and easy) files first
            group['files'] = {
                'movie_extra': self.getMovieExtras(group['unsorted_files']),
                'subtitle': self.getSubtitles(group['unsorted_files']),
                'subtitle_extra': self.getSubtitlesExtras(
                    group['unsorted_files']),
                'nfo': self.getNfo(group['unsorted_files']),
                'trailer': self.getTrailers(group['unsorted_files']),
                'leftover': set(group['unsorted_files']),
            }

            # Media files
            if group['is_dvd']:
                group['files']['movie'] = self.getDVDFiles(
                    group['unsorted_files'])
            else:
                group['files']['movie'] = self.getMediaFiles(
                    group['unsorted_files'])

            if len(group['files']['movie']) == 0:
                log.error('Couldn\'t find any movie files for %s', identifier)
                total_found -= 1
                continue

            log.debug('Getting metadata for %s', identifier)
            group['meta_data'] = self.getMetaData(
                group, folder=folder, release_download=release_download)

            # Subtitle meta
            group['subtitle_language'] = self.getSubtitleLanguage(
                group) if not simple else {}

            # Get parent dir from movie files
            for movie_file in group['files']['movie']:
                group['parentdir'] = os.path.dirname(movie_file)
                group['dirname'] = None

                folder_names = group['parentdir'].replace(folder, '').split(
                    os.path.sep)
                folder_names.reverse()

                # Try and get a proper dirname, so no "A", "Movie", "Download" etc
                for folder_name in folder_names:
                    if folder_name.lower(
                    ) not in self.ignore_names and len(folder_name) > 2:
                        group['dirname'] = folder_name
                        break

                break

            # Leftover "sorted" files.
            # Fix: was "if not file_type is 'leftover'" — an identity check on a
            # string literal that only worked through CPython interning.
            for file_type in group['files']:
                if file_type != 'leftover':
                    group['files']['leftover'] -= set(
                        group['files'][file_type])
                    group['files'][file_type] = list(group['files'][file_type])
            group['files']['leftover'] = list(group['files']['leftover'])

            # Delete the unsorted list
            del group['unsorted_files']

            # Determine movie
            group['media'] = self.determineMedia(
                group, release_download=release_download)
            if not group['media']:
                log.error('Unable to determine media: %s',
                          group['identifiers'])
            else:
                group['identifier'] = get_identifier(
                    group['media']) or group['media']['info'].get('imdb')

            processed_movies[identifier] = group

            # Notify parent & progress on something found
            if on_found:
                on_found(group, total_found, len(valid_files))

            # Wait for all the async events calm down a bit
            while threading.activeCount() > 100 and not self.shuttingDown():
                log.debug('Too many threads active, waiting a few seconds')
                time.sleep(10)

        if len(processed_movies) > 0:
            log.info('Found %s movies in the folder %s',
                     (len(processed_movies), folder))
        else:
            log.debug('Found no movies in the folder %s', folder)

        return processed_movies
Ejemplo n.º 24
0
    def _search(self, movie, quality, results):
        """Search the RARBG torrent API for the movie and append result dicts.

        movie: media dict, used for its IMDB identifier and release year.
        quality: unused here; filtering happens through provider config.
        results: list that found releases are appended to (mutated in place).
        """
        hasresults = 0
        curryear = datetime.now().year
        movieid = get_identifier(movie)

        try:
            movieyear = movie['info']['year']
        except:
            # NOTE(review): only a KeyError is expected here; the bare except
            # also swallows unrelated failures.
            log.error('RARBG: Couldn\'t get movie year')
            movieyear = 0

        # Refresh the API token if needed (stored on self._token)
        self.getToken()

        # Skip movies whose known release year lies in the future
        if (self._token != 0) and (movieyear == 0 or movieyear <= curryear):
            data = self.getJsonData(
                self.urls['search'] %
                (self._token, movieid, self.conf('min_seeders'),
                 self.conf('min_leechers'), self.conf('ranked_only')),
                headers=self.getRequestHeaders())

            if data:
                if 'error_code' in data:
                    if data['error'] == 'No results found':
                        log.debug('RARBG: No results returned from Rarbg')
                    else:
                        if data['error_code'] == 10:
                            # NOTE(review): movieid is passed as a lazy
                            # %-format argument, which assumes the API's error
                            # text contains a placeholder — TODO confirm.
                            log.error(data['error'], movieid)
                        else:
                            log.error(
                                'RARBG: There is an error in the returned JSON: %s',
                                data['error'])
                else:
                    hasresults = 1

                try:
                    if hasresults:
                        for result in data['torrent_results']:
                            name = result['title']
                            # Release group is the last dash-separated chunk
                            titlesplit = re.split('-', name)
                            releasegroup = titlesplit[len(titlesplit) - 1]

                            # find_info extracts (encoding, resolution, source)
                            xtrainfo = self.find_info(name)
                            encoding = xtrainfo[0]
                            resolution = xtrainfo[1]
                            # source = xtrainfo[2]
                            pubdate = result['pubdate']  # .strip(' +0000')
                            try:
                                pubdate = datetime.strptime(
                                    pubdate, '%Y-%m-%d %H:%M:%S +0000')
                                now = datetime.utcnow()
                                age = (now - pubdate).days
                            except ValueError:
                                log.debug('RARBG: Bad pubdate')
                                age = 0

                            torrentscore = self.conf('extra_score')
                            seeders = try_int(result['seeders'])
                            torrent_desc = '/ %s / %s / %s / %s seeders' % (
                                releasegroup, resolution, encoding, seeders)

                            # Don't boost dead torrents
                            if seeders == 0:
                                torrentscore = 0

                            # Publication year taken from the pubdate prefix
                            sliceyear = result['pubdate'][0:4]
                            year = try_int(sliceyear)

                            results.append({
                                'id':
                                random.randint(100, 9999),
                                'name':
                                re.sub(
                                    '[^A-Za-z0-9\-_ \(\).]+', '',
                                    '%s (%s) %s' % (name, year, torrent_desc)),
                                'url':
                                result['download'],
                                'detail_url':
                                result['info_page'],
                                'size':
                                try_int(result['size'] /
                                        1048576),  # rarbg sends in bytes
                                'seeders':
                                try_int(result['seeders']),
                                'leechers':
                                try_int(result['leechers']),
                                'age':
                                try_int(age),
                                'score':
                                torrentscore
                            })

                except RuntimeError:
                    # NOTE(review): only RuntimeError is caught, so a missing
                    # 'torrent_results' key (KeyError) would propagate — the
                    # log message suggests a broader catch was intended; TODO
                    # confirm.
                    log.error('RARBG: Failed getting results from %s: %s',
                              (self.getName(), traceback.format_exc()))
Ejemplo n.º 25
0
    def _search(self, media, quality, results):
        """Search KickAssTorrents for the media and append result dicts.

        media: media dict, used for its IMDB identifier.
        quality: quality profile, used to select which category tabs to parse.
        results: list that found releases are appended to (mutated in place).
        """

        data = self.getHTMLData(
            self.urls['search'] %
            (self.getDomain(), 'm', get_identifier(media).replace('tt', '')))

        if data:

            cat_ids = self.getCatId(quality)
            # Column layout of the result table; None marks an unused column
            table_order = ['name', 'size', None, 'age', 'seeds', 'leechers']

            try:
                html = BeautifulSoup(data)
                resultdiv = html.find('div', attrs={'class': 'tabs'})
                for result in resultdiv.find_all('div', recursive=False):
                    # Only parse the tabs for the requested categories
                    if result.get('id').lower().strip('tab-') not in cat_ids:
                        continue

                    try:
                        for temp in result.find_all('tr'):
                            # Skip the header row and rows without an id.
                            # Fix: was "temp['class'] is 'firstr'" — an identity
                            # check that can never be True for BeautifulSoup's
                            # list-valued class attribute, and which raised
                            # KeyError for rows with no class at all.
                            if 'firstr' in (temp.get('class')
                                            or []) or not temp.get('id'):
                                continue

                            new = {}

                            nr = 0
                            for td in temp.find_all('td'):
                                column_name = table_order[nr]
                                if column_name:

                                    # Fix: the original compared column_name
                                    # using "is", which only worked through
                                    # CPython string interning; use equality.
                                    if column_name == 'name':
                                        link = td.find('div', {
                                            'class': 'torrentname'
                                        }).find_all('a')[2]
                                        new['id'] = temp.get('id')[-7:]
                                        new['name'] = link.text
                                        new['url'] = td.find(
                                            'a',
                                            {'href': re.compile('magnet:*')
                                             })['href']
                                        new['detail_url'] = self.urls[
                                            'detail'] % (self.getDomain(),
                                                         link['href'][1:])
                                        new['verified'] = True if td.find(
                                            'i',
                                            {'class': re.compile('verify')
                                             }) else False
                                        new['score'] = 100 if new[
                                            'verified'] else 0
                                    elif column_name == 'size':
                                        new['size'] = self.parseSize(td.text)
                                    elif column_name == 'age':
                                        new['age'] = self.ageToDays(td.text)
                                    elif column_name == 'seeds':
                                        new['seeders'] = try_int(td.text)
                                    elif column_name == 'leechers':
                                        new['leechers'] = try_int(td.text)

                                nr += 1

                            # Only store verified torrents
                            if self.conf(
                                    'only_verified') and not new['verified']:
                                continue

                            results.append(new)
                    except:
                        log.error('Failed parsing KickAssTorrents: %s',
                                  traceback.format_exc())

            except AttributeError:
                # resultdiv is None when the page has no 'tabs' div
                log.debug('No search results found.')
Ejemplo n.º 26
0
    def correctRelease(self, nzb=None, media=None, quality=None, **kwargs):
        """Check whether a found release matches the wanted movie and quality.

        Returns True when the release passes all checks (retention, word
        filters, other-quality detection, 3D, size bounds, IMDB id or
        title+year match), False when rejected, and None for non-movie media.
        """

        # Only movie releases are handled here
        if media.get('type') != 'movie': return

        media_title = fire_event('searcher.get_search_title',
                                 media,
                                 single=True)

        imdb_results = kwargs.get('imdb_results', False)
        retention = Env.setting('retention', section='nzb')

        # Usenet only (no seeders key): drop releases beyond retention
        if nzb.get('seeders') is None and 0 < retention < nzb.get('age', 0):
            log.info2(
                'Wrong: Outside retention, age is %s, needs %s or lower: %s',
                (nzb['age'], retention, nzb['name']))
            return False

        # Check for required and ignored words
        if not fire_event(
                'searcher.correct_words', nzb['name'], media, single=True):
            return False

        # NOTE(review): when quality is falsy the fallback still dereferences
        # quality['identifier'], which would raise — presumably callers always
        # pass a quality dict; TODO confirm.
        preferred_quality = quality if quality else fire_event(
            'quality.single', identifier=quality['identifier'], single=True)

        # Contains lower quality string
        contains_other = fire_event('searcher.contains_other_quality',
                                    nzb,
                                    movie_year=media['info']['year'],
                                    preferred_quality=preferred_quality,
                                    single=True)
        if contains_other and isinstance(contains_other, dict):
            log.info2(
                'Wrong: %s, looking for %s, found %s',
                (nzb['name'], quality['label'], [x for x in contains_other]
                 if contains_other else 'no quality'))
            return False

        # Wrong 3D format (3D found when 2D wanted, or vice versa)
        if not fire_event('searcher.correct_3d',
                          nzb,
                          preferred_quality=preferred_quality,
                          single=True):
            log.info2(
                'Wrong: %s, %slooking for %s in 3D',
                (nzb['name'],
                 ('' if preferred_quality['custom'].get('3d') else 'NOT '),
                 quality['label']))
            return False

        # File too small
        if nzb['size'] and try_int(preferred_quality['size_min']) > try_int(
                nzb['size']):
            log.info2(
                'Wrong: "%s" is too small to be %s. %sMB instead of the minimal of %sMB.',
                (nzb['name'], preferred_quality['label'], nzb['size'],
                 preferred_quality['size_min']))
            return False

        # File too large
        if nzb['size'] and try_int(preferred_quality['size_max']) < try_int(
                nzb['size']):
            log.info2(
                'Wrong: "%s" is too large to be %s. %sMB instead of the maximum of %sMB.',
                (nzb['name'], preferred_quality['label'], nzb['size'],
                 preferred_quality['size_max']))
            return False

        # Provider specific functions
        get_more = nzb.get('get_more_info')
        if get_more:
            get_more(nzb)

        extra_check = nzb.get('extra_check')
        if extra_check and not extra_check(nzb):
            return False

        # Provider already searched by IMDB id, so the match is trusted
        if imdb_results:
            return True

        # Check if nzb contains imdb link
        if get_imdb(nzb.get('description', '')) == get_identifier(media):
            return True

        # Fall back to title + year matching across all known titles
        for raw_title in media['info']['titles']:
            for movie_title in possible_titles(raw_title):
                movie_words = re.split('\W+', simplify_string(movie_title))

                if fire_event('searcher.correct_name',
                              nzb['name'],
                              movie_title,
                              single=True):
                    # if no IMDB link, at least check year range 1
                    if len(movie_words) > 2 and fire_event(
                            'searcher.correct_year',
                            nzb['name'],
                            media['info']['year'],
                            1,
                            single=True):
                        return True

                    # if no IMDB link, at least check year
                    if len(movie_words) <= 2 and fire_event(
                            'searcher.correct_year',
                            nzb['name'],
                            media['info']['year'],
                            0,
                            single=True):
                        return True

        log.info("Wrong: %s, undetermined naming. Looking for '%s (%s)'",
                 (nzb['name'], media_title, media['info']['year']))
        return False