Example #1
def sceneScore(nzb_name):

    check_names = [nzb_name]

    # Match names between "
    try: check_names.append(re.search(r'([\'"]).*\1', nzb_name).group(0))
    except: pass

    # Match longest name between []
    try: check_names.append(max(re.findall(r'[^[]*\[([^]]*)\]', nzb_name), key = len).strip())
    except: pass

    for name in check_names:

        # Strip twice, remove possible file extensions
        name = name.lower().strip(' "\'.-_[]')
        name = re.sub(r'\.([a-z0-9]{0,4})$', '', name)
        name = name.strip(' "\'.-_[]')

        # Make sure year and group name are in there
        year = re.findall(r'(?P<year>19[0-9]{2}|20[0-9]{2})', name)
        group = re.findall(r'-([a-z0-9]+)$', name)

        if len(year) > 0 and len(group) > 0:
            try:
                validate = fireEvent('release.validate', name, single = True)
                if validate and tryInt(validate.get('score')) != 0:
                    log.debug('Release "%s" scored %s, reason: %s', (nzb_name, validate['score'], validate['reasons']))
                    return tryInt(validate.get('score'))
            except:
                log.error('Failed scoring scene: %s', traceback.format_exc())

    return 0
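
Every snippet on this page leans on CouchPotato's tryInt helper to coerce scraped strings into integers without raising. A minimal sketch of the presumed behavior, assuming the helper simply falls back to a default on bad input (the real implementation lives in couchpotato.core.helpers.variable):

def tryInt(value, default = 0):
    # Assumed behavior: swallow conversion errors and return the default
    try:
        return int(value)
    except (TypeError, ValueError):
        return default

# tryInt('42')  -> 42
# tryInt('n/a') -> 0
# tryInt(None)  -> 0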
Example #2
    def _search(self, media, quality, results):

        url = self.urls['search'] % self.buildUrl(media, quality)
        data = self.getHTMLData(url)

        if data:
            html = BeautifulSoup(data)

            try:
                result_table = html.find('table', attrs = {'id': 'torrent_table'})
                if not result_table:
                    return

                entries = result_table.find_all('tr', attrs = {'class': 'torrent'})
                for result in entries:

                    link = result.find('a', attrs = {'dir': 'ltr'})
                    url = result.find('a', attrs = {'title': 'Download'})
                    tds = result.find_all('td')
                    size = tds[4].contents[0].strip('\n ')

                    results.append({
                        'id': link['href'].replace('torrents.php?id=', '').split('&')[0],
                        'name': link.contents[0],
                        'url': self.urls['download'] % url['href'],
                        'detail_url': self.urls['download'] % link['href'],
                        'size': self.parseSize(size),
                        'seeders': tryInt(tds[-2].string),
                        'leechers': tryInt(tds[-1].string),
                    })
            except:
                log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))
Example #3
    def _search(self, media, quality, results):

        nzbs = self.getRSSData(self.urls['search'] % self.buildUrl(media))

        for nzb in nzbs:

            nzbclub_id = tryInt(self.getTextElement(nzb, "link").split('/nzb_view/')[1].split('/')[0])
            enclosure = self.getElement(nzb, "enclosure").attrib
            size = enclosure['length']
            date = self.getTextElement(nzb, "pubDate")

            def extra_check(item):
                full_description = self.getCache('nzbclub.%s' % nzbclub_id, item['detail_url'], cache_timeout = 25920000)

                for ignored in ['ARCHIVE inside ARCHIVE', 'Incomplete', 'repair impossible']:
                    if ignored in full_description:
                        log.info('Wrong: Seems to be passworded or corrupted files: %s', item['name'])
                        return False

                return True

            results.append({
                'id': nzbclub_id,
                'name': toUnicode(self.getTextElement(nzb, "title")),
                'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
                'size': tryInt(size) / 1024 / 1024,
                'url': enclosure['url'].replace(' ', '_'),
                'detail_url': self.getTextElement(nzb, "link"),
                'get_more_info': self.getMoreInfo,
                'extra_check': extra_check
            })
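
Note that the result dict above carries callables ('extra_check', 'get_more_info') instead of plain values; presumably the search pipeline invokes them per candidate. A loose illustration of such a consumer (the loop and names here are assumptions, not CouchPotato's actual searcher):

for item in results:
    # Drop posts the provider hook flags as passworded or corrupted
    if 'extra_check' in item and not item['extra_check'](item):
        continue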
Example #4
    def _searchOnTitle(self, title, media, quality, results):

        url = self.buildUrl(title, media, quality)
        data = self.getHTMLData(url)

        if data:
            html = BeautifulSoup(data)

            try:
                resultsTable = html.find('table', attrs = {'id': 'torrents-table'})
                if resultsTable is None:
                    return

                entries = resultsTable.find_all('tr', attrs = {'class': 'tt_row'})
                for result in entries:

                    link = result.find('td', attrs = {'class': 'ttr_name'}).find('a')
                    url = result.find('td', attrs = {'class': 'td_dl'}).find('a')
                    seeders = result.find('td', attrs = {'class': 'ttr_seeders'}).find('a')
                    leechers = result.find('td', attrs = {'class': 'ttr_leechers'}).find('a')
                    torrent_id = link['href'].replace('details?id=', '')

                    results.append({
                        'id': torrent_id,
                        'name': link['title'],
                        'url': self.urls['download'] % url['href'],
                        'detail_url': self.urls['detail'] % torrent_id,
                        'size': self.parseSize(result.find('td', attrs = {'class': 'ttr_size'}).contents[0]),
                        'seeders': tryInt(seeders.string) if seeders else 0,
                        'leechers': tryInt(leechers.string) if leechers else 0,
                        'get_more_info': self.getMoreInfo,
                    })

            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
Example #5
    def getIMDBids(self):

        movies = []

        urls = dict(zip(splitString(self.conf('automation_urls')), [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]))

        for url in urls:

            if not urls[url]:
                continue

            rss_movies = self.getRSSData(url)

            for movie in rss_movies:

                description = self.getTextElement(movie, 'description')
                grabs = 0

                for item in movie:
                    if item.attrib.get('name') == 'grabs':
                        grabs = item.attrib.get('value')
                        break

                if int(grabs) > tryInt(self.conf('number_grabs')):
                    title = re.match(r'.*Title: .a href.*/">(.*) \(\d{4}\).*', description).group(1)
                    log.info2('%s grabs for movie: %s, enqueue...', (grabs, title))
                    year = re.match(r'.*Year: (\d{4}).*', description).group(1)
                    imdb = self.search(title, year)

                    if imdb and self.isMinimalMovie(imdb):
                        movies.append(imdb['imdb'])

        return movies
Example #6
    def _searchOnTitle(self, title, media, quality, results):

        query = '"%s" %s' % (title, media['info']['year'])

        data = {
            '/browse.php?': None,
            'cata': 'yes',
            'jxt': 8,
            'jxw': 'b',
            'search': query,
        }

        data = self.getJsonData(self.urls['search'], data = data)
        try: torrents = data.get('Fs', [])[0].get('Cn', {}).get('torrents', [])
        except: return

        for torrent in torrents:
            results.append({
                'id': torrent['id'],
                'name': torrent['name'],
                'url': self.urls['download'] % (torrent['id'], torrent['fname']),
                'detail_url': self.urls['detail'] % torrent['id'],
                'size': self.parseSize(torrent.get('size')),
                'seeders': tryInt(torrent.get('seed')),
                'leechers': tryInt(torrent.get('leech')),
            })
Example #7
    def _searchOnTitle(self, title, movie, quality, results):

        q = '%s %s' % (title, movie['info']['year'])
        params = tryUrlencode({
            'search': q,
            'catid': ','.join([str(x) for x in self.getCatId(quality)]),
            'user': self.conf('username', default = ''),
            'api': self.conf('api_key', default = ''),
        })

        if len(self.conf('custom_tag')) > 0:
            params = '%s&%s' % (params, self.conf('custom_tag'))

        nzbs = self.getJsonData(self.urls['search'] % params)

        if isinstance(nzbs, list):
            for nzb in nzbs:

                results.append({
                    'id': nzb.get('nzbid'),
                    'name': toUnicode(nzb.get('release')),
                    'age': self.calculateAge(tryInt(nzb.get('usenetage'))),
                    'size': tryInt(nzb.get('sizebytes')) / 1024 / 1024,
                    'url': nzb.get('getnzb'),
                    'detail_url': nzb.get('details'),
                    'description': nzb.get('weblink')
                })
Example #8
    def _searchOnTitle(self, title, media, quality, results):

        url = self.urls['search'] % self.buildUrl(title, media, quality)

        data = self.getHTMLData(url)

        if data:
            html = BeautifulSoup(data)

            try:
                result_table = html.find('table', attrs = {'id': 'torrenttable'})
                if not result_table:
                    return

                entries = result_table.find_all('tr')

                for result in entries[1:]:

                    link = result.find('td', attrs = {'class': 'name'}).find('a')
                    url = result.find('td', attrs = {'class': 'quickdownload'}).find('a')
                    details = result.find('td', attrs = {'class': 'name'}).find('a')

                    results.append({
                        'id': link['href'].replace('/torrent/', ''),
                        'name': six.text_type(link.string),
                        'url': self.urls['download'] % url['href'],
                        'detail_url': self.urls['download'] % details['href'],
                        'size': self.parseSize(result.find_all('td')[4].string),
                        'seeders': tryInt(result.find('td', attrs = {'class': 'seeders'}).string),
                        'leechers': tryInt(result.find('td', attrs = {'class': 'leechers'}).string),
                    })

            except:
                log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))
Example #9
    def _search(self, media, quality, results):

        url = self.urls['search'] % self.buildUrl(media, quality)
        data = self.getHTMLData(url)

        if data:
            html = BeautifulSoup(data)

            try:
                result_table = html.find('table', attrs = {'id': 'torrent_table'})
                if not result_table:
                    return

                entries = result_table.find_all('tr', attrs = {'class': 'torrent'})

                for result in entries:

                    link = result.find('span', attrs = {'class': 'torrent_name_link'}).parent
                    url = result.find('td', attrs = {'class': 'torrent_td'}).find('a')
                    size = result.find('td', attrs = {'class': 'size'}).contents[0].strip('\n ')
                    tds = result.find_all('td')

                    results.append({
                        'id': link['href'].replace('torrents.php?torrentid=', ''),
                        'name': six.text_type(link.span.string).translate({ord(six.u('\xad')): None}),
                        'url': self.urls['download'] % url['href'],
                        'detail_url': self.urls['download'] % link['href'],
                        'size': self.parseSize(size),
                        'seeders': tryInt(tds[-2].string),
                        'leechers': tryInt(tds[-1].string),
                    })

            except:
                log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))
Example #10
    def _search(self, media, quality, results):

        data = self.getHTMLData(self.urls['search'] % (self.getDomain(), 'm', getIdentifier(media).replace('tt', '')))

        if data:

            cat_ids = self.getCatId(quality)
            table_order = ['name', 'size', None, 'age', 'seeds', 'leechers']

            try:
                html = BeautifulSoup(data)
                resultdiv = html.find('div', attrs = {'class': 'tabs'})
                for result in resultdiv.find_all('div', recursive = False):
                    if result.get('id').lower().replace('tab-', '') not in cat_ids:
                        continue

                    try:
                        for temp in result.find_all('tr'):
                            if 'firstr' in (temp.get('class') or []) or not temp.get('id'):
                                continue

                            new = {}

                            nr = 0
                            for td in temp.find_all('td'):
                                column_name = table_order[nr]
                                if column_name:

                                    if column_name == 'name':
                                        link = td.find('div', {'class': 'torrentname'}).find_all('a')[2]
                                        new['id'] = temp.get('id')[-7:]
                                        new['name'] = link.text
                                        new['url'] = td.find('a', {'href': re.compile('magnet:*')})['href']
                                        new['detail_url'] = self.urls['detail'] % (self.getDomain(), link['href'][1:])
                                        new['verified'] = True if td.find('i', {'class': re.compile('verify')}) else False
                                        new['score'] = 100 if new['verified'] else 0
                                    elif column_name == 'size':
                                        new['size'] = self.parseSize(td.text)
                                    elif column_name == 'age':
                                        new['age'] = self.ageToDays(td.text)
                                    elif column_name == 'seeds':
                                        new['seeders'] = tryInt(td.text)
                                    elif column_name == 'leechers':
                                        new['leechers'] = tryInt(td.text)

                                nr += 1

                            # Only store verified torrents
                            if self.conf('only_verified') and not new['verified']:
                                continue

                            results.append(new)
                    except:
                        log.error('Failed parsing KickAssTorrents: %s', traceback.format_exc())

            except AttributeError:
                log.debug('No search results found.')
Example #11
    def parseMovie(self, movie):

        movie_data = {}
        try:

            try:
                if isinstance(movie, (str, unicode)):
                    movie = json.loads(movie)
            except ValueError:
                log.info('No proper json to decode')
                return movie_data

            if movie.get('Response') == 'Parse Error' or movie.get('Response') == 'False':
                return movie_data

            if movie.get('Type', '').lower() != 'movie':
                return movie_data

            # Drop 'N/A' placeholder values
            tmp_movie = movie.copy()
            for key in tmp_movie:
                if isinstance(tmp_movie.get(key), (str, unicode)) and tmp_movie.get(key).lower() == 'n/a':
                    del movie[key]

            year = tryInt(movie.get('Year', ''))

            movie_data = {
                'type': 'movie',
                'via_imdb': True,
                'titles': [movie.get('Title')] if movie.get('Title') else [],
                'original_title': movie.get('Title'),
                'images': {
                    'poster': [movie.get('Poster', '')] if movie.get('Poster') and len(movie.get('Poster', '')) > 4 else [],
                },
                'rating': {
                    'imdb': (tryFloat(movie.get('imdbRating', 0)), tryInt(movie.get('imdbVotes', '').replace(',', ''))),
                    #'rotten': (tryFloat(movie.get('tomatoRating', 0)), tryInt(movie.get('tomatoReviews', '').replace(',', ''))),
                },
                'imdb': str(movie.get('imdbID', '')),
                'mpaa': str(movie.get('Rated', '')),
                'runtime': self.runtimeToMinutes(movie.get('Runtime', '')),
                'released': movie.get('Released'),
                'year': year if isinstance(year, int) else None,
                'plot': movie.get('Plot'),
                'genres': splitString(movie.get('Genre', '')),
                'directors': splitString(movie.get('Director', '')),
                'writers': splitString(movie.get('Writer', '')),
                'actors': splitString(movie.get('Actors', '')),
            }
            movie_data = dict((k, v) for k, v in movie_data.items() if v)
        except:
            log.error('Failed parsing IMDB API json: %s', traceback.format_exc())

        return movie_data
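
A hedged trace with a minimal OMDb-style payload (field names taken from the code above, values invented):

# movie = {'Response': 'True', 'Type': 'movie', 'Title': 'Example',
#          'Year': '2010', 'imdbID': 'tt0000000', 'Plot': 'N/A'}
# -> 'Plot' is dropped as an 'N/A' placeholder, and the result contains
#    {'type': 'movie', 'via_imdb': True, 'titles': ['Example'],
#     'original_title': 'Example', 'imdb': 'tt0000000', 'year': 2010, ...}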
Example #12
    def _search(self, movie, quality, results):

        data = self.getHTMLData(self.urls['search'] % (self.conf('passkey'), getIdentifier(movie), self.conf('only_internal')))

        if data:
            try:
                soup = BeautifulSoup(data)

                if soup.find('error'):
                    log.error(soup.find('error').get_text())
                    return

                authkey = soup.find('authkey').get_text()
                entries = soup.find_all('torrent')

                for entry in entries:

                    torrentscore = 0
                    torrent_id = entry.find('id').get_text()
                    name = entry.find('name').get_text()
                    year = entry.find('year').get_text()
                    releasegroup = entry.find('releasegroup').get_text()
                    resolution = entry.find('resolution').get_text()
                    encoding = entry.find('encoding').get_text()
                    freeleech = entry.find('freeleech').get_text()
                    torrent_desc = '/ %s / %s / %s ' % (releasegroup, resolution, encoding)

                    if freeleech == '0.25' and self.conf('prefer_internal'):
                        torrent_desc += '/ Internal'
                        torrentscore += 200

                    if encoding == 'x264' and self.conf('favor') in ['encode', 'both']:
                        torrentscore += 300
                    if re.search('Remux', encoding) and self.conf('favor') in ['remux', 'both']:
                        torrentscore += 200

                    results.append({
                        'id': torrent_id,
                        'name': re.sub(r'[^A-Za-z0-9\-_ \(\).]+', '', '%s (%s) %s' % (name, year, torrent_desc)),
                        'url': self.urls['download'] % (torrent_id, authkey, self.conf('passkey')),
                        'detail_url': self.urls['detail'] % torrent_id,
                        'size': tryInt(entry.find('size').get_text()) / 1048576,
                        'seeders': tryInt(entry.find('seeders').get_text()),
                        'leechers': tryInt(entry.find('leechers').get_text()),
                        'score': torrentscore
                    })

            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
Example #13
    def getHosts(self):

        uses = splitString(str(self.conf('use')), clean = False)
        hosts = splitString(self.conf('host'), clean = False)
        api_keys = splitString(self.conf('api_key'), clean = False)
        extra_score = splitString(self.conf('extra_score'), clean = False)
        custom_tags = splitString(self.conf('custom_tag'), clean = False)

        host_list = []
        for nr in range(len(hosts)):

            try: key = api_keys[nr]
            except: key = ''

            try: host = hosts[nr]
            except: host = ''

            try: score = tryInt(extra_score[nr])
            except: score = 0

            try: custom_tag = custom_tags[nr]
            except: custom_tag = ''

            host_list.append({
                'use': uses[nr],
                'host': host,
                'api_key': key,
                'extra_score': score,
                'custom_tag': custom_tag
            })

        return host_list
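
getHosts pairs up the comma-separated settings positionally, one dict per configured host. A hedged illustration of the expected config shape (values invented):

# use      = '1,0'
# host     = 'api.example1.com,api.example2.com'
# api_key  = 'abc123,def456'
#
# -> [{'use': '1', 'host': 'api.example1.com', 'api_key': 'abc123', ...},
#     {'use': '0', 'host': 'api.example2.com', 'api_key': 'def456', ...}]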
Example #14
    def checkFilesChanged(self, files, unchanged_for = 60):
        now = time.time()
        file_too_new = False

        file_time = []
        for cur_file in files:

            # File got removed while checking
            if not os.path.isfile(cur_file):
                file_too_new = now
                break

            # File has changed in last 60 seconds
            file_time = self.getFileTimes(cur_file)
            for t in file_time:
                if t > now - unchanged_for:
                    file_too_new = tryInt(time.time() - t)
                    break

            if file_too_new:
                break

        if file_too_new:
            try:
                time_string = time.ctime(file_time[0])
            except:
                try:
                    time_string = time.ctime(file_time[1])
                except:
                    time_string = 'unknown'

            return file_too_new, time_string

        return False, None
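
A usage sketch of the contract above: the method returns a truthy value (roughly, how recently a file changed) plus a human-readable timestamp while files are still settling, and (False, None) once everything has been unchanged for unchanged_for seconds. The caller below is illustrative only:

# 'scanner' stands in for whatever object exposes checkFilesChanged
too_new, last_change = scanner.checkFilesChanged(['/data/movie.mkv'])
if too_new:
    log.debug('Files still changing (last change: %s), retrying later', last_change)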
Example #15
    def getIMDBids(self):

        movies = []

        enablers = [tryInt(x) for x in splitString(self.conf("automation_urls_use"))]

        for index, rss_url in enumerate(splitString(self.conf("automation_urls"))):

            if not enablers[index]:
                continue

            rss_movies = self.getRSSData(rss_url, headers={"Referer": ""})

            for movie in rss_movies:

                nameyear = fireEvent("scanner.name_year", self.getTextElement(movie, "title"), single=True)
                imdb = self.search(nameyear.get("name"), nameyear.get("year"), imdb_only=True)

                if not imdb:
                    continue

                movies.append(imdb)

        return movies
Example #16
    def get(self, nr = 0, **kwargs):

        nr = tryInt(nr)
        current_path = None

        total = 1
        for x in range(0, 50):

            path = '%s%s' % (Env.get('log_path'), '.%s' % x if x > 0 else '')

            # Check if the log exists
            if not os.path.isfile(path):
                total = x - 1
                break

            # Set current path
            if x == nr:
                current_path = path

        log_content = ''
        if current_path:
            with open(current_path, 'r') as f:
                log_content = f.read()
        logs = self.toList(log_content)

        return {
            'success': True,
            'log': logs,
            'total': total,
        }
Example #17
    def _searchOnTitle(self, title, media, quality, results):

        search_url = self.urls['verified_search'] if self.conf('verified_only') else self.urls['search']

        # Create search parameters
        search_params = self.buildUrl(title, media, quality)

        smin = quality.get('size_min')
        smax = quality.get('size_max')
        if smin and smax:
            search_params += ' size %sm - %sm' % (smin, smax)

        min_seeds = tryInt(self.conf('minimal_seeds'))
        if min_seeds:
            search_params += ' seed > %s' % (min_seeds - 1)

        rss_data = self.getRSSData(search_url % search_params)

        if rss_data:
            try:

                for result in rss_data:

                    name = self.getTextElement(result, 'title')
                    detail_url = self.getTextElement(result, 'link')
                    description = self.getTextElement(result, 'description')

                    magnet = splitString(detail_url, '/')[-1]
                    magnet_url = 'magnet:?xt=urn:btih:%s&dn=%s&tr=%s' % (magnet.upper(), tryUrlencode(name), tryUrlencode('udp://tracker.openbittorrent.com/announce'))

                    reg = re.search(r'Size: (?P<size>\d+) MB Seeds: (?P<seeds>[\d,]+) Peers: (?P<peers>[\d,]+)', six.text_type(description))
                    if not reg:
                        continue

                    size = reg.group('size')
                    seeds = reg.group('seeds').replace(',', '')
                    peers = reg.group('peers').replace(',', '')

                    results.append({
                        'id': magnet,
                        'name': six.text_type(name),
                        'url': magnet_url,
                        'detail_url': detail_url,
                        'size': tryInt(size),
                        'seeders': tryInt(seeds),
                        'leechers': tryInt(peers),
                    })

            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
Example #18
    def updateProgress(self, folder, to_go):

        pr = self.in_progress[folder]
        if to_go < pr['to_go']:
            pr['to_go'] = to_go

        # Average seconds per processed item times items remaining
        done = pr['total'] - pr['to_go']
        if done > 0:
            avg = (time.time() - pr['started']) / done
            pr['eta'] = tryInt(avg * pr['to_go'])
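
For example, if 60 of 100 items are still to go after 80 seconds, the 40 finished items took 2 seconds each on average, so pr['eta'] becomes tryInt(2 * 60) = 120 seconds.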
Example #19
def providerScore(provider):

    try:
        score = tryInt(Env.setting('extra_score', section = provider.lower(), default = 0))
    except:
        score = 0

    return score
Example #20
    def setCrons(self):

        fireEvent('schedule.remove', 'manage.update_library')
        refresh = tryInt(self.conf('library_refresh_interval'))
        if refresh > 0:
            fireEvent('schedule.interval', 'manage.update_library', self.updateLibrary, hours = refresh, single = True)

        return True
Example #21
    def _searchOnTitle(self, title, movie, quality, results):

        url = self.urls['search'] % self.buildUrl(title, movie, quality)
        data = self.getHTMLData(url)

        if data:
            html = BeautifulSoup(data, 'html.parser', parse_only = self.only_tables_tags)

            try:
                result_table = html.find('table', attrs = {'class': 'koptekst'})
                if not result_table or 'nothing found!' in data.lower():
                    return

                entries = result_table.find_all('tr')
                for result in entries[1:]:

                    all_cells = result.find_all('td')

                    torrent = all_cells[self.torrent_name_cell].find('a')
                    download = all_cells[self.torrent_download_cell].find('a')

                    torrent_id = torrent['href']
                    torrent_id = torrent_id.replace('details.php?id=', '')
                    torrent_id = torrent_id.replace('&hit=1', '')

                    torrent_name = torrent.getText()

                    torrent_size = self.parseSize(all_cells[8].getText())
                    torrent_seeders = tryInt(all_cells[10].getText())
                    torrent_leechers = tryInt(all_cells[11].getText())
                    torrent_url = self.urls['baseurl'] % download['href']
                    torrent_detail_url = self.urls['baseurl'] % torrent['href']

                    results.append({
                        'id': torrent_id,
                        'name': torrent_name,
                        'size': torrent_size,
                        'seeders': torrent_seeders,
                        'leechers': torrent_leechers,
                        'url': torrent_url,
                        'detail_url': torrent_detail_url,
                    })

            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
Example #22
    def getMeta(self, filename):

        try:
            p = enzyme.parse(filename)

            # Video codec
            vc = ('H264' if p.video[0].codec == 'AVC1' else p.video[0].codec)

            # Audio codec
            ac = p.audio[0].codec
            try: ac = self.audio_codec_map.get(p.audio[0].codec, ac)
            except: pass

            # Find title in video headers
            titles = []

            try:
                if p.title and self.findYear(p.title):
                    titles.append(ss(p.title))
            except:
                log.error('Failed getting title from meta: %s', traceback.format_exc())

            for video in p.video:
                try:
                    if video.title and self.findYear(video.title):
                        titles.append(ss(video.title))
                except:
                    log.error('Failed getting title from meta: %s', traceback.format_exc())

            return {
                'titles': list(set(titles)),
                'video': vc,
                'audio': ac,
                'resolution_width': tryInt(p.video[0].width),
                'resolution_height': tryInt(p.video[0].height),
                'audio_channels': p.audio[0].channels,
            }
        except enzyme.exceptions.ParseError:
            log.debug('Failed to parse meta for %s', filename)
        except enzyme.exceptions.NoParserError:
            log.debug('No parser found for %s', filename)
        except:
            log.debug('Failed parsing %s', filename)

        return {}
Example #23
    def runtimeToMinutes(self, runtime_str):
        runtime = 0

        regex = r'(\d*.?\d+).(h|hr|hrs|mins|min)+'
        matches = re.findall(regex, runtime_str)
        for match in matches:
            nr, size = match
            runtime += tryInt(nr) * (60 if str(size)[0] == 'h' else 1)

        return runtime
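
Worked values for the conversion above, derived from the regex and the 60x multiplier for hour units:

# runtimeToMinutes('2 hr 15 min') -> 2 * 60 + 15 = 135
# runtimeToMinutes('90 min')      -> 90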
Example #24
    def ageToDays(self, age_str):
        age = 0
        age_str = age_str.replace('&nbsp;', ' ')

        regex = r'(\d*.?\d+).(sec|hour|day|week|month|year)+'
        matches = re.findall(regex, age_str)
        for match in matches:
            nr, size = match
            mult = 1
            if size == 'week':
                mult = 7
            elif size == 'month':
                mult = 30.5
            elif size == 'year':
                mult = 365

            age += tryInt(nr) * mult

        return tryInt(age)
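
Worked values, using the multipliers in the code:

# ageToDays('3 weeks')  -> 3 * 7 = 21
# ageToDays('2 months') -> tryInt(2 * 30.5) = 61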
Example #25
                    def extra_check(item):
                        parts = re.search(r"available:.(?P<parts>\d+)./.(?P<total>\d+)", info.text)
                        total = float(tryInt(parts.group("total")))
                        parts = float(tryInt(parts.group("parts")))

                        # Skip incomplete posts; a small shortfall (>= 95%) is tolerated
                        # only when repair (par2) files are mentioned in the description
                        if (total / parts) < 1 and (
                            (total / parts) < 0.95
                            or (
                                (total / parts) >= 0.95
                                and not ("par2" in info.text.lower() or "pa3" in info.text.lower())
                            )
                        ):
                            log.info2("Wrong: '%s', not complete: %s out of %s", (item["name"], parts, total))
                            return False

                        if "requires password" in info.text.lower():
                            log.info2("Wrong: '%s', passworded", (item["name"]))
                            return False

                        return True
Example #26
def namePositionScore(nzb_name, movie_name):
    score = 0

    nzb_words = re.split(r'\W+', simplifyString(nzb_name))
    qualities = fireEvent('quality.all', single = True)

    try:
        nzb_name = re.search(r'([\'"]).*\1', nzb_name).group(0)
    except:
        pass

    name_year = fireEvent('scanner.name_year', nzb_name, single = True)

    # Give points for movies beginning with the correct name
    split_by = simplifyString(movie_name)
    name_split = []
    if len(split_by) > 0:
        name_split = simplifyString(nzb_name).split(split_by)
        if name_split[0].strip() == '':
            score += 10

    # If year is second in line, give more points
    if len(name_split) > 1 and name_year:
        after_name = name_split[1].strip()
        if tryInt(after_name[:4]) == name_year.get('year', None):
            score += 10
            after_name = after_name[4:]

        # Subtract points for junk between the year and the quality tag
        found_quality = None
        for quality in qualities:
            # Main in words
            if quality['identifier'] in nzb_words:
                found_quality = quality['identifier']

            # Alt in words
            for alt in quality['alternative']:
                if alt in nzb_words:
                    found_quality = alt
                    break

        if not found_quality:
            return score - 20

        allowed = []
        for value in name_scores:
            name, sc = value.split(':')
            allowed.append(name)

        inbetween = re.split(r'\W+', after_name.split(found_quality)[0].strip())

        score -= (10 * len(set(inbetween) - set(allowed)))

    return score
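
A rough trace of the scoring above (release name invented): for nzb_name 'Movie.Name.2010.720p.BluRay.x264-GRP' and movie_name 'Movie Name', the simplified release starts with the movie title (+10), the year follows immediately after it (+10), a missing quality tag would cut scoring short at -20, and every unrecognized word between the year and the detected quality costs another 10 points.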
Example #27
    def _search(self, movie, quality, results):

        match = re.match(r'tt(\d{7})', getIdentifier(movie))
        if not match:
            return

        data = self._post_query(imdb = {'id': match.group(1)})

        if data:
            try:
                for result in data:
                    results.append({
                        'id': result['id'],
                        'name': result['name'],
                        'url': self.urls['download'] % (result['id'], self.conf('passkey')),
                        'detail_url': self.urls['detail'] % result['id'],
                        'size': tryInt(result['size']) / 1024 / 1024,
                        'seeders': tryInt(result['seeders']),
                        'leechers': tryInt(result['leechers'])
                    })
            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
Example #28
    def _searchOnTitle(self, title, movie, quality, results):

        url = self.urls["search"] % (
            tryUrlencode("%s %s" % (title.replace(":", ""), movie["info"]["year"])),
            self.getCatId(quality)[0],
        )
        data = self.getHTMLData(url)

        if data:
            html = BeautifulSoup(data)

            try:
                result_table = html.find("table", attrs={"border": "1"})
                if not result_table:
                    return

                entries = result_table.find_all("tr")

                for result in entries[1:]:
                    cells = result.find_all("td")

                    link = cells[1].find("a", attrs={"class": "index"})

                    full_id = link["href"].replace("details.php?id=", "")
                    torrent_id = full_id[:6]
                    name = toUnicode(link.get("title", link.contents[0]).encode("ISO-8859-1")).strip()

                    results.append(
                        {
                            "id": torrent_id,
                            "name": name,
                            "url": self.urls["download"] % (torrent_id, name),
                            "detail_url": self.urls["detail"] % torrent_id,
                            "size": self.parseSize(cells[6].contents[0] + cells[6].contents[2]),
                            "seeders": tryInt(cells[8].find("span").contents[0]),
                            "leechers": tryInt(cells[9].find("span").contents[0]),
                        }
                    )

            except:
                log.error("Failed to parsing %s: %s", (self.getName(), traceback.format_exc()))
Example #29
    def fill(self):

        try:
            db = get_db()

            order = 0
            for q in self.qualities:

                existing = None
                try:
                    existing = db.get('quality', q.get('identifier'))
                except RecordNotFound:
                    pass

                if not existing:
                    db.insert({
                        '_t': 'quality',
                        'order': order,
                        'identifier': q.get('identifier'),
                        'size_min': tryInt(q.get('size')[0]),
                        'size_max': tryInt(q.get('size')[1]),
                    })

                    log.info('Creating profile: %s', q.get('label'))
                    db.insert({
                        '_t': 'profile',
                        'order': order + 20,  # Make sure it goes behind other profiles
                        'core': True,
                        'qualities': [q.get('identifier')],
                        'label': toUnicode(q.get('label')),
                        'finish': [True],
                        'wait_for': [0],
                    })

                order += 1

            return True
        except:
            log.error('Failed: %s', traceback.format_exc())

        return False
Example #30
    def _search(self, media, quality, results):

        query = self.buildUrl(media, quality)

        url = "%s&%s" % (self.urls['search'], query)

        data = self.getHTMLData(url)

        if data:
            # Remove BiT-HDTV's output garbage so outdated BS4 versions successfully parse the HTML
            split_data = data.partition('-->')
            if '## SELECT COUNT(' in split_data[0]:
                data = split_data[2]

            html = BeautifulSoup(data)

            try:
                result_table = html.find('table', attrs = {'width': '750', 'class': ''})
                if result_table is None:
                    return

                entries = result_table.find_all('tr')
                for result in entries[1:]:

                    cells = result.find_all('td')
                    link = cells[2].find('a')
                    torrent_id = link['href'].replace('/details.php?id=', '')

                    results.append({
                        'id': torrent_id,
                        'name': link.contents[0].get_text(),
                        'url': cells[0].find('a')['href'],
                        'detail_url': self.urls['detail'] % torrent_id,
                        'size': self.parseSize(cells[6].get_text()),
                        'seeders': tryInt(cells[8].string),
                        'leechers': tryInt(cells[9].string),
                        'get_more_info': self.getMoreInfo,
                    })

            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))