Example #1
    def _searchOnTitle(self, title, movie, quality, results):

        q = '%s %s' % (title, movie['info']['year'])
        params = try_url_encode({
            'search': q,
            'catid': ','.join([str(x) for x in self.getCatId(quality)]),
            'user': self.conf('username', default = ''),
            'api': self.conf('api_key', default = ''),
        })

        if self.conf('custom_tag', default = ''):
            params = '%s&%s' % (params, self.conf('custom_tag'))

        nzbs = self.getJsonData(self.urls['search'] % params)

        if isinstance(nzbs, list):
            for nzb in nzbs:

                results.append({
                    'id': nzb.get('nzbid'),
                    'name': to_unicode(nzb.get('release')),
                    'age': self.calculateAge(try_int(nzb.get('usenetage'))),
                    'size': try_int(nzb.get('sizebytes')) / 1024 / 1024,
                    'url': nzb.get('getnzb'),
                    'detail_url': nzb.get('details'),
                    'description': nzb.get('weblink')
                })
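These snippets all rely on a try_int helper (a CouchPotato-style safe cast). A minimal standalone sketch of such a helper, under the assumption that it falls back to a default on bad input (not necessarily the project's exact implementation):

def try_int(value, default=0):
    # Best-effort cast to int; fall back to `default` on bad input
    try:
        return int(value)
    except (TypeError, ValueError):
        return default

assert try_int('1024') == 1024
assert try_int('n/a') == 0
assert try_int(None, -1) == -1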
Example #2
    def parseMovie(self, movie):

        movie_data = {}
        try:

            try:
                if isinstance(movie, str):
                    movie = json.loads(movie)
            except ValueError:
                log.info('No proper json to decode')
                return movie_data

            if movie.get('Response') in ('Parse Error', 'False'):
                return movie_data

            if movie.get('Type', '').lower() != 'movie':
                return movie_data

            tmp_movie = movie.copy()
            for key in tmp_movie:
                tmp_movie_elem = tmp_movie.get(key)
                if not isinstance(tmp_movie_elem, str) or tmp_movie_elem.lower() == 'n/a':
                    del movie[key]

            year = try_int(movie.get('Year', ''))

            movie_data = {
                'type': 'movie',
                'via_imdb': True,
                'titles': [movie.get('Title')] if movie.get('Title') else [],
                'original_title': movie.get('Title'),
                'images': {
                    'poster': [movie.get('Poster', '')] if movie.get('Poster') and len(movie.get('Poster', '')) > 4 else [],
                },
                'rating': {
                    'imdb': (try_float(movie.get('imdbRating', 0)),
                             try_int(movie.get('imdbVotes', '').replace(',', ''))),
                    #'rotten': (tryFloat(movie.get('tomatoRating', 0)), tryInt(movie.get('tomatoReviews', '').replace(',', ''))),
                },
                'imdb': str(movie.get('imdbID', '')),
                'mpaa': str(movie.get('Rated', '')),
                'runtime': self.runtimeToMinutes(movie.get('Runtime', '')),
                'released': movie.get('Released'),
                'year': year if isinstance(year, int) else None,
                'plot': movie.get('Plot'),
                'genres': split_string(movie.get('Genre', '')),
                'directors': split_string(movie.get('Director', '')),
                'writers': split_string(movie.get('Writer', '')),
                'actors': split_string(movie.get('Actors', '')),
            }
            movie_data = {k: v for k, v in movie_data.items() if v}
        except:
            log.error('Failed parsing IMDB API json: %s',
                      traceback.format_exc())

        return movie_data
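For reference, parseMovie consumes an OMDb-style payload; a hypothetical input and the rough shape of the output (field names follow the OMDb API, values are illustrative):

# Hypothetical OMDb-style input:
#   {"Response": "True", "Type": "movie", "Title": "Example Movie",
#    "Year": "2010", "Runtime": "148 min", "imdbID": "tt0000000",
#    "imdbRating": "8.8", "imdbVotes": "1,234,567", "Genre": "Action, Sci-Fi"}
# parseMovie would then return roughly:
#   {'type': 'movie', 'via_imdb': True, 'titles': ['Example Movie'],
#    'original_title': 'Example Movie', 'imdb': 'tt0000000', 'runtime': 148,
#    'year': 2010, 'genres': ['Action', 'Sci-Fi'], ...}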
Example #3
    def getWatchlist(self):

        enablers = [
            try_int(x) for x in split_string(self.conf('automation_urls_use'))
        ]
        urls = split_string(self.conf('automation_urls'))

        movies = []
        for index, username in enumerate(urls):

            if not enablers[index]:
                continue

            soup = BeautifulSoup(self.getHTMLData(self.url % (username, 1)))

            pagination = soup.find_all('li', attrs={'class': 'paginate-page'})
            number_of_pages = try_int(pagination[-1].find('a').get_text()) if pagination else 1
            pages = list(range(1, number_of_pages + 1))  # include the final page

            for page in pages:
                soup = BeautifulSoup(
                    self.getHTMLData(self.url % (username, page)))
                movies += self.getMoviesFromHTML(soup)

        return movies
Example #4
    def _search(self, movie, quality, results):

        match = re.match(r'tt(\d{7})', get_identifier(movie))
        if not match:  # no usable IMDB id, nothing to query
            return

        data = self._post_query(imdb={'id': match.group(1)})

        if data:
            try:
                for result in data:
                    results.append({
                        'id': result['id'],
                        'name': result['name'],
                        'url': self.urls['download'] % (result['id'], self.conf('passkey')),
                        'detail_url': self.urls['detail'] % result['id'],
                        'size': try_int(result['size']) / 1024 / 1024,
                        'seeders': try_int(result['seeders']),
                        'leechers': try_int(result['leechers']),
                    })
            except:
                log.error('Failed getting results from %s: %s',
                          (self.getName(), traceback.format_exc()))
Example #5
    def getIMDBids(self):

        movies = []

        urls = dict(list(zip(split_string(self.conf('automation_urls')),
                             [try_int(x) for x in split_string(self.conf('automation_urls_use'))])))

        for url in urls:

            if not urls[url]:
                continue

            rss_movies = self.getRSSData(url)

            for movie in rss_movies:

                description = self.get_text_element(movie, 'description')
                grabs = 0

                for item in movie:
                    if item.attrib.get('name') == 'grabs':
                        grabs = item.attrib.get('value')
                        break

                if int(grabs) > try_int(self.conf('number_grabs')):
                    title = re.match(r'.*Title: .a href.*/">(.*) \(\d{4}\).*', description).group(1)
                    log.info2('%s grabs for movie: %s, enqueue...', (grabs, title))
                    year = re.match(r'.*Year: (\d{4}).*', description).group(1)
                    imdb = self.search(title, year)

                    if imdb and self.isMinimalMovie(imdb):
                        movies.append(imdb['imdb'])

        return movies
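The two regexes above assume RSS descriptions of roughly this (hypothetical) shape:

#   '... Title: <a href="http://example/123/">Example Movie (2010)</a> ... Year: 2010 ...'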
Example #6
    def _searchOnTitle(self, title, media, quality, results):

        url = self.urls['search'] % self.buildUrl(title, media, quality)

        data = self.getHTMLData(url)

        if data:
            html = BeautifulSoup(data)

            try:
                result_table = html.find('table', attrs = {'id': 'torrenttable'})
                if not result_table:
                    return

                entries = result_table.find_all('tr')

                for result in entries[1:]:

                    link = result.find('td', attrs = {'class': 'name'}).find('a')
                    url = result.find('td', attrs = {'class': 'quickdownload'}).find('a')
                    details = result.find('td', attrs = {'class': 'name'}).find('a')

                    results.append({
                        'id': link['href'].replace('/torrent/', ''),
                        'name': six.text_type(link.string),
                        'url': self.urls['download'] % url['href'],
                        'detail_url': self.urls['download'] % details['href'],
                        'size': self.parseSize(result.find_all('td')[4].string),
                        'seeders': try_int(result.find('td', attrs={'class': 'seeders'}).string),
                        'leechers': try_int(result.find('td', attrs={'class': 'leechers'}).string),
                    })

            except:
                log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))
Example #7
    def _search(self, movie, quality, results):
        limit = 10
        page = 1
        data = self.getJsonData(self.urls['search'] %
                                (get_identifier(movie), limit, page))

        if data:
            movie_count = try_int(data['data']['movie_count'])

            if movie_count == 0:
                log.debug('%s - found no results', (self.getName()))
            else:

                movie_results = data['data']['movies']
                for result in movie_results:
                    name = result['title']
                    year = result['year']
                    detail_url = result['url']

                    for torrent in result['torrents']:
                        t_quality = torrent['quality']

                        if t_quality in quality['label']:
                            torrent_hash = torrent['hash']
                            size = try_int(torrent['size_bytes'] / 1048576)
                            seeders = try_int(torrent['seeds'])
                            leechers = try_int(torrent['peers'])
                            pubdate = torrent['date_uploaded']  # format: 2017-02-17 18:40:03
                            pubdate = datetime.strptime(pubdate, '%Y-%m-%d %H:%M:%S')
                            age = (datetime.now() - pubdate).days

                            results.append({
                                'id': random.randint(100, 9999),
                                'name': '%s (%s) %s %s %s' % (name, year, 'YTS', t_quality, 'BR-Rip'),
                                'url': self.make_magnet(torrent_hash, name),
                                'size': size,
                                'seeders': seeders,
                                'leechers': leechers,
                                'age': age,
                                'detail_url': detail_url,
                                'score': 1
                            })

        return
Example #8
    def _searchOnTitle(self, title, media, quality, results):

        url = self.buildUrl(title, media, quality)
        data = self.getHTMLData(url)

        if data:
            html = BeautifulSoup(data)

            try:
                resultsTable = html.find('table', attrs={'id': 'torrents-table'})
                if resultsTable is None:
                    return

                entries = resultsTable.find_all('tr', attrs={'class': 'tt_row'})
                for result in entries:

                    link = result.find('td', attrs={'class': 'ttr_name'}).find('a')
                    url = result.find('td', attrs={'class': 'td_dl'}).find('a')
                    seeders = result.find('td', attrs={'class': 'ttr_seeders'}).find('a')
                    leechers = result.find('td', attrs={'class': 'ttr_leechers'}).find('a')
                    torrent_id = link['href'].replace('details?id=', '')

                    results.append({
                        'id': torrent_id,
                        'name': link['title'],
                        'url': self.urls['download'] % url['href'],
                        'detail_url': self.urls['detail'] % torrent_id,
                        'size': self.parseSize(result.find('td', attrs={'class': 'ttr_size'}).contents[0]),
                        'seeders': try_int(seeders.string) if seeders else 0,
                        'leechers': try_int(leechers.string) if leechers else 0,
                        'get_more_info': self.getMoreInfo,
                    })

            except:
                log.error('Failed getting results from %s: %s',
                          (self.getName(), traceback.format_exc()))
Example #9
    def _searchOnTitle(self, title, media, quality, results):

        search_url = self.urls['search']

        # Create search parameters
        search_params = self.buildUrl(title, media, quality)

        min_seeds = try_int(self.conf('minimal_seeds'))
        if min_seeds:
            search_params += ' seed > %s' % (min_seeds - 1)

        rss_data = self.getRSSData(search_url % search_params)

        if rss_data:
            try:

                for result in rss_data:

                    name = self.get_text_element(result, 'title')
                    detail_url = self.get_text_element(result, 'link')
                    description = self.get_text_element(result, 'description')

                    magnet = split_string(detail_url, '/')[-1]
                    magnet_url = 'magnet:?xt=urn:btih:%s&dn=%s&tr=%s' % (
                        magnet.upper(), try_url_encode(name),
                        try_url_encode(
                            'udp://tracker.openbittorrent.com/announce'))

                    reg = re.search(
                        r'Size: (?P<size>\d+) (?P<unit>[KMG]B) Seeds: (?P<seeds>[\d,]+) Peers: (?P<peers>[\d,]+)',
                        six.text_type(description))
                    size = reg.group('size')
                    unit = reg.group('unit')
                    seeds = reg.group('seeds').replace(',', '')
                    peers = reg.group('peers').replace(',', '')

                    # Result sizes are reported in MB: scale GB entries up, round sub-MB entries down to 0
                    multiplier = 1
                    if unit == 'GB':
                        multiplier = 1000
                    elif unit == 'KB':
                        multiplier = 0

                    results.append({
                        'id': magnet,
                        'name': six.text_type(name),
                        'url': magnet_url,
                        'detail_url': detail_url,
                        'size': try_int(size) * multiplier,
                        'seeders': try_int(seeds),
                        'leechers': try_int(peers),
                    })

            except:
                log.error('Failed getting results from %s: %s',
                          (self.getName(), traceback.format_exc()))
Example #10
    def _search(self, media, quality, results):

        url = self.urls['search'] % self.buildUrl(media, quality)
        data = self.getHTMLData(url)

        if data:
            html = BeautifulSoup(data)

            try:
                result_table = html.find('table', attrs={'id': 'torrent_table'})
                if not result_table:
                    return

                entries = result_table.find_all('tr', attrs={'class': 'torrent'})

                for result in entries:

                    link = result.find('span', attrs={'class': 'torrent_name_link'}).parent
                    url = result.find('td', attrs={'class': 'torrent_td'}).find('a')
                    size = result.find('td', attrs={'class': 'size'}).contents[0].strip('\n ')
                    tds = result.find_all('td')

                    results.append({
                        'id': link['href'].replace('torrents.php?torrentid=', ''),
                        'name': six.text_type(link.span.string).translate({ord(six.u('\xad')): None}),
                        'url': self.urls['download'] % url['href'],
                        'detail_url': self.urls['download'] % link['href'],
                        'size': self.parseSize(size),
                        'seeders': try_int(tds[-2].string),
                        'leechers': try_int(tds[-1].string),
                    })

            except:
                log.error('Failed parsing %s: %s',
                          (self.getName(), traceback.format_exc()))
Example #11
    def _search(self, media, quality, results):

        query = self.buildUrl(media, quality)

        url = "%s&%s" % (self.urls['search'], query)

        data = self.getHTMLData(url, headers=self.getRequestHeaders())

        if data:
            # Remove BiT-HDTV's output garbage so outdated BS4 versions successfully parse the HTML
            split_data = data.partition('-->')
            if '## SELECT COUNT(' in split_data[0]:
                data = split_data[2]

            html = BeautifulSoup(data, 'html.parser')

            try:
                result_tables = html.find_all('table', attrs={'width': '800', 'class': ''})
                if not result_tables:  # find_all returns an empty list, never None
                    return

                # Take first result
                result_table = result_tables[0]

                entries = result_table.find_all('tr')
                for result in entries[1:]:

                    cells = result.find_all('td')
                    link = cells[2].find('a')
                    torrent_id = link['href'].split('id=')[1]

                    results.append({
                        'id': torrent_id,
                        'name': link.contents[0].get_text(),
                        'url': self.urls['download'] % torrent_id,
                        'detail_url': self.urls['detail'] % torrent_id,
                        'size': self.parseSize(cells[6].get_text()),
                        'seeders': try_int(cells[8].string),
                        'leechers': try_int(cells[9].string),
                        'get_more_info': self.getMoreInfo,
                    })

            except:
                log.error('Failed getting results from %s: %s',
                          (self.getName(), traceback.format_exc()))
Example #12
    def getMeta(self, filename):

        try:
            p = enzyme.parse(filename)

            # Video codec
            vc = ('H264' if p.video[0].codec == 'AVC1' else
                  'x265' if p.video[0].codec == 'HEVC' else p.video[0].codec)

            # Audio codec
            ac = p.audio[0].codec
            try:
                ac = self.audio_codec_map.get(p.audio[0].codec)
            except:
                pass

            # Find title in video headers
            titles = []

            try:
                if p.title and self.findYear(p.title):
                    titles.append(ss(p.title))
            except:
                log.error('Failed getting title from meta: %s',
                          traceback.format_exc())

            for video in p.video:
                try:
                    if video.title and self.findYear(video.title):
                        titles.append(ss(video.title))
                except:
                    log.error('Failed getting title from meta: %s',
                              traceback.format_exc())

            return {
                'titles': list(set(titles)),
                'video': vc,
                'audio': ac,
                'resolution_width': try_int(p.video[0].width),
                'resolution_height': try_int(p.video[0].height),
                'audio_channels': p.audio[0].channels,
            }
        except enzyme.exceptions.ParseError:
            log.debug('Failed to parse meta for %s', filename)
        except enzyme.exceptions.NoParserError:
            log.debug('No parser found for %s', filename)
        except:
            log.debug('Failed parsing %s', filename)

        return {}
Example #13
    def fill(self, reorder=False):

        try:
            db = get_db()

            order = 0
            for q in self.qualities:

                existing = None
                try:
                    existing = db.get('quality',
                                      q.get('identifier'),
                                      with_doc=reorder)
                except RecordNotFound:
                    pass

                if not existing:
                    db.insert({
                        '_t': 'quality',
                        'order': order,
                        'identifier': q.get('identifier'),
                        'size_min': try_int(q.get('size')[0]),
                        'size_max': try_int(q.get('size')[1]),
                    })

                    log.info('Creating profile: %s', q.get('label'))
                    db.insert({
                        '_t': 'profile',
                        'order': order + 20,  # Make sure it goes behind other profiles
                        'core': True,
                        'qualities': [q.get('identifier')],
                        'label': to_unicode(q.get('label')),
                        'finish': [True],
                        'wait_for': [0],
                    })
                elif reorder:
                    log.info2('Updating quality order')
                    existing['doc']['order'] = order
                    db.update(existing['doc'])

                order += 1

            return True
        except:
            log.error('Failed: %s', traceback.format_exc())

        return False
Example #14
    def _searchOnTitle(self, title, movie, quality, results):

        url = self.urls['search'] % self.buildUrl(title, movie, quality)
        data = self.getHTMLData(url)

        if data:
            html = BeautifulSoup(data,
                                 'html.parser',
                                 parse_only=self.only_tables_tags)

            try:
                result_table = html.find('table', attrs={'class': 'koptekst'})
                if not result_table or 'nothing found!' in data.lower():
                    return

                entries = result_table.find_all('tr')
                for result in entries[1:]:

                    all_cells = result.find_all('td')

                    torrent = all_cells[self.torrent_name_cell].find('a')
                    download = all_cells[self.torrent_download_cell].find('a')

                    torrent_id = torrent['href']
                    torrent_id = torrent_id.replace('details.php?id=', '')
                    torrent_id = torrent_id.replace('&hit=1', '')

                    torrent_name = torrent.getText()

                    torrent_size = self.parseSize(all_cells[8].getText())
                    torrent_seeders = try_int(all_cells[10].getText())
                    torrent_leechers = try_int(all_cells[11].getText())
                    torrent_url = self.urls['baseurl'] % download['href']
                    torrent_detail_url = self.urls['baseurl'] % torrent['href']

                    results.append({
                        'id': torrent_id,
                        'name': torrent_name,
                        'size': torrent_size,
                        'seeders': torrent_seeders,
                        'leechers': torrent_leechers,
                        'url': torrent_url,
                        'detail_url': torrent_detail_url,
                    })

            except:
                log.error('Failed getting results from %s: %s',
                          (self.getName(), traceback.format_exc()))
Example #15
    def _search(self, media, quality, results):

        nzbs = self.getRSSData(self.urls['search'] % self.buildUrl(media))

        for nzb in nzbs:

            nzbclub_id = try_int(
                self.get_text_element(
                    nzb, "link").split('/nzb_view/')[1].split('/')[0])
            enclosure = self.get_element(nzb, "enclosure").attrib
            size = enclosure['length']
            date = self.get_text_element(nzb, "pubDate")

            def extra_check(item):
                full_description = self.getCache('nzbclub.%s' % nzbclub_id,
                                                 item['detail_url'],
                                                 cache_timeout=25920000)

                for ignored in [
                        'ARCHIVE inside ARCHIVE', 'Incomplete',
                        'repair impossible'
                ]:
                    if ignored in full_description:
                        log.info(
                            'Wrong: Seems to be passworded or corrupted files: %s',
                            item['name'])
                        return False

                return True

            results.append({
                'id': nzbclub_id,
                'name': to_unicode(self.get_text_element(nzb, "title")),
                'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
                'size': try_int(size) / 1024 / 1024,
                'url': enclosure['url'].replace(' ', '_'),
                'detail_url': self.get_text_element(nzb, "link"),
                'get_more_info': self.getMoreInfo,
                'extra_check': extra_check
            })
Example #16
    def checkFilesChanged(self, files, unchanged_for=60):
        now = time.time()
        file_too_new = False

        file_time = []
        for cur_file in files:

            # File got removed while checking
            if not os.path.isfile(cur_file):
                file_too_new = now
                break

            # File has changed in last 60 seconds
            file_time = self.getFileTimes(cur_file)
            for t in file_time:
                if t > now - unchanged_for:
                    file_too_new = try_int(time.time() - t)
                    break

            if file_too_new:
                break

        if file_too_new:
            try:
                time_string = time.ctime(file_time[0])
            except:
                try:
                    time_string = time.ctime(file_time[1])
                except:
                    time_string = 'unknown'

            return file_too_new, time_string

        return False, None
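A standalone sketch of the same settle-check using only the standard library (getFileTimes above is assumed to return modification timestamps):

import os
import time

def is_settled(path, unchanged_for=60):
    # True when the file still exists and has not been modified
    # within the last `unchanged_for` seconds
    return os.path.isfile(path) and os.path.getmtime(path) <= time.time() - unchanged_for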
Example #17
    def get(self, nr = 0, **kwargs):

        nr = try_int(nr)
        current_path = None

        total = 1
        for x in range(0, 50):

            path = '%s%s' % (Env.get('log_path'), '.%s' % x if x > 0 else '')

            # Check see if the log exists
            if not os.path.isfile(path):
                total = x - 1
                break

            # Set current path
            if x == nr:
                current_path = path

        log_content = ''
        if current_path:
            with open(current_path, 'r') as f:
                log_content = f.read()
        logs = self.toList(log_content)

        return {
            'success': True,
            'log': logs,
            'total': total,
        }
Example #18
    def _searchOnTitle(self, title, movie, quality, results):

        movieTitle = try_url_encode(
            '%s %s' % (title.replace(':', ''), movie['info']['year']))
        url = self.urls['search'] % (self.getSceneOnly(), movieTitle)
        data = self.getHTMLData(url)

        if data:
            html = BeautifulSoup(data)

            try:
                result_table = html.find('table', attrs={'id': 'torrent_table'})
                if not result_table:
                    return

                entries = result_table.find_all('tr', attrs={'class': 'torrent'})
                for result in entries:

                    link = result.find('a', attrs={'dir': 'ltr'})
                    url = result.find('span', attrs={'title': 'Download'}).parent
                    tds = result.find_all('td')
                    size = tds[5].contents[0].strip('\n ')

                    results.append({
                        'id': link['href'].replace('torrents.php?id=', '').split('&')[0],
                        'name': link.contents[0],
                        'url': self.urls['download'] % url['href'],
                        'detail_url': self.urls['download'] % link['href'],
                        'size': self.parseSize(size),
                        'seeders': try_int(tds[-2].string),
                        'leechers': try_int(tds[-1].string),
                    })
            except:
                log.error('Failed parsing %s: %s',
                          (self.getName(), traceback.format_exc()))
Example #19
    def updateProgress(self, folder, to_go):

        pr = self.in_progress[folder]
        if to_go < pr['to_go']:
            pr['to_go'] = to_go

        # Average time per finished item; guard against division by zero when nothing is done yet
        avg = (time.time() - pr['started']) / max(1, pr['total'] - pr['to_go'])
        pr['eta'] = try_int(avg * pr['to_go'])
Example #20
def providerScore(provider):

    try:
        score = try_int(
            Env.setting('extra_score', section=provider.lower(), default=0))
    except:
        score = 0

    return score
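Hedged usage sketch: the score is read from the provider's own settings section, so with an (illustrative) config entry like the one below:

#   [torrentleech]
#   extra_score = 20
#
# providerScore('TorrentLeech') would return 20, and 0 when the setting is missing or malformed.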
Example #21
    def runtimeToMinutes(self, runtime_str):
        runtime = 0

        regex = r'(\d*.?\d+).(h|hr|hrs|mins|min)+'
        matches = re.findall(regex, runtime_str)
        for match in matches:
            nr, size = match
            runtime += try_int(nr) * (60 if str(size)[0] == 'h' else 1)

        return runtime
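Assuming OMDb-style runtime strings, the regex above yields for example:

#   runtimeToMinutes('169 min')      ->  169
#   runtimeToMinutes('2 hr 32 min')  ->  2 * 60 + 32 = 152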
Example #22
    def _search(self, movie, quality, results):
        data = self.getJsonData(self.urls['search'] % (
            self.conf('apikey'), self.conf('username'), get_identifier(movie), self.conf('internal_only')))

        if data:
            if 'error' in data:
                if self.login_fail_msg in data['error']: # Check for login failure
                    self.disableAccount()
                else:
                    log.error('%s returned an error (possible rate limit): %s', (self.getName(), data['error']))
                return

            try:
                for key, result in list(data.items()):
                    if try_int(result['total_results']) == 0:
                        return
                    torrentscore = self.conf('extra_score')
                    releasegroup = result['releasegroup']
                    resolution = result['resolution']
                    encoding = result['encoding']
                    freeleech = try_int(result['freeleech'])
                    seeders = try_int(result['seeders'])
                    torrent_desc = '/ %s / %s / %s / %s seeders' % (releasegroup, resolution, encoding, seeders)

                    if freeleech > 0 and self.conf('prefer_internal'):
                        torrent_desc += '/ Internal'
                        torrentscore += 200

                    if seeders == 0:
                        torrentscore = 0

                    name = result['release_name']
                    year = try_int(result['year'])

                    results.append({
                        'id': try_int(result['torrentid']),
                        'name': re.sub(r'[^A-Za-z0-9\-_ \(\).]+', '', '%s (%s) %s' % (name, year, torrent_desc)),
                        'url': self.urls['download'] % (result['torrentid'], result['torrentpass']),
                        'detail_url': self.urls['detail'] % result['torrentid'],
                        'size': try_int(result['size']),
                        'seeders': try_int(result['seeders']),
                        'leechers': try_int(result['leechers']),
                        'age': try_int(result['age']),
                        'score': torrentscore
                    })
            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
Example #23
    def ageToDays(self, age_str):
        age = 0
        age_str = age_str.replace('&nbsp;', ' ')

        regex = r'(\d*.?\d+).(sec|hour|day|week|month|year)+'
        matches = re.findall(regex, age_str)
        for match in matches:
            nr, size = match
            mult = 1
            if size == 'week':
                mult = 7
            elif size == 'month':
                mult = 30.5
            elif size == 'year':
                mult = 365

            age += try_int(nr) * mult

        return try_int(age)
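For example (after the '&nbsp;' separators are stripped above):

#   ageToDays('2&nbsp;weeks')  ->  14
#   ageToDays('1 month')       ->  30  (1 * 30.5, truncated by the final try_int)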
Example #24
def namePositionScore(nzb_name, movie_name):
    score = 0

    nzb_words = re.split(r'\W+', simplify_string(nzb_name))
    qualities = fire_event('quality.all', single=True)

    try:
        nzb_name = re.search(r'([\'"])[^\1]*\1', nzb_name).group(0)
    except:
        pass

    name_year = fire_event('scanner.name_year', nzb_name, single=True)

    # Give points for movies beginning with the correct name
    split_by = simplify_string(movie_name)
    name_split = []
    if len(split_by) > 0:
        name_split = simplify_string(nzb_name).split(split_by)
        if name_split[0].strip() == '':
            score += 10

    # If year is second in line, give more points
    if len(name_split) > 1 and name_year:
        after_name = name_split[1].strip()
        if try_int(after_name[:4]) == name_year.get('year', None):
            score += 10
            after_name = after_name[4:]

        # Give -point to crap between year and quality
        found_quality = None
        for quality in qualities:
            # Main in words
            if quality['identifier'] in nzb_words:
                found_quality = quality['identifier']

            # Alt in words
            for alt in quality['alternative']:
                if alt in nzb_words:
                    found_quality = alt
                    break

        if not found_quality:
            return score - 20

        allowed = []
        for value in name_scores:
            name, sc = value.split(':')
            allowed.append(name)

        inbetween = re.split(r'\W+', after_name.split(found_quality)[0].strip())

        score -= (10 * len(set(inbetween) - set(allowed)))

    return score
Example #25
    def setCrons(self):

        fire_event('schedule.remove', 'manage.update_library')
        refresh = try_int(self.conf('library_refresh_interval'))
        if refresh > 0:
            fire_event('schedule.interval',
                       'manage.update_library',
                       self.updateLibrary,
                       hours=refresh,
                       single=True)

        return True
Example #26
    def partial(self, type = 'all', lines = 30, offset = 0, **kwargs):

        total_lines = try_int(lines)
        offset = try_int(offset)

        log_lines = []

        for x in range(0, 50):

            path = '%s%s' % (Env.get('log_path'), '.%s' % x if x > 0 else '')

            # Check see if the log exists
            if not os.path.isfile(path):
                break

            with open(path, 'r') as f:
                log_content = to_unicode(f.read())
            raw_lines = self.toList(log_content)
            raw_lines.reverse()

            brk = False
            for line in raw_lines:

                if type == 'all' or line.get('type') == type.upper():
                    log_lines.append(line)

                if len(log_lines) >= (total_lines + offset):
                    brk = True
                    break

            if brk:
                break

        log_lines = log_lines[offset:]
        log_lines.reverse()

        return {
            'success': True,
            'log': log_lines,
        }
Example #27
def sceneScore(nzb_name):

    check_names = [nzb_name]

    # Match names between "
    try:
        check_names.append(re.search(r'([\'"])[^\1]*\1', nzb_name).group(0))
    except:
        pass

    # Match longest name between []
    try:
        check_names.append(
            max(re.findall(r'[^[]*\[([^]]*)\]', nzb_name), key=len).strip())
    except:
        pass

    for name in check_names:

        # Strip twice, remove possible file extensions
        name = name.lower().strip(' "\'.-_[]')
        name = re.sub(r'\.([a-z0-9]{0,4})$', '', name)
        name = name.strip(' "\'.-_[]')

        # Make sure year and groupname is in there
        year = re.findall('(?P<year>19[0-9]{2}|20[0-9]{2})', name)
        group = re.findall(r'\-([a-z0-9]+)$', name)

        if len(year) > 0 and len(group) > 0:
            try:
                validate = fire_event('release.validate', name, single=True)
                if validate and try_int(validate.get('score')) != 0:
                    log.debug(
                        'Release "%s" scored %s, reason: %s',
                        (nzb_name, validate['score'], validate['reasons']))
                    return try_int(validate.get('score'))
            except:
                log.error('Failed scoring scene: %s', traceback.format_exc())

    return 0
Example #28
    def auto_update(self):
        do_check = True

        try:
            last_check = try_int(Env.prop(self.last_check, default=0))
            now = try_int(time.time())
            do_check = last_check < now - 43200

            if do_check:
                Env.prop(self.last_check, value=now)
        except:
            log.error('Failed checking last time to update: %s',
                      traceback.format_exc())

        if (do_check and self.is_enabled() and self.check()
                and self.conf('automatic') and not self.updater.update_failed):

            if self.updater.do_update():

                # Notify before restarting
                try:
                    if self.conf('notification'):
                        info = self.updater.info()
                        version_date = datetime.fromtimestamp(
                            info['update_version']['date'])
                        fire_event(
                            'updater.updated',
                            'CouchPotato: Updated to a new version with hash "%s", this version is from %s'
                            % (info['update_version']['hash'], version_date),
                            data=info)
                except:
                    log.error('Failed notifying for update: %s',
                              traceback.format_exc())

                fire_event_async('app.restart')

                return True

        return False
Example #29
                    def extra_check(item):
                        parts = re.search(
                            r'available:.(?P<parts>\d+)./.(?P<total>\d+)',
                            info.text)
                        total = float(try_int(parts.group('total')))
                        parts = float(try_int(parts.group('parts')))

                        ratio = total / parts
                        if ratio < 1 and (ratio < 0.95 or (
                                ratio >= 0.95 and not ('par2' in info.text.lower()
                                                       or 'pa3' in info.text.lower()))):
                            log.info2(
                                'Wrong: \'%s\', not complete: %s out of %s',
                                (item['name'], parts, total))
                            return False

                        if 'requires password' in info.text.lower():
                            log.info2('Wrong: \'%s\', passworded',
                                      (item['name']))
                            return False

                        return True
Example #30
    def make_relative(self):

        for static_type in self.paths:

            updates_paths = []
            for rel_path in self.paths.get(static_type):
                file_path = os.path.join(Env.get('app_dir'), 'couchpotato',
                                         'static', rel_path)
                core_url = 'static/%s?%d' % (
                    rel_path, try_int(os.path.getmtime(file_path)))

                updates_paths.append(core_url)

            self.paths[static_type] = updates_paths