Example #1
    def _searchOnTitle(self, title, media, quality, results):

        query = '"%s" %s' % (title, media['info']['year'])

        data = {
            '/browse.php?': None,
            'cata': 'yes',
            'jxt': 8,
            'jxw': 'b',
            'search': query,
        }

        data = self.getJsonData(self.urls['search'], data = data)
        try: torrents = data.get('Fs', [])[0].get('Cn', {}).get('torrents', [])
        except: return

        for torrent in torrents:
            results.append({
                'id': torrent['id'],
                'name': torrent['name'],
                'url': self.urls['download'] % (torrent['id'], torrent['fname']),
                'detail_url': self.urls['detail'] % torrent['id'],
                'size': self.parseSize(torrent.get('size')),
                'seeders': tryInt(torrent.get('seed')),
                'leechers': tryInt(torrent.get('leech')),
            })
Example #2
    def getMeta(self, filename):

        try:
            p = enzyme.parse(filename)

            # Video codec
            vc = ("h264" if p.video[0].codec == "AVC1" else p.video[0].codec).lower()

            # Audio codec
            ac = self.audio_codec_map.get(p.audio[0].codec, p.audio[0].codec)  # fall back to the raw codec name when unmapped; dict.get never raises

            return {
                "video": vc,
                "audio": ac,
                "resolution_width": tryInt(p.video[0].width),
                "resolution_height": tryInt(p.video[0].height),
                "audio_channels": p.audio[0].channels,
            }
        except ParseError:
            log.debug("Failed to parse meta for %s", filename)
        except NoParserError:
            log.debug("No parser found for %s", filename)
        except:
            log.debug("Failed parsing %s", filename)

        return {}
Example #3
    def _search(self, media, quality, results):

        url = self.urls['search'] % self.buildUrl(media, quality)
        data = self.getHTMLData(url)

        if data:
            html = BeautifulSoup(data)

            try:
                result_table = html.find('table', attrs = {'id': 'torrent_table'})
                if not result_table:
                    return

                entries = result_table.find_all('tr', attrs = {'class': 'torrent'})

                for result in entries:

                    link = result.find('span', attrs = {'class': 'torrent_name_link'}).parent
                    url = result.find('td', attrs = {'class': 'torrent_td'}).find('a')

                    results.append({
                        'id': link['href'].replace('torrents.php?torrentid=', ''),
                        'name': six.text_type(link.span.string).translate({ord(six.u('\xad')): None}),
                        'url': self.urls['download'] % url['href'],
                        'detail_url': self.urls['download'] % link['href'],
                        'size': self.parseSize(result.find_all('td')[5].string),
                        'seeders': tryInt(result.find_all('td')[7].string),
                        'leechers': tryInt(result.find_all('td')[8].string),
                    })

            except:
                log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))
Example #4
    def _search(self, movie, quality, results):

        domain = self.getDomain()
        if not domain:
            return

        search_url = self.urls['search'] % (domain, getIdentifier(movie))

        data = self.getJsonData(search_url) or {}
        data = data.get('data')

        if isinstance(data, dict) and data.get('movies'):
            try:
                for result in data.get('movies'):

                    for release in result.get('torrents', []):

                        if release['quality'] and release['quality'] not in result['title_long']:
                            title = result['title_long'] + ' BRRip ' + release['quality']
                        else:
                            title = result['title_long'] + ' BRRip'

                        results.append({
                            'id': release['hash'],
                            'name': title,
                            'url': release['url'],
                            'detail_url': result['url'],
                            'size': self.parseSize(release['size']),
                            'seeders': tryInt(release['seeds']),
                            'leechers': tryInt(release['peers']),
                        })

            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
Example #5
    def _searchOnTitle(self, title, movie, quality, results):

        url = self.urls['search'] % (tryUrlencode('%s %s' % (title.replace(':', ''), movie['library']['year'])), self.getCatId(quality['identifier'])[0])
        data = self.getHTMLData(url, opener = self.login_opener)

        if data:
            html = BeautifulSoup(data)

            try:
                result_table = html.find('table', attrs = {'border' : '1'})
                if not result_table:
                    return

                entries = result_table.find_all('tr')

                for result in entries[1:]:
                    cells = result.find_all('td')

                    link = cells[1].find('a', attrs = {'class' : 'index'})

                    full_id = link['href'].replace('details.php?id=', '')
                    torrent_id = full_id[:6]

                    results.append({
                        'id': torrent_id,
                        'name': link.contents[0],
                        'url': self.urls['download'] % (torrent_id, link.contents[0]),
                        'detail_url': self.urls['detail'] % torrent_id,
                        'size': self.parseSize(cells[6].contents[0] + cells[6].contents[2]),
                        'seeders': tryInt(cells[8].find('span').contents[0]),
                        'leechers': tryInt(cells[9].find('span').contents[0]),
                    })

            except:
                log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))
Example #6
    def getIMDBids(self):

        movies = []

        if self.conf('backlog'):

            page = 0
            while True:
                page += 1

                url = self.backlog_url % page
                data = self.getHTMLData(url)
                soup = BeautifulSoup(data)

                try:
                    # Stop if the release year is before the minimal year
                    page_year = soup.body.find_all('center')[3].table.tr.find_all('td', recursive = False)[3].h3.get_text().split(', ')[1]
                    if tryInt(page_year) < self.getMinimal('year'):
                        break

                    for table in soup.body.find_all('center')[3].table.tr.find_all('td', recursive = False)[3].find_all('table')[1:20]:
                        name = table.h3.get_text().lower().split('blu-ray')[0].strip()
                        year = table.small.get_text().split('|')[1].strip()

                        if '/' in name:  # make sure it is not a double movie release
                            continue

                        if tryInt(year) < self.getMinimal('year'):
                            continue

                        imdb = self.search(name, year)

                        if imdb:
                            if self.isMinimalMovie(imdb):
                                movies.append(imdb['imdb'])
                except:
                    log.debug('Error loading page: %s', page)
                    break

            self.conf('backlog', value = False)

        rss_movies = self.getRSSData(self.rss_url)

        for movie in rss_movies:
            name = self.getTextElement(movie, 'title').lower().split('blu-ray')[0].strip('(').rstrip()
            year = self.getTextElement(movie, 'description').split('|')[1].strip('(').strip()

            if '/' in name:  # make sure it is not a double movie release
                continue

            if tryInt(year) < self.getMinimal('year'):
                continue

            imdb = self.search(name, year)

            if imdb:
                if self.isMinimalMovie(imdb):
                    movies.append(imdb['imdb'])

        return movies
Example #7
def sceneScore(nzb_name):

    check_names = [nzb_name]

    # Match names between "
    try: check_names.append(re.search(r'([\'"])[^\1]*\1', nzb_name).group(0))
    except: pass

    # Match longest name between []
    try: check_names.append(max(re.findall(r'[^[]*\[([^]]*)\]', nzb_name), key = len).strip())
    except: pass

    for name in check_names:

        # Strip twice, remove possible file extensions
        name = name.lower().strip(' "\'\.-_\[\]')
        name = re.sub('\.([a-z0-9]{0,4})$', '', name)
        name = name.strip(' "\'\.-_\[\]')

        # Make sure year and groupname is in there
        year = re.findall('(?P<year>19[0-9]{2}|20[0-9]{2})', name)
        group = re.findall('\-([a-z0-9]+)$', name)

        if len(year) > 0 and len(group) > 0:
            try:
                validate = fireEvent('release.validate', name, single = True)
                if validate and tryInt(validate.get('score')) != 0:
                    log.debug('Release "%s" scored %s, reason: %s', (nzb_name, validate['score'], validate['reasons']))
                    return tryInt(validate.get('score'))
            except:
                log.error('Failed scoring scene: %s', traceback.format_exc())

    return 0
Example #8
    def _search(self, movie, quality, results):

        domain = self.getDomain()
        if not domain:
            return

        search_url = self.urls['search'] % (domain, getIdentifier(movie), quality['identifier'])

        data = self.getJsonData(search_url)

        if data and data.get('MovieList'):
            try:
                for result in data.get('MovieList'):

                    if result['Quality'] and result['Quality'] not in result['MovieTitle']:
                        title = result['MovieTitle'] + ' BrRip ' + result['Quality']
                    else:
                        title = result['MovieTitle'] + ' BrRip'

                    results.append({
                        'id': result['MovieID'],
                        'name': title,
                        'url': result['TorrentMagnetUrl'],
                        'detail_url': self.urls['detail'] % (domain, result['MovieID']),
                        'size': self.parseSize(result['Size']),
                        'seeders': tryInt(result['TorrentSeeds']),
                        'leechers': tryInt(result['TorrentPeers']),
                    })

            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
Example #9
    def _searchOnTitle(self, title, media, quality, results):

        query = '"%s" %s' % (title, media['info']['year'])

        post_data = {
            '/browse.php?': None,
            'cata': 'yes',
            'jxt': 4,
            'jxw': 'b',
            'search': query,
        }
        
        try:
            data = self.getJsonData(self.urls['search'], data=post_data)
            torrents = data.get('Fs', [])[0].get('Cn', {}).get('torrents', [])
        except:
            return

        try:
            for torrent in torrents:
                results.append({
                    'id': torrent['id'],
                    'name': torrent['name'],
                    'url': self.urls['download'] % torrent['id'],
                    'detail_url': self.urls['detail'] % torrent['id'],
                    'size': self.parseSize(torrent.get('size')),
                    'seeders': tryInt(torrent.get('seed')),
                    'leechers': tryInt(torrent.get('leech')),
                })
        except:
            log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
Example #10
    def getMeta(self, filename):

        try:
            p = enzyme.parse(filename)

            # Video codec
            vc = ('h264' if p.video[0].codec == 'AVC1' else p.video[0].codec).lower()

            # Audio codec
            ac = self.audio_codec_map.get(p.audio[0].codec, p.audio[0].codec)  # fall back to the raw codec name when unmapped; dict.get never raises

            return {
                'video': vc,
                'audio': ac,
                'resolution_width': tryInt(p.video[0].width),
                'resolution_height': tryInt(p.video[0].height),
            }
        except ParseError:
            log.debug('Failed to parse meta for %s', filename)
        except NoParserError:
            log.debug('No parser found for %s', filename)
        except:
            log.debug('Failed parsing %s', filename)

        return {}
Example #11
    def getIMDBids(self):

        movies = []

        urls = dict(zip(splitString(self.conf('automation_urls')), [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]))

        for url in urls:

            if not urls[url]:
                continue

            rss_movies = self.getRSSData(url)

            for movie in rss_movies:

                description = self.getTextElement(movie, 'description')
                grabs = 0

                for item in movie:
                    if item.attrib.get('name') == 'grabs':
                        grabs = item.attrib.get('value')
                        break

                if int(grabs) > tryInt(self.conf('number_grabs')):
                    title = re.match(r'.*Title: .a href.*/">(.*) \(\d{4}\).*', description).group(1)
                    log.info2('%s grabs for movie: %s, enqueue...', (grabs, title))
                    year = re.match(r'.*Year: (\d{4}).*', description).group(1)
                    imdb = self.search(title, year)

                    if imdb and self.isMinimalMovie(imdb):
                        movies.append(imdb['imdb'])

        return movies
Example #12
    def _searchOnTitle(self, title, movie, quality, results):

        page = 0
        total_pages = 1
        cats = self.getCatId(quality['identifier'])

        while page < total_pages:

            search_url = self.urls['search'] % (self.getDomain(), tryUrlencode('"%s" %s' % (title, movie['library']['year'])), page, ','.join(str(x) for x in cats))
            page += 1

            data = self.getHTMLData(search_url)

            if data:
                try:
                    soup = BeautifulSoup(data)
                    results_table = soup.find('table', attrs = {'id': 'searchResult'})

                    if not results_table:
                        return

                    try:
                        total_pages = len(soup.find('div', attrs = {'align': 'center'}).find_all('a'))
                    except:
                        pass

                    entries = results_table.find_all('tr')
                    for result in entries[2:]:
                        link = result.find(href = re.compile('torrent\/\d+\/'))
                        download = result.find(href = re.compile('magnet:'))

                        try:
                            size = re.search('Size (?P<size>.+),', six.text_type(result.select('font.detDesc')[0])).group('size')
                        except:
                            continue

                        if link and download:

                            def extra_score(item, result = result):  # bind this row now; a plain closure would score every item against the last parsed row
                                trusted = (0, 10)[result.find('img', alt = re.compile('Trusted')) is not None]
                                vip = (0, 20)[result.find('img', alt = re.compile('VIP')) is not None]
                                confirmed = (0, 30)[result.find('img', alt = re.compile('Helpers')) is not None]
                                moderated = (0, 50)[result.find('img', alt = re.compile('Moderator')) is not None]

                                return confirmed + trusted + vip + moderated

                            results.append({
                                'id': re.search('/(?P<id>\d+)/', link['href']).group('id'),
                                'name': link.string,
                                'url': download['href'],
                                'detail_url': self.getDomain(link['href']),
                                'size': self.parseSize(size),
                                'seeders': tryInt(result.find_all('td')[2].string),
                                'leechers': tryInt(result.find_all('td')[3].string),
                                'extra_score': extra_score,
                                'get_more_info': self.getMoreInfo
                            })

                except:
                    log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
Example #13
    def _searchOnTitle(self, title, media, quality, results):

        url = self.urls['search'] % self.buildUrl(title, media, quality)

        data = self.getHTMLData(url)

        if data:
            html = BeautifulSoup(data)

            try:
                result_table = html.find('table', attrs = {'id': 'torrenttable'})
                if not result_table:
                    return

                entries = result_table.find_all('tr')

                for result in entries[1:]:

                    link = result.find('td', attrs = {'class': 'name'}).find('a')
                    url = result.find('td', attrs = {'class': 'quickdownload'}).find('a')
                    details = result.find('td', attrs = {'class': 'name'}).find('a')

                    results.append({
                        'id': link['href'].replace('/torrent/', ''),
                        'name': six.text_type(link.string),
                        'url': self.urls['download'] % url['href'],
                        'detail_url': self.urls['download'] % details['href'],
                        'size': self.parseSize(result.find_all('td')[4].string),
                        'seeders': tryInt(result.find('td', attrs = {'class': 'seeders'}).string),
                        'leechers': tryInt(result.find('td', attrs = {'class': 'leechers'}).string),
                    })

            except:
                log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))
Example #14
    def parseResults(self, results, entries, year, quality, title):
        # print "//"*40
        # print year
        for result in entries:
            # build a fresh dict per row; reusing one dict would make every appended result identical
            new = {}
            tds = result.findAll('td')
            if len(tds) != 9:
                log.info("Wrong search result format, skipping.")
                continue
            try:
                new['detail_url'] = tds[0].a['href']
                new['size'] = self.parseSize("%s GB" % tds[7].span.text)
                new['id'] = tds[0].a['href'].split('showtopic=')[1]
                new['age'], new['url'] = self.getTorrentLink(new['detail_url'])
                new['name'] = self.standardize_title(tds[0].a.text, title, year, quality, self.desc)
                new['seeders'] = tryInt(tds[5].span.text)
                new['leechers'] = tryInt(tds[4].span.text)
                new['score'] = self.conf('extra_score') + 20

            except Exception as e:
                log.info("Search entry processing FAILED!")
                print(e)
                continue

            results.append(new)
            log.debug("New result %s", new)
Example #15
    def _search(self, media, quality, results):

        url = self.buildUrl(media, quality)
        data = self.getHTMLData(url)

        if data:
            html = BeautifulSoup(data)

            try:
                resultsTable = html.find('table', attrs = {'id': 'torrents-table'})
                if resultsTable is None:
                    return

                entries = resultsTable.find_all('tr', attrs = {'class': 'tt_row'})
                for result in entries:

                    link = result.find('td', attrs = {'class': 'ttr_name'}).find('a')
                    url = result.find('td', attrs = {'class': 'td_dl'}).find('a')
                    leechers = result.find('td', attrs = {'class': 'ttr_leechers'}).find('a')
                    torrent_id = link['href'].replace('details?id=', '')

                    results.append({
                        'id': torrent_id,
                        'name': link['title'],
                        'url': self.urls['download'] % url['href'],
                        'detail_url': self.urls['detail'] % torrent_id,
                        'size': self.parseSize(result.find('td', attrs = {'class': 'ttr_size'}).contents[0]),
                        'seeders': tryInt(result.find('td', attrs = {'class': 'ttr_seeders'}).find('a').string),
                        'leechers': tryInt(leechers.string) if leechers else 0,
                        'get_more_info': self.getMoreInfo,
                    })

            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
Example #16
    def _searchOnTitle(self, title, movie, quality, results):

        url = self.urls['search'] % (tryUrlencode(title.replace(':', '') + ' ' + quality['identifier']), self.getCatId(quality['identifier'])[0])
        data = self.getHTMLData(url, opener = self.login_opener)

        if data:
            html = BeautifulSoup(data)

            try:
                result_table = html.find('table', attrs = {'id' : 'torrenttable'})
                if not result_table:
                    return

                entries = result_table.find_all('tr')

                for result in entries[1:]:

                    link = result.find('td', attrs = {'class' : 'name'}).find('a')
                    url = result.find('td', attrs = {'class' : 'quickdownload'}).find('a')
                    details = result.find('td', attrs = {'class' : 'name'}).find('a')

                    results.append({
                        'id': link['href'].replace('/torrent/', ''),
                        'name': link.string,
                        'url': self.urls['download'] % url['href'],
                        'detail_url': self.urls['download'] % details['href'],
                        'download': self.loginDownload,
                        'size': self.parseSize(result.find_all('td')[4].string),
                        'seeders': tryInt(result.find('td', attrs = {'class' : 'seeders'}).string),
                        'leechers': tryInt(result.find('td', attrs = {'class' : 'leechers'}).string),
                    })

            except:
                log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))
Example #17
    def _searchOnTitle(self, title, movie, quality, results):

        movieTitle = tryUrlencode('%s %s' % (title.replace(':', ''), movie['info']['year']))
        url = self.urls['search'] % (self.getSceneOnly(), movieTitle)
        data = self.getHTMLData(url)

        if data:
            html = BeautifulSoup(data)

            try:
                result_table = html.find('table', attrs = {'id': 'torrent_table'})
                if not result_table:
                    return

                entries = result_table.find_all('tr', attrs = {'class': 'torrent'})
                for result in entries:

                    link = result.find('a', attrs = {'dir': 'ltr'})
                    url = result.find('span', attrs = {'title': 'Download'}).parent
                    tds = result.find_all('td')
                    size = tds[5].contents[0].strip('\n ')

                    results.append({
                        'id': link['href'].replace('torrents.php?id=', '').split('&')[0],
                        'name': link.contents[0],
                        'url': self.urls['download'] % url['href'],
                        'detail_url': self.urls['download'] % link['href'],
                        'size': self.parseSize(size),
                        'seeders': tryInt(tds[len(tds)-2].string),
                        'leechers': tryInt(tds[len(tds)-1].string),
                    })
            except:
                log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))
Example #18
    def autoUpdate(self):
        do_check = True

        try:
            last_check = tryInt(Env.prop(self.last_check, default = 0))
            now = tryInt(time.time())
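            # 43200 seconds = 12 hours between automatic update checks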
            do_check = last_check < now - 43200

            if do_check:
                Env.prop(self.last_check, value = now)
        except:
            log.error('Failed checking last time to update: %s', traceback.format_exc())

        if do_check and self.isEnabled() and self.check() and self.conf('automatic') and not self.updater.update_failed:

            if self.updater.doUpdate():

                # Notify before restarting
                try:
                    if self.conf('notification'):
                        info = self.updater.info()
                        version_date = datetime.fromtimestamp(info['update_version']['date'])
                        fireEvent('updater.updated', 'CouchPotato: Updated to a new version with hash "%s", this version is from %s' % (info['update_version']['hash'], version_date), data = info)
                except:
                    log.error('Failed notifying for update: %s', traceback.format_exc())

                fireEventAsync('app.restart')

                return True

        return False
Example #19
    def parseMovie(self, movie):

        movie_data = {}
        try:

            if isinstance(movie, (str, unicode)):
                movie = json.loads(movie)

            movie_data = {
                'titles': [movie.get('Title', '')],
                'original_title': movie.get('Title', ''),
                'images': {
                    'poster': [movie.get('Poster', '')],
                },
                'rating': {
                    'imdb': (tryFloat(movie.get('Rating', 0)), tryInt(movie.get('Votes', ''))),
                    'rotten': (tryFloat(movie.get('tomatoRating', 0)), tryInt(movie.get('tomatoReviews', 0))),
                },
                'imdb': str(movie.get('ID', '')),
                'runtime': self.runtimeToMinutes(movie.get('Runtime', '')),
                'released': movie.get('Released', ''),
                'year': movie.get('Year', ''),
                'plot': movie.get('Plot', ''),
                'genres': movie.get('Genre', '').split(','),
                'directors': movie.get('Director', '').split(','),
                'writers': movie.get('Writer', '').split(','),
                'actors': movie.get('Actors', '').split(','),
            }
        except:
            log.error('Failed parsing IMDB API json: %s' % traceback.format_exc())

        return movie_data
Example #20
    def _search(self, media, quality, results):

        url = self.urls['search'] % self.buildUrl(media, quality)
        data = self.getHTMLData(url)

        if data:
            html = BeautifulSoup(data)

            try:
                result_table = html.find('table', attrs = {'id': 'torrent_table'})
                if not result_table:
                    return

                entries = result_table.find_all('tr', attrs = {'class': 'torrent'})
                for result in entries:

                    link = result.find('a', attrs = {'dir': 'ltr'})
                    url = result.find('a', attrs = {'title': 'Download'})
                    tds = result.find_all('td')
                    size = tds[4].contents[0].strip('\n ')

                    results.append({
                        'id': link['href'].replace('torrents.php?id=', '').split('&')[0],
                        'name': link.contents[0],
                        'url': self.urls['download'] % url['href'],
                        'detail_url': self.urls['download'] % link['href'],
                        'size': self.parseSize(size),
                        'seeders': tryInt(tds[len(tds)-2].string),
                        'leechers': tryInt(tds[len(tds)-1].string),
                    })
            except:
                log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))
Example #21
    def _search(self, movie, quality, results):

        data = self.getJsonData(self.urls['search'] % (movie['library']['identifier'], quality['identifier']))

        if data and data.get('MovieList'):
            try:
                for result in data.get('MovieList'):

                    try:
                        title = result['TorrentUrl'].split('/')[-1][:-8].replace('_', '.').strip('._')
                        title = title.replace('.-.', '-')
                        title = title.replace('..', '.')
                    except:
                        continue

                    results.append({
                        'id': result['MovieID'],
                        'name': title,
                        'url': result['TorrentUrl'],
                        'detail_url': self.urls['detail'] % result['MovieID'],
                        'size': self.parseSize(result['Size']),
                        'seeders': tryInt(result['TorrentSeeds']),
                        'leechers': tryInt(result['TorrentPeers'])
                    })

            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
Example #22
    def _search(self, movie, quality, results):
        log.debug('Session response: %s' % (self.response.cookies))
        if not self.login_opener and not self.login():
            return

        imdb_id = movie['library']['identifier']
        search = self.login_opener.get(self.urls['search'] % (imdb_id))

        j = search.json
        try:
            for result in j:
                url = self.urls['download'] % (result['title'], result['id'], self.conf('passkey'))
                detail = self.urls['detail'] % result['id']

                results.append({
                    'id': result['id'],
                    'name': result['title'],
                    'url': url,
                    'detail_url': detail,
                    'size': self.parseSize(result['size']),
                    'seeders': tryInt(result['seeder']),
                    'leechers': tryInt(result['leecher'])
                })

                log.debug('Results: %s' % (results))

        except:
            log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
Example #23
    def ageToDays(self, age_str):
        if 'aujour' in age_str.lower():
            return tryInt('0')
        elif 'hier' in age_str.lower():
            return tryInt('1')

        # The rest of the string holds a literal date: day, (French) month abbreviation, year
        date_str = age_str[0:2] + '-' + self.littonum(age_str[3:6]) + '-' + age_str[7:11]
        try:
            from_dt = datetime.datetime.strptime(date_str, '%d-%m-%Y')
        except:
            from_dt = datetime.datetime.strptime(date_str, '%m-%d-%Y')

        # Parse today's locale-formatted date, trying the same formats as the original cascade
        to_dt = None
        today = time.strftime('%x')
        for fmt in ('%d/%m/%Y', '%m/%d/%Y', '%m/%d/%y', '%d/%m/%y'):
            try:
                to_dt = datetime.datetime.strptime(today, fmt)
                break
            except:
                continue

        if to_dt is None:
            return tryInt('0')

        return tryInt((to_dt - from_dt).days)
Example #24
    def _searchOnTitle(self, title, movie, quality, results):

        q = '"%s %s"' % (title, movie['library']['year'])

        params = {
            '/browse.php?': None,
            'cata': 'yes',
            'jxt': 8,
            'jxw': 'b',
            'search': q,
        }

        data = self.getJsonData(self.urls['search'], params = params, opener = self.login_opener)
        try: torrents = data.get('Fs', [])[0].get('Cn', {}).get('torrents', [])
        except: return

        for torrent in torrents:
            results.append({
                'id': torrent['id'],
                'name': torrent['name'],
                'url': self.urls['download'] % (torrent['id'], torrent['fname']),
                'detail_url': self.urls['detail'] % torrent['id'],
                'size': self.parseSize(torrent.get('size')),
                'seeders': tryInt(torrent.get('seed')),
                'leechers': tryInt(torrent.get('leech')),
                'download': self.loginDownload,
            })
Example #25
    def _search(self, media, quality, results):

        nzbs = self.getRSSData(self.urls['search'] % self.buildUrl(media))

        for nzb in nzbs:

            nzbclub_id = tryInt(self.getTextElement(nzb, "link").split('/nzb_view/')[1].split('/')[0])
            enclosure = self.getElement(nzb, "enclosure").attrib
            size = enclosure['length']
            date = self.getTextElement(nzb, "pubDate")

            def extra_check(item):
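                # the full post description is cached for 300 days (25920000 seconds)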
                full_description = self.getCache('nzbclub.%s' % nzbclub_id, item['detail_url'], cache_timeout = 25920000)

                for ignored in ['ARCHIVE inside ARCHIVE', 'Incomplete', 'repair impossible']:
                    if ignored in full_description:
                        log.info('Wrong: Seems to be passworded or corrupted files: %s', item['name'])
                        return False

                return True

            results.append({
                'id': nzbclub_id,
                'name': toUnicode(self.getTextElement(nzb, "title")),
                'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
                'size': tryInt(size) / 1024 / 1024,
                'url': enclosure['url'].replace(' ', '_'),
                'detail_url': self.getTextElement(nzb, "link"),
                'get_more_info': self.getMoreInfo,
                'extra_check': extra_check
            })
Example #26
    def _search(self, movie, quality, results):
        title = getIdentifier(movie)
        data = self._post_query(title, self.getNorbitsQuality(quality.get('custom').get('quality')))
        if data:
            log.info('We got data: %s' % data)
            try:
                for result in data:
                    log.info('We got result: %s' % result)
                    download_url = self.getDownloadUrl(result['id'])
                    details_url = self.urls['detail'] % result['id']
                    log.info('Download url: %s' % download_url)
                    log.info('Details url: %s' % details_url)
                    append_data = {
                        'id': result['id'],
                        'name': result['name'],
                        'detail_url': details_url,
                        'size': tryInt(int(result['size']) / 1024 / 1024),
                        'seeders': tryInt(result['seeders']),
                        'leechers': tryInt(result['leechers']),
                        'url': download_url
                    }
                    log.info('Appending data: %s' % json.dumps(append_data))
                    results.append(append_data)
            except:
                log.error('Failed getting results from %s: %s' % (self.getName(), traceback.format_exc()))
            finally:
                log.info('Final results: %s' % results)
        return results
Example #27
    def _searchOnTitle(self, title, movie, quality, results):

        q = '%s %s' % (title, movie['info']['year'])
        params = tryUrlencode({
            'search': q,
            'catid': ','.join([str(x) for x in self.getCatId(quality)]),
            'user': self.conf('username', default = ''),
            'api': self.conf('api_key', default = ''),
        })
        
        if len(self.conf('custom_tag')) > 0:
            params = '%s&%s' % (params, self.conf('custom_tag'))

        nzbs = self.getJsonData(self.urls['search'] % params)

        if isinstance(nzbs, list):
            for nzb in nzbs:

                results.append({
                    'id': nzb.get('nzbid'),
                    'name': toUnicode(nzb.get('release')),
                    'age': self.calculateAge(tryInt(nzb.get('usenetage'))),
                    'size': tryInt(nzb.get('sizebytes')) / 1024 / 1024,
                    'url': nzb.get('getnzb'),
                    'detail_url': nzb.get('details'),
                    'description': nzb.get('weblink')
                })
Example #28
    def _searchOnTitle(self, title, movie, quality, results):

        scene_only = '1' if self.conf('scene_only') else ''

        url = self.urls['search'] % (tryUrlencode('%s %s' % (title.replace(':', ''), movie['library']['year'])), scene_only, self.getCatId(quality['identifier'])[0])
        data = self.getHTMLData(url)

        if data:
            html = BeautifulSoup(data)

            try:
                result_table = html.find('table', attrs = {'id' : 'torrent_table'})
                if not result_table:
                    return

                entries = result_table.find_all('tr', attrs = {'class' : 'torrent'})

                for result in entries:

                    link = result.find('span', attrs = {'class' : 'torrent_name_link'}).parent
                    url = result.find('td', attrs = {'class' : 'torrent_td'}).find('a')

                    results.append({
                        'id': link['href'].replace('torrents.php?torrentid=', ''),
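                        # translate() strips soft-hyphen characters (U+00AD) from the name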
                        'name': unicode(link.span.string).translate({ord(u'\xad'): None}),
                        'url': self.urls['download'] % url['href'],
                        'detail_url': self.urls['download'] % link['href'],
                        'size': self.parseSize(result.find_all('td')[4].string),
                        'seeders': tryInt(result.find_all('td')[6].string),
                        'leechers': tryInt(result.find_all('td')[7].string),
                    })

            except:
                log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))
Example #29
    def fill(self):

        try:
            db = get_db()

            order = 0
            for q in self.qualities:

                db.insert({
                    '_t': 'quality',
                    'order': order,
                    'identifier': q.get('identifier'),
                    'size_min': tryInt(q.get('size')[0]),
                    'size_max': tryInt(q.get('size')[1]),
                })

                log.info('Creating profile: %s', q.get('label'))
                db.insert({
                    '_t': 'profile',
                    'order': order + 20,  # Make sure it goes behind other profiles
                    'core': True,
                    'qualities': [q.get('identifier')],
                    'label': toUnicode(q.get('label')),
                    'finish': [True],
                    'wait_for': [0],
                })

                order += 1

            return True
        except:
            log.error('Failed: %s', traceback.format_exc())

        return False
Example #30
    def _searchOnTitle(self, title, movie, quality, results):

        freeleech = '' if not self.conf('freeleech') else '&free=on'

        pages = 1
        current_page = 1
        while current_page <= pages and not self.shuttingDown():

            url = self.urls['search'] % (self.getCatId(quality['identifier'])[0], freeleech, tryUrlencode('%s %s' % (title.replace(':', ''), movie['library']['year'])), current_page)
            data = self.getHTMLData(url, opener = self.login_opener)

            if data:
                html = BeautifulSoup(data)

                try:
                    page_nav = html.find('span', attrs = {'class' : 'page_nav'})
                    if page_nav:
                        next_link = page_nav.find("a", text = "Next")
                        if next_link:
                            final_page_link = next_link.previous_sibling.previous_sibling
                            pages = int(final_page_link.string)

                    result_table = html.find('table', attrs = {'class' : 'torrents'})

                    if not result_table or 'nothing found!' in data.lower():
                        return

                    entries = result_table.find_all('tr')

                    for result in entries[1:]:

                        torrent = result.find_all('td')
                        if len(torrent) <= 1:
                            break

                        torrent = torrent[1].find('a')

                        torrent_id = torrent['href'].replace('/details.php?id=', '')
                        torrent_name = torrent.string
                        torrent_download_url = self.urls['base_url'] + (result.find_all('td')[3].find('a'))['href'].replace(' ', '.')
                        torrent_details_url = self.urls['base_url'] + torrent['href']
                        torrent_size = self.parseSize(result.find_all('td')[5].string)
                        torrent_seeders = tryInt(result.find('td', attrs = {'class' : 'ac t_seeders'}).string)
                        torrent_leechers = tryInt(result.find('td', attrs = {'class' : 'ac t_leechers'}).string)

                        results.append({
                            'id': torrent_id,
                            'name': torrent_name,
                            'url': torrent_download_url,
                            'detail_url': torrent_details_url,
                            'size': torrent_size,
                            'seeders': torrent_seeders,
                            'leechers': torrent_leechers,
                        })

                except:
                    log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))
                    break

            current_page += 1
Example #31
    def _search(self, media, quality, results):

        data = self.getHTMLData(self.urls['search'] % self.buildUrl(media, quality))

        if data:
            try:

                html = BeautifulSoup(data)
                main_table = html.find('table', attrs = {'id': 'r2'})

                if not main_table:
                    return

                items = main_table.find_all('tr')

                for row in items:
                    title = row.find('span', attrs = {'class': 's'})

                    if not title: continue

                    nzb_id = row.find('input', attrs = {'type': 'checkbox'})['name']
                    info = row.find('span', attrs = {'class':'d'})
                    size_match = re.search('size:.(?P<size>[0-9\.]+.[GMB]+)', info.text)

                    age = 0
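                    # the last cell shows the age like '12d'; the trailing 'd' is stripped below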
                    try: age = re.search('(?P<size>\d+d)', row.find_all('td')[-1:][0].text).group('size')[:-1]
                    except: pass

                    def extra_check(item):
                        parts = re.search('available:.(?P<parts>\d+)./.(?P<total>\d+)', info.text)
                        total = float(tryInt(parts.group('total')))
                        parts = float(tryInt(parts.group('parts')))

                        if (total / parts) < 1 and ((total / parts) < 0.95 or ((total / parts) >= 0.95 and not ('par2' in info.text.lower() or 'pa3' in info.text.lower()))):
                            log.info2('Wrong: \'%s\', not complete: %s out of %s', (item['name'], parts, total))
                            return False

                        if 'requires password' in info.text.lower():
                            log.info2('Wrong: \'%s\', passworded', (item['name']))
                            return False

                        return True

                    results.append({
                        'id': nzb_id,
                        'name': simplifyString(title.text),
                        'age': tryInt(age),
                        'size': self.parseSize(size_match.group('size')),
                        'url': self.urls['download'] % nzb_id,
                        'detail_url': self.urls['detail'] % info.find('a')['href'],
                        'extra_check': extra_check
                    })

            except:
                log.error('Failed to parse HTML response from BinSearch: %s', traceback.format_exc())
Example #32
class NZBGet(Downloader):

    type = ['nzb']

    url = 'http://*****:*****@%(host)s/xmlrpc'

    def download(self, data={}, movie={}, filedata=None):

        if not filedata:
            log.error('Unable to get NZB file: %s', traceback.format_exc())
            return False

        log.info('Sending "%s" to NZBGet.', data.get('name'))

        url = self.url % {
            'host': self.conf('host'),
            'password': self.conf('password')
        }
        nzb_name = ss('%s.nzb' % self.createNzbName(data, movie))

        rpc = xmlrpclib.ServerProxy(url)
        try:
            if rpc.writelog('INFO', 'CouchPotato connected to drop off %s.' %
                            nzb_name):
                log.info('Successfully connected to NZBGet')
            else:
                log.info(
                    'Successfully connected to NZBGet, but unable to send a message'
                )
        except socket.error:
            log.error(
                'NZBGet is not responding. Please ensure that NZBGet is running and host setting is correct.'
            )
            return False
        except xmlrpclib.ProtocolError as e:
            if e.errcode == 401:
                log.error('Password is incorrect.')
            else:
                log.error('Protocol Error: %s', e)
            return False

        if re.search(r"^0", rpc.version()):
            xml_response = rpc.append(nzb_name, self.conf('category'), False,
                                      standard_b64encode(filedata.strip()))
        else:
            xml_response = rpc.append(nzb_name, self.conf('category'),
                                      tryInt(self.conf('priority')), False,
                                      standard_b64encode(filedata.strip()))

        if xml_response:
            log.info('NZB sent successfully to NZBGet')
            return True
        else:
            log.error('NZBGet could not add %s to the queue.', nzb_name)
            return False
Example #33
    def download(self, data, movie, filedata=None):

        log.info('Sending "%s" (%s) to Transmission.',
                 (data.get('name'), data.get('protocol')))

        if not self.connect():
            return False

        if not filedata and data.get('protocol') == 'torrent':
            log.error('Failed sending torrent, no data')
            return False

        # Set parameters for adding torrent
        params = {'paused': self.conf('paused', default=False)}

        if self.conf('directory'):
            if os.path.isdir(self.conf('directory')):
                params['download-dir'] = self.conf('directory')
            else:
                log.error(
                    'Download directory from Transmission settings: %s doesn\'t exist',
                    self.conf('directory'))

        # Change parameters of torrent
        torrent_params = {}
        if data.get('seed_ratio'):
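            # seedRatioMode 1 = use this torrent's own ratio limit instead of the global setting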
            torrent_params['seedRatioLimit'] = tryFloat(data.get('seed_ratio'))
            torrent_params['seedRatioMode'] = 1

        if data.get('seed_time'):
            torrent_params['seedIdleLimit'] = tryInt(
                data.get('seed_time')) * 60
            torrent_params['seedIdleMode'] = 1

        # Send request to Transmission
        if data.get('protocol') == 'torrent_magnet':
            remote_torrent = self.trpc.add_torrent_uri(data.get('url'),
                                                       arguments=params)
            torrent_params['trackerAdd'] = self.torrent_trackers
        else:
            remote_torrent = self.trpc.add_torrent_file(b64encode(filedata),
                                                        arguments=params)

        if not remote_torrent:
            log.error('Failed sending torrent to Transmission')
            return False

        # Change settings of added torrents
        if torrent_params:
            self.trpc.set_torrent(
                remote_torrent['torrent-added']['hashString'], torrent_params)

        log.info('Torrent sent to Transmission successfully.')
        return self.downloadReturnId(
            remote_torrent['torrent-added']['hashString'])
Example #34
    def _searchOnTitle(self, title, movie, quality, results):

        q = '"%s %s"' % (simplifyString(title), movie['library']['year'])
        arguments = tryUrlencode({
            'search': q,
        })
        url = "%s&%s" % (self.urls['search'], arguments)

        data = self.getHTMLData(url, opener = self.login_opener)

        if data:
            html = BeautifulSoup(data)

            try:
                resultsTable = html.find_all('table')[6]
                entries = resultsTable.find_all('tr')
                for result in entries[1:]:

                    all_cells = result.find_all('td')

                    detail_link = all_cells[2].find('a')
                    details = detail_link['href']
                    torrent_id = details.replace('details.php?id=', '')

                    leechers = all_cells[11].find('a')
                    if leechers:
                        leechers = leechers.string
                    else:
                        leechers = all_cells[11].string

                    results.append({
                        'id': torrent_id,
                        'name': detail_link['title'],
                        'size': self.parseSize(all_cells[7].string),
                        'seeders': tryInt(all_cells[10].find('a').string),
                        'leechers': tryInt(leechers),
                        'url': self.urls['download'] % torrent_id,
                        'description': all_cells[1].find('a')['href'],
                    })

            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
Example #35
    def _searchOnTitle(self, title, movie, quality, results):

        q = '"%s %s"' % (title, movie['library']['year'])

        params = tryUrlencode({
            'q': q,
            'ig': 1,
            'rpp': 200,
            'st': 5,
            'sp': 1,
            'ns': 1,
        })

        nzbs = self.getRSSData(self.urls['search'] % params)

        for nzb in nzbs:

            nzbclub_id = tryInt(self.getTextElement(nzb, "link").split('/nzb_view/')[1].split('/')[0])
            enclosure = self.getElement(nzb, "enclosure").attrib
            size = enclosure['length']
            date = self.getTextElement(nzb, "pubDate")

            def extra_check(item):
                full_description = self.getCache('nzbclub.%s' % nzbclub_id, item['detail_url'], cache_timeout = 25920000)

                for ignored in ['ARCHIVE inside ARCHIVE', 'Incomplete', 'repair impossible']:
                    if ignored in full_description:
                        log.info('Wrong: Seems to be passworded or corrupted files: %s', item['name'])
                        return False

                return True

            results.append({
                'id': nzbclub_id,
                'name': toUnicode(self.getTextElement(nzb, "title")),
                'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
                'size': tryInt(size) / 1024 / 1024,
                'url': enclosure['url'].replace(' ', '_'),
                'detail_url': self.getTextElement(nzb, "link"),
                'get_more_info': self.getMoreInfo,
                'extra_check': extra_check
            })
Example #36
    def fill(self):

        try:
            db = get_db()

            order = 0
            for q in self.qualities:

                existing = None
                try:
                    existing = db.get('quality', q.get('identifier'))
                except RecordNotFound:
                    pass

                if not existing:
                    db.insert({
                        '_t': 'quality',
                        'order': order,
                        'identifier': q.get('identifier'),
                        'size_min': tryInt(q.get('size')[0]),
                        'size_max': tryInt(q.get('size')[1]),
                    })

                    log.info('Creating profile: %s', q.get('label'))
                    db.insert({
                        '_t': 'profile',
                        'order': order + 20,  # Make sure it goes behind other profiles
                        'core': True,
                        'qualities': [q.get('identifier')],
                        'label': toUnicode(q.get('label')),
                        'finish': [True],
                        'wait_for': [0],
                    })

                order += 1

            return True
        except:
            log.error('Failed: %s', traceback.format_exc())

        return False
Example #37
    def _search(self, media, quality, results):

        url = self.urls['search'] % self.buildUrl(media, quality)
        data = self.getHTMLData(url)

        if data:
            html = BeautifulSoup(data)

            try:
                result_table = html.find('table',
                                         attrs={'id': 'torrent_table'})
                if not result_table:
                    return

                entries = result_table.find_all('tr',
                                                attrs={'class': 'torrent'})
                for result in entries:

                    link = result.find('a', attrs={'dir': 'ltr'})
                    url = result.find('a', attrs={'title': 'Download'})
                    tds = result.find_all('td')

                    results.append({
                        'id': link['href'].replace('torrents.php?id=', '').split('&')[0],
                        'name': link.contents[0],
                        'url': self.urls['download'] % url['href'],
                        'detail_url': self.urls['download'] % link['href'],
                        'size': self.parseSize(tds[len(tds) - 4].string),
                        'seeders': tryInt(tds[len(tds) - 2].string),
                        'leechers': tryInt(tds[len(tds) - 1].string),
                    })
            except:
                log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))
Example #38
    def _search(self, movie, quality, results):

        domain = self.getDomain()
        if not domain:
            return

        search_url = self.urls['search'] % (domain, getIdentifier(movie),
                                            quality['identifier'])

        data = self.getJsonData(search_url)

        if data and data.get('MovieList'):
            try:
                for result in data.get('MovieList'):

                    if result['Quality'] and result['Quality'] not in result['MovieTitle']:
                        title = result['MovieTitle'] + ' BrRip ' + result['Quality']
                    else:
                        title = result['MovieTitle'] + ' BrRip'

                    results.append({
                        'id': result['MovieID'],
                        'name': title,
                        'url': result['TorrentMagnetUrl'],
                        'detail_url': self.urls['detail'] % (domain, result['MovieID']),
                        'size': self.parseSize(result['Size']),
                        'seeders': tryInt(result['TorrentSeeds']),
                        'leechers': tryInt(result['TorrentPeers']),
                    })

            except:
                log.error('Failed getting results from %s: %s',
                          (self.getName(), traceback.format_exc()))
Example #39
    def ageToDays(self, age_str):
        age = 0
        age_str = age_str.replace('&nbsp;', ' ')
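        # Age units are French: sec = second, heure(s) = hour, jour(s) = day, semaine(s) = week, mois = month, an(s) = year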
        regex = '(\d*.?\d+).(sec|heure|heures|jour|jours|semaine|semaines|mois|ans|an)+'
        matches = re.findall(regex, age_str)
        for match in matches:
            nr, size = match
            mult = 0
            if size in ('jour', 'jours'):
                mult = 1
            elif size in ('semaine', 'semaines'):
                mult = 7
            elif size == 'mois':
                mult = 30
            elif size in ('an', 'ans'):
                mult = 365

            age += tryInt(nr) * mult

        return tryInt(age)
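
For context, the method above converts a French relative-age string into whole days by mapping each unit to a day multiplier (seconds and hours fall through with a multiplier of 0). A minimal standalone sketch of the same conversion, using a hypothetical try_int stand-in for the project's tryInt helper:

    import re

    def try_int(value, default = 0):
        # Hypothetical stand-in for the tryInt helper used above.
        try:
            return int(float(value))
        except (TypeError, ValueError):
            return default

    def age_to_days(age_str):
        multipliers = {'jour': 1, 'jours': 1, 'semaine': 7, 'semaines': 7, 'mois': 30, 'an': 365, 'ans': 365}
        age = 0
        for nr, unit in re.findall(r'(\d*\.?\d+)\s*(sec|heures?|jours?|semaines?|mois|ans?)', age_str.replace('&nbsp;', ' ')):
            age += try_int(nr) * multipliers.get(unit, 0)
        return age

    # age_to_days('3&nbsp;semaines 2 jours') -> 3 * 7 + 2 * 1 = 23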
Ejemplo n.º 40
0
    def register(self, api_path, file_path, type, location):

        api_path = '%s?%s' % (api_path, tryInt(os.path.getmtime(file_path)))

        if not self.urls[type].get(location):
            self.urls[type][location] = []
        self.urls[type][location].append(api_path)

        if not self.paths[type].get(location):
            self.paths[type][location] = []
        self.paths[type][location].append(file_path)
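
For illustration, a standalone sketch of the same bookkeeping (hypothetical dict layout and paths; setdefault replaces the explicit existence checks):

    import os

    urls = {'script': {}}
    paths = {'script': {}}

    def register(api_path, file_path, type, location):
        # Append the file's mtime as a cache-busting query string.
        api_path = '%s?%s' % (api_path, int(os.path.getmtime(file_path)))
        urls[type].setdefault(location, []).append(api_path)
        paths[type].setdefault(location, []).append(file_path)

    # register('static/plugin/script.js', '/opt/app/static/plugin/script.js', 'script', 'front')
    # urls['script']['front'] would then hold ['static/plugin/script.js?<mtime>']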
Ejemplo n.º 41
0
def namePositionScore(nzb_name, movie_name):
    score = 0

    nzb_words = re.split('\W+', simplifyString(nzb_name))
    qualities = fireEvent('quality.all', single=True)

    try:
        nzb_name = re.search(r'([\'"])[^\1]*\1', nzb_name).group(0)
    except:
        pass

    name_year = fireEvent('scanner.name_year', nzb_name, single=True)

    # Give points for movies beginning with the correct name
    split_by = simplifyString(movie_name)
    name_split = []
    if len(split_by) > 0:
        name_split = simplifyString(nzb_name).split(split_by)
        if name_split[0].strip() == '':
            score += 10

    # If year is second in line, give more points
    if len(name_split) > 1 and name_year:
        after_name = name_split[1].strip()
        if tryInt(after_name[:4]) == name_year.get('year', None):
            score += 10
            after_name = after_name[4:]

        # Deduct points for junk between the year and the quality tag
        found_quality = None
        for quality in qualities:
            # Main in words
            if quality['identifier'] in nzb_words:
                found_quality = quality['identifier']

            # Alt in words
            for alt in quality['alternative']:
                if alt in nzb_words:
                    found_quality = alt
                    break

        if not found_quality:
            return score - 20

        allowed = []
        for value in name_scores:
            name, sc = value.split(':')
            allowed.append(name)

        inbetween = re.split('\W+', after_name.split(found_quality)[0].strip())

        score -= (10 * len(set(inbetween) - set(allowed)))

    return score
Ejemplo n.º 42
0
    def _searchOnTitle(self, title, movie, quality, results):

        url = self.urls['search'] % (tryUrlencode('%s %s' % (title.replace(':', ''), movie['library']['year'])), self.getCatId(quality['identifier'])[0])
        data = self.getHTMLData(url, opener = self.login_opener)

        if data:
            html = BeautifulSoup(data)

            try:
                result_table = html.find('table', attrs = {'id' : 'torrent_table'})
                if not result_table:
                    return

                entries = result_table.find_all('tr', attrs = {'class' : 'torrent'})

                for result in entries:

                    link = result.find('span', attrs = {'class' : 'torrent_name_link'}).parent
                    url = result.find('td', attrs = {'class' : 'torrent_td'}).find('a')

                    extra_info = ''
                    if result.find('span', attrs = {'class' : 'torrent_extra_info'}):
                        extra_info = result.find('span', attrs = {'class' : 'torrent_extra_info'}).text

                    release_name = unicode(link.span.string).translate({ord(u'\xad'): None})

                    if not self.conf('scene only') or extra_info != '[NotScene]':
                        results.append({
                            'id': link['href'].replace('torrents.php?torrentid=', ''),
                            'name': release_name,
                            'url': self.urls['download'] % url['href'],
                            'detail_url': self.urls['download'] % link['href'],
                            'download': self.loginDownload,
                            'size': self.parseSize(result.find_all('td')[4].string),
                            'seeders': tryInt(result.find_all('td')[6].string),
                            'leechers': tryInt(result.find_all('td')[7].string),
                        })
                        log.info('Adding release %s' % release_name)
                    else:
                        log.info('Not adding release %s [NotScene]' % release_name)

            except:
                log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))
Ejemplo n.º 43
0
    def _search(self, movie, quality, results):

        search_url = self.urls['search'] % (self.getDomain(),
                                            movie['library']['identifier'],
                                            quality['identifier'])

        data = self.getJsonData(search_url)

        if data and data.get('MovieList'):
            try:
                for result in data.get('MovieList'):

                    try:
                        title = result['TorrentUrl'].split('/')[-1][:-8].replace('_', '.').strip('._')
                        title = title.replace('.-.', '-')
                        title = title.replace('..', '.')
                    except:
                        continue

                    results.append({
                        'id': result['MovieID'],
                        'name': title,
                        'url': result['TorrentMagnetUrl'],
                        'detail_url': self.urls['detail'] % (self.getDomain(), result['MovieID']),
                        'size': self.parseSize(result['Size']),
                        'seeders': tryInt(result['TorrentSeeds']),
                        'leechers': tryInt(result['TorrentPeers']),
                    })

            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
Ejemplo n.º 44
0
    def _search(self, media, quality, results):

        query = self.buildUrl(media, quality)

        url = "%s&%s" % (self.urls['search'], query)

        data = self.getHTMLData(url)

        if data:
            # Remove BiT-HDTV's output garbage so outdated BS4 versions successfully parse the HTML
            split_data = data.partition('-->')
            if '## SELECT COUNT(' in split_data[0]:
                data = split_data[2]

            html = BeautifulSoup(data)

            try:
                result_table = html.find('table', attrs = {'width': '750', 'class': ''})
                if result_table is None:
                    return

                entries = result_table.find_all('tr')
                for result in entries[1:]:

                    cells = result.find_all('td')
                    link = cells[2].find('a')
                    torrent_id = link['href'].replace('/details.php?id=', '')

                    results.append({
                        'id': torrent_id,
                        'name': link.contents[0].get_text(),
                        'url': cells[0].find('a')['href'],
                        'detail_url': self.urls['detail'] % torrent_id,
                        'size': self.parseSize(cells[6].get_text()),
                        'seeders': tryInt(cells[8].string),
                        'leechers': tryInt(cells[9].string),
                        'get_more_info': self.getMoreInfo,
                    })

            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
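
The partition('-->') step above works because str.partition returns a (before, separator, after) 3-tuple, so everything the tracker leaks before the first closing HTML comment can be discarded. A quick illustration with a made-up response:

    page = '## SELECT COUNT(*) FROM torrents ... --><html><body>results</body></html>'
    before, sep, after = page.partition('-->')
    # after == '<html><body>results</body></html>', which is what BeautifulSoup should parse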
Ejemplo n.º 45
0
    def edit(self, id = '', **kwargs):

        db = get_session()

        available_status = fireEvent('status.get', 'available', single = True)

        ids = splitString(id)
        for media_id in ids:

            m = db.query(Media).filter_by(id = media_id).first()
            if not m:
                continue

            m.profile_id = kwargs.get('profile_id')

            cat_id = kwargs.get('category_id')
            if cat_id is not None:
                m.category_id = tryInt(cat_id) if tryInt(cat_id) > 0 else None

            # Remove releases
            for rel in m.releases:
                if rel.status_id == available_status.get('id'):
                    db.delete(rel)
                    db.commit()

            # Default title
            if kwargs.get('default_title'):
                for title in m.library.titles:
                    title.default = toUnicode(kwargs.get('default_title', '')).lower() == toUnicode(title.title).lower()

            db.commit()

            fireEvent('media.restatus', m.id)

            movie_dict = m.to_dict(self.default_dict)
            fireEventAsync('movie.searcher.single', movie_dict, on_complete = self.createNotifyFront(media_id))

        db.expire_all()
        return {
            'success': True,
        }
Ejemplo n.º 46
0
    def _search(self, movie, quality, results):
        headers = {}
        headers['Authorization'] = self.auth_token

        for title in movie['info']['titles']:
            try:
                TitleStringReal = str(title.encode("latin-1").replace('-', ' '))

                url = self.urls['search'] % TitleStringReal
                url = url + '?cat=631&limit=100'
                data = self.getJsonData(url, None, headers = headers)

                for currentresult in data['torrents']:
                    if currentresult['categoryname'] in ['Film', 'Animation']:
                        name = currentresult['name']
                        splittedReleaseName = re.split('(?:\(|\.|\s)([0-9]{4})(?:\)|\.|\s)', name, flags=re.IGNORECASE)

                        if len(splittedReleaseName) > 1:
                            cleanedReleaseName = ''.join(splittedReleaseName[0:-2])

                            match = re.compile(ur"[\w]+", re.UNICODE)
                            nameSplit = ''.join(match.findall(unicodedata.normalize('NFKD', cleanedReleaseName.upper()).encode('ASCII', 'ignore')))
                            titleSplit = ''.join(match.findall(unicodedata.normalize('NFKD', title.upper()).encode('ASCII', 'ignore')))

                            if titleSplit == nameSplit:
                                new = {}
                                new['id'] = currentresult['id']
                                new['name'] = name
                                new['url'] = self.urls['download'] % (currentresult['id'])
                                new['detail_url'] = self.urls['torrent'] % (currentresult['rewritename'])
                                new['size'] = tryInt(currentresult['size']) / 1024 / 1024
                                new['seeders'] = tryInt(currentresult['seeders'])
                                new['leechers'] = tryInt(currentresult['leechers'])
                                new['authtoken'] = self.auth_token
                                new['download'] = self.loginDownload

                                results.append(new)
            except:
                continue

        return
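
The NFKD normalisation above is what lets an accented title match its ASCII release name. A simplified, runnable illustration of that step (Python 2, hypothetical title):

    # -*- coding: utf-8 -*-
    import re
    import unicodedata

    title = u'Amélie'
    normalized = unicodedata.normalize('NFKD', title.upper()).encode('ASCII', 'ignore')
    print ''.join(re.findall(r'[\w]+', normalized))
    # prints: AMELIE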
Ejemplo n.º 47
0
    def ageToDays(self, age_str):
        try:
            from_dt = datetime.datetime.strptime(age_str[9:11] + '-' + age_str[12:14] + '-' + age_str[15:], "%d-%m-%Y")
        except:
            from_dt = datetime.datetime.strptime(age_str[9:11] + '-' + age_str[12:14] + '-' + age_str[15:], "%m-%d-%Y")
        try:
            to_dt = datetime.datetime.strptime(time.strftime("%x"), "%d/%m/%Y")
        except:
            to_dt = datetime.datetime.strptime(time.strftime("%x"), "%m/%d/%Y")
        timedelta = to_dt - from_dt
        diff_day = timedelta.days
        return tryInt(diff_day)
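
The double strptime of time.strftime('%x') above only exists to guess the locale's day/month order; a simpler sketch of the same age calculation (assuming the same fixed slice positions in age_str) can compare against datetime.date.today() directly:

    import datetime

    def age_to_days(age_str):
        # Same field positions as above; try day-month order first, then month-day.
        raw = '%s-%s-%s' % (age_str[9:11], age_str[12:14], age_str[15:])
        for fmt in ('%d-%m-%Y', '%m-%d-%Y'):
            try:
                from_dt = datetime.datetime.strptime(raw, fmt).date()
                return (datetime.date.today() - from_dt).days
            except ValueError:
                continue
        return 0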
Ejemplo n.º 48
0
    def makeRelative(self):

        for static_type in self.paths:

            updates_paths = []
            for rel_path in self.paths.get(static_type):
                file_path = os.path.join(Env.get('app_dir'), 'couchpotato', 'static', rel_path)
                core_url = 'static/%s?%d' % (rel_path, tryInt(os.path.getmtime(file_path)))

                updates_paths.append(core_url)

            self.paths[static_type] = updates_paths
Ejemplo n.º 49
0
class NZBGet(Downloader):

    protocol = ['nzb']

    url = 'http://%(username)s:%(password)s@%(host)s/xmlrpc'

    def download(self, data = None, movie = None, filedata = None):
        if not movie: movie = {}
        if not data: data = {}

        if not filedata:
            log.error('Unable to get NZB file: %s', traceback.format_exc())
            return False

        log.info('Sending "%s" to NZBGet.', data.get('name'))

        url = self.url % {'host': self.conf('host'), 'username': self.conf('username'), 'password': self.conf('password')}
        nzb_name = ss('%s.nzb' % self.createNzbName(data, movie))

        rpc = xmlrpclib.ServerProxy(url)
        try:
            if rpc.writelog('INFO', 'CouchPotato connected to drop off %s.' % nzb_name):
                log.debug('Successfully connected to NZBGet')
            else:
                log.info('Successfully connected to NZBGet, but unable to send a message')
        except socket.error:
            log.error('NZBGet is not responding. Please ensure that NZBGet is running and host setting is correct.')
            return False
        except xmlrpclib.ProtocolError, e:
            if e.errcode == 401:
                log.error('Password is incorrect.')
            else:
                log.error('Protocol Error: %s', e)
            return False

        if re.search(r"^0", rpc.version()):
            xml_response = rpc.append(nzb_name, self.conf('category'), False, standard_b64encode(filedata.strip()))
        else:
            xml_response = rpc.append(nzb_name, self.conf('category'), tryInt(self.conf('priority')), False, standard_b64encode(filedata.strip()))

        if xml_response:
            log.info('NZB sent successfully to NZBGet')
            nzb_id = md5(data['url']) # about as unique as they come ;)
            couchpotato_id = "couchpotato=" + nzb_id
            groups = rpc.listgroups()
            file_id = [item['LastID'] for item in groups if item['NZBFilename'] == nzb_name]
            confirmed = rpc.editqueue("GroupSetParameter", 0, couchpotato_id, file_id)
            if confirmed:
                log.debug('couchpotato parameter set in nzbget download')
            return self.downloadReturnId(nzb_id)
        else:
            log.error('NZBGet could not add %s to the queue.', nzb_name)
            return False
Ejemplo n.º 50
0
    def setCrons(self):

        fireEvent('schedule.remove', 'manage.update_library')
        refresh = tryInt(self.conf('library_refresh_interval'))
        if refresh > 0:
            fireEvent('schedule.interval',
                      'manage.update_library',
                      self.updateLibrary,
                      hours=refresh,
                      single=True)

        return True
Ejemplo n.º 51
0
    def parseResults(self, results, entries, year, quality, title, it_title):
        for result in entries:
            # Build a fresh dict per result; reusing a single dict would make every
            # appended entry alias the same object.
            new = {}
            tds = result.findAll('td')

            try:
                new['age'], new['size'] = self.getTorrentInfo(tds[6].a['href'])
                new['url'] = tds[0].a['href']
                new['detail_url'] = tds[6].a['href']
                new['id'] = tds[6].a['href'].split('showtopic=')[1]
                new['name'] = self.standardize_title(tds[6].a.text, it_title,
                                                     title, year, quality,
                                                     self.desc)
                new['seeders'] = tryInt(tds[4].text)
                new['leechers'] = tryInt(tds[3].text)
                new['score'] = self.conf('extra_score') + 20
            except Exception as e:
                log.info(e)
                continue

            results.append(new)
Ejemplo n.º 52
0
    def getIMDBids(self):

        movies = []

        watchlist_enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]
        watchlist_urls = splitString(self.conf('automation_urls'))

        index = -1
        for watchlist_url in watchlist_urls:

            try:
                # Get list ID
                ids = re.findall('(?:list/|list_id=)([a-zA-Z0-9\-_]{11})', watchlist_url)
                if len(ids) == 1:
                    watchlist_url = 'http://www.imdb.com/list/%s/?view=compact&sort=created:asc' % ids[0]
                # Try find user id with watchlist
                else:
                    userids = re.findall('(ur\d{7,9})', watchlist_url)
                    if len(userids) == 1:
                        watchlist_url = 'http://www.imdb.com/user/%s/watchlist?view=compact&sort=created:asc' % userids[0]
            except:
                log.error('Failed getting id from watchlist: %s', traceback.format_exc())

            index += 1
            if index >= len(watchlist_enablers) or not watchlist_enablers[index]:
                continue

            start = 0
            while True:
                try:

                    w_url = '%s&start=%s' % (watchlist_url, start)
                    imdbs = self.getFromURL(w_url)

                    for imdb in imdbs:
                        if imdb not in movies:
                            movies.append(imdb)

                        if self.shuttingDown():
                            break

                    log.debug('Found %s movies on %s', (len(imdbs), w_url))

                    if len(imdbs) < 225:
                        break

                    start = len(movies)

                except:
                    log.error('Failed loading IMDB watchlist: %s %s', (watchlist_url, traceback.format_exc()))
                    break

        return movies
Ejemplo n.º 53
0
                    def extra_check(item):
                        parts = re.search(
                            'available:.(?P<parts>\d+)./.(?P<total>\d+)',
                            info.text)
                        total = tryInt(parts.group('total'))
                        parts = tryInt(parts.group('parts'))

                        if (total / parts) < 0.95 or ((total / parts) >= 0.95 and 'par2' not in info.text.lower()):
                            log.info2('Wrong: \'%s\', not complete: %s out of %s', (item['name'], parts, total))
                            return False

                        if 'requires password' in info.text.lower():
                            log.info2('Wrong: \'%s\', passworded', (item['name']))
                            return False

                        return True
Ejemplo n.º 54
0
    def _searchOnTitle(self, title, movie, quality, results):
        log.debug('Searching T411 for %s' % (title))
        url = self.urls['search'] % (title.replace(':', ''))
        try:
            output = self.getJsonData(url, cache_timeout = 30, headers = {"Authorization": self.token})
        except:
            return

        for entry in output['torrents']:
            try:
                log.debug(entry)
                #log.debug("NAME: "+entry['name']+"  SIZE:  "+self.parseSize(str(tryInt(tryInt(entry['size'])/1024))+"kb"))
                results.append({
                    'id': entry['id'],
                    'name': entry['name'],
                    'url': self.urls['download'] % entry['id'],
                    'detail_url': self.urls['detail'] % entry['id'],
                    'size': self.parseSize(str(tryInt(tryInt(entry['size'])/1024))+"kb"),
                    'seeders': entry['seeders'],
                    'leechers': entry['leechers'],
                })
            except:
                error = traceback.format_exc()
Ejemplo n.º 55
0
    def _searchOnTitle(self, title, movie, quality, results):
        log.debug('Searching T411 for %s' % (title))
        url = self.urls['search'] % (title.replace(':', ''))
        try:
            output = self.getJsonData(url, cache_timeout = 30, headers = {"Authorization": self.token})
        except:
            return
        if 'torrents' in output:
            for entry in output['torrents']:

                #Calculate the age of the release
                try:
                    pubdate = entry['added']
                    pubdate = datetime.strptime(pubdate, '%Y-%m-%d %H:%M:%S')
                    now = datetime.utcnow()
                    age = (now - pubdate).days
                except ValueError:
                    log.debug('T411: Bad age')
                    age = 0
                except:
                    log.warning('Something weird happen with the age')
                    age = 0

                #Produce the output
                try:
                    log.debug(entry)
                    #log.debug("NAME: "+entry['name']+"  SIZE:  "+self.parseSize(str(tryInt(tryInt(entry['size'])/1024))+"kb"))
                    results.append({
                        'id': entry['id'],
                        'name': entry['name'],
                        'url': self.urls['download'] % entry['id'],
                        'detail_url': self.urls['detail'] % entry['id'],
                        'size': self.parseSize(str(tryInt(tryInt(entry['size'])/1024))+"kb"),
                        'seeders': entry['seeders'],
                        'age': age,
                        'leechers': entry['leechers'],
                        })
                except:
                    error = traceback.format_exc()
        else:
            log.info('No torrent found for : %s' % (title))
Ejemplo n.º 56
0
    def partial(self, type='all', lines=30, offset=0, **kwargs):

        total_lines = tryInt(lines)
        offset = tryInt(offset)

        log_lines = []

        for x in range(0, 50):

            path = '%s%s' % (Env.get('log_path'), '.%s' % x if x > 0 else '')

            # Check see if the log exists
            if not os.path.isfile(path):
                break

            with open(path, 'r') as f:
                log_content = toUnicode(f.read())

            raw_lines = self.toList(log_content)
            raw_lines.reverse()

            brk = False
            for line in raw_lines:

                if type == 'all' or line.get('type') == type.upper():
                    log_lines.append(line)

                if len(log_lines) >= (total_lines + offset):
                    brk = True
                    break

            if brk:
                break

        log_lines = log_lines[offset:]
        log_lines.reverse()

        return {
            'success': True,
            'log': log_lines,
        }
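
For reference, a hypothetical direct call to this handler pages backwards through the rotated log files and returns the selected slice in chronological order:

    # Hypothetical usage: the 30 most recent ERROR lines, skipping the newest 10.
    # response = self.partial(type = 'error', lines = 30, offset = 10)
    # for line in response['log']:
    #     print line.get('type')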
Ejemplo n.º 57
0
def sceneScore(nzb_name):

    check_names = [nzb_name]

    # Match names between "
    try:
        check_names.append(re.search(r'([\'"])[^\1]*\1', nzb_name).group(0))
    except:
        pass

    # Match longest name between []
    try:
        check_names.append(
            max(re.findall(r'[^[]*\[([^]]*)\]', nzb_name), key=len).strip())
    except:
        pass

    for name in check_names:

        # Strip twice, remove possible file extensions
        name = name.lower().strip(' "\'\.-_\[\]')
        name = re.sub('\.([a-z0-9]{0,4})$', '', name)
        name = name.strip(' "\'\.-_\[\]')

        # Make sure year and groupname is in there
        year = re.findall('(?P<year>19[0-9]{2}|20[0-9]{2})', name)
        group = re.findall('\-([a-z0-9]+)$', name)

        if len(year) > 0 and len(group) > 0:
            try:
                validate = fireEvent('release.validate', name, single=True)
                if validate and tryInt(validate.get('score')) != 0:
                    log.debug(
                        'Release "%s" scored %s, reason: %s',
                        (nzb_name, validate['score'], validate['reasons']))
                    return tryInt(validate.get('score'))
            except:
                log.error('Failed scoring scene: %s', traceback.format_exc())

    return 0
Ejemplo n.º 58
0
    def getIMDBids(self):

        movies = []

        enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]
        urls = splitString(self.conf('automation_urls'))

        namespace = 'http://www.w3.org/2005/Atom'
        namespace_im = 'http://itunes.apple.com/rss'

        index = -1
        for url in urls:

            index += 1
            if len(enablers) == 0 or len(enablers) <= index or not enablers[index]:
                continue

            try:
                cache_key = 'itunes.rss.%s' % md5(url)
                rss_data = self.getCache(cache_key, url)

                data = XMLTree.fromstring(rss_data)

                if data is not None:
                    entry_tag = str(QName(namespace, 'entry'))
                    rss_movies = self.getElements(data, entry_tag)

                    for movie in rss_movies:
                        name_tag = str(QName(namespace_im, 'name'))
                        name = self.getTextElement(movie, name_tag)

                        releaseDate_tag = str(QName(namespace_im, 'releaseDate'))
                        releaseDateText = self.getTextElement(movie, releaseDate_tag)
                        year = datetime.datetime.strptime(releaseDateText, '%Y-%m-%dT00:00:00-07:00').strftime("%Y")

                        imdb = self.search(name, year)

                        if imdb and self.isMinimalMovie(imdb):
                            movies.append(imdb['imdb'])

            except:
                log.error('Failed loading iTunes rss feed: %s %s', (url, traceback.format_exc()))

        return movies
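
The str(QName(...)) calls above just build the Clark-notation tag names ElementTree expects, for example:

    from xml.etree.ElementTree import QName

    print str(QName('http://www.w3.org/2005/Atom', 'entry'))
    # prints: {http://www.w3.org/2005/Atom}entry
    print str(QName('http://itunes.apple.com/rss', 'name'))
    # prints: {http://itunes.apple.com/rss}name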
Ejemplo n.º 59
0
    def _search(self, movie, quality, results):
        matched_category_ids = self.getCatId(quality)
        cat_id = ','.join(map(str, matched_category_ids))
        url = self.urls['search'] % self.buildUrl(movie, cat_id)
        data = self.getJsonData(url)

        try:
            for result in data:
                freeleech = tryInt(result['freeleech'])
                torrent_score = 1
                torrent_score += self.conf('extra_score')
                if freeleech:
                    torrent_score += self.conf('freeleech_score')
                elif self.conf('freeleech_only'):
                    continue
                results.append({
                    'id': tryInt(result['id']),
                    'name': result['name'],
                    'url': self.urls['download'] % (result['id'], self.conf('passkey')),
                    'detail_url': self.urls['detail'] % result['id'],
                    'imdb_id': result['imdb'],
                    'size': tryInt(result['size']) / (1024 * 1024),
                    'seeders': tryInt(result['seeders']),
                    'leechers': tryInt(result['leechers']),
                    'score': torrent_score,
                })
        except:
            log.error('Failed getting results from %s: %s' % (self.getName(), traceback.format_exc()))
Ejemplo n.º 60
0
    def _searchOnTitle(self, title, movie, quality, results):

        params = tryUrlencode({
            'page': 'torrents',
            'search': '%s %s' % (title, movie['library']['year']),
            'active': 1,
        })

        data = self.getHTMLData('%s?%s' % (self.urls['search'], params))

        if data:

            try:
                soup = BeautifulSoup(data)

                results_table = soup.find('table', attrs = {'id': 'bgtorrlist2'})
                entries = results_table.find_all('tr')

                for result in entries[2:len(entries) - 1]:
                    info_url = result.find(href = re.compile('torrent-details'))
                    download = result.find(href = re.compile('magnet:'))

                    if info_url and download:

                        url = parse_qs(info_url['href'])

                        results.append({
                            'id': url['id'][0],
                            'name': info_url.string,
                            'url': download['href'],
                            'detail_url': self.urls['detail'] % url['id'][0],
                            'size': self.parseSize(result.find_all('td')[7].string),
                            'seeders': tryInt(result.find_all('td')[4].string),
                            'leechers': tryInt(result.find_all('td')[5].string),
                            'get_more_info': self.getMoreInfo
                        })

            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))