Example #1
File: wwt.py  Project: DarkSir23/mylar
    def wwt_data(self, data):

            resultw = data.find("table", {"class": "w3-table w3-striped w3-bordered w3-card-4"})
            resultp = resultw.findAll("tr")

            #final = []
            results = []
            for res in resultp:
                if res.findNext(text=True) == 'Torrents Name':
                    continue
                title = res.find('a')
                torrent = title['title']
                try:
                    for link in res.find_all('a', href=True):
                        if link['href'].startswith('download.php'):
                            linkurl = urlparse.parse_qs(urlparse.urlparse(link['href']).query)['id']
                            #results = {'torrent':  torrent,
                            #           'link':     link['href']}
                            break
                    for td in res.findAll('td'):
                        try:
                            seed = td.find("font", {"color": "green"})
                            leech = td.find("font", {"color": "#ff0000"})
                            value = td.findNext(text=True)
                            if any(['MB' in value, 'GB' in value]):
                                if 'MB' in value:
                                    szform = 'MB'
                                    sz = 'M'
                                else:
                                    szform = 'GB'
                                    sz = 'G'
                                size = helpers.human2bytes(str(re.sub(szform, '', value)).strip() + sz)
                            elif seed is not None:
                                seeders = value
                                #results['seeders'] = seeders
                            elif leech is not None:
                                leechers = value
                                #results['leechers'] = leechers
                            else:
                                age = value
                                #results['age'] = age
                        except Exception as e:
                            logger.warn('exception: %s' % e)

                    logger.info('age: %s' % age)
                    results.append({'title':    torrent,
                                    'link':     ''.join(linkurl),
                                    'pubdate':  self.string_to_delta(age),
                                    'size':     size,
                                    'site':     'WWT'})
                    logger.info('results: %s' % results)
                except Exception as e:
                    logger.warn('Error: %s' % e)
                    continue
                #else:
                #    final.append(results)

            return results
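
For context, wwt_data() above expects a page already parsed with BeautifulSoup and returns a list of result dicts. A minimal usage sketch, assuming the results page is fetched with requests and that `provider` is an instance of the class defining wwt_data() (the search URL and parameter name below are illustrative assumptions, not taken from the project):

import requests
from bs4 import BeautifulSoup

def search_wwt(provider, searchterm):
    # Assumed endpoint/parameter for illustration only.
    url = 'https://worldwidetorrents.eu/torrents-search.php'
    r = requests.get(url, params={'search': searchterm}, timeout=30)
    soup = BeautifulSoup(r.content, 'html.parser')
    # wwt_data() walks the w3-table rows and returns dicts with
    # 'title', 'link', 'pubdate', 'size' and 'site' keys.
    return provider.wwt_data(soup)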
Example #2
def torrents(pickfeed=None, seriesname=None, issue=None, feedinfo=None):
    if pickfeed is None:
        return

    srchterm = None

    if seriesname:
        srchterm = re.sub(' ', '%20', seriesname)
    if issue:
        srchterm += '%20' + str(issue)

    if mylar.TPSE_PROXY:
        if mylar.TPSE_PROXY.endswith('/'):
            tpse_url = mylar.TPSE_PROXY
        else:
            tpse_url = mylar.TPSE_PROXY + '/'
    else:
        #switched to https.
        tpse_url = 'https://torrentproject.se/'

    #this is for the public trackers included thus far, in order to properly cycle through the correct ones depending on the search request
    # TPSE = search only
    # DEM = rss feed
    # WWT = rss feed
    if pickfeed == 'TPSE-SEARCH':
        pickfeed = '2'
        loopit = 1
    elif pickfeed == 'TPSE':
        #we need to cycle through both DEM + WWT feeds
        loopit = 2
    else:
        loopit = 1

    lp = 0
    totalcount = 0

    title = []
    link = []
    description = []
    seriestitle = []

    feeddata = []
    myDB = db.DBConnection()
    torthetpse = []
    torthe32p = []
    torinfo = {}

    while (lp < loopit):
        if lp == 0 and loopit == 2:
            pickfeed = '6'  #DEM RSS
        elif lp == 1 and loopit == 2:
            pickfeed = '999'  #WWT RSS
           
        feedtype = None

        if pickfeed == "1" and mylar.ENABLE_32P:  # 32pages new releases feed.
            feed = 'https://32pag.es/feeds.php?feed=torrents_all&user=' + feedinfo['user'] + '&auth=' + feedinfo['auth'] + '&passkey=' + feedinfo['passkey'] + '&authkey=' + feedinfo['authkey']
            feedtype = ' from the New Releases RSS Feed for comics'
            verify = bool(mylar.VERIFY_32P)
        elif pickfeed == "2" and srchterm is not None:    # TP.SE search / RSS
            feed = tpse_url + 'rss/' + str(srchterm) + '/'
            verify = bool(mylar.TPSE_VERIFY)
        elif pickfeed == "3":    # TP.SE rss feed (3101 = comics category) / non-RSS
            feed = tpse_url + '?hl=en&safe=off&num=50&start=0&orderby=best&s=&filter=3101'
            feedtype = ' from the New Releases RSS Feed for comics from TP.SE'
            verify = bool(mylar.TPSE_VERIFY)
        elif pickfeed == "4":    #32p search
            if any([mylar.USERNAME_32P is None, mylar.USERNAME_32P == '', mylar.PASSWORD_32P is None, mylar.PASSWORD_32P == '']):
                logger.error('[RSS] Warning - you NEED to enter in your 32P Username and Password to use this option.')
                lp+=1
                continue
            if mylar.MODE_32P == 0:
                logger.warn('[32P] Searching is not available in 32p Legacy mode. Switch to Auth mode to use the search functionality.')
                lp+=1
                continue
            return
        elif pickfeed == "5" and srchterm is not None:  # demonoid search / non-RSS
            feed = 'https://www.dnoid.me/' + "files/?category=10&subcategory=All&language=0&seeded=2&external=2&query=" + str(srchterm) + "&uid=0&out=rss"
            verify = bool(mylar.TPSE_VERIFY)
        elif pickfeed == "6":    # demonoid rss feed 
            feed = 'https://www.dnoid.me/rss/10.xml'
            feedtype = ' from the New Releases RSS Feed from Demonoid'
            verify = bool(mylar.TPSE_VERIFY)
        elif pickfeed == "999":    #WWT rss feed
            feed = 'https://www.worldwidetorrents.eu/rss.php?cat=132,50'
            feedtype = ' from the New Releases RSS Feed from WorldWideTorrents'
        elif int(pickfeed) >= 7 and feedinfo is not None:
            #personal 32P notification feeds.
            #get the info here
            feed = 'https://32pag.es/feeds.php?feed=' + feedinfo['feed'] + '&user=' + feedinfo['user'] + '&auth=' + feedinfo['auth'] + '&passkey=' + feedinfo['passkey'] + '&authkey=' + feedinfo['authkey'] + '&name=' + feedinfo['feedname']
            feedtype = ' from your Personal Notification Feed : ' + feedinfo['feedname']
            verify = bool(mylar.VERIFY_32P)
        else:
            logger.error('invalid pickfeed denoted...')
            return

        if pickfeed == '2' or pickfeed == '3':
            picksite = 'TPSE'
            #if pickfeed == '2':
            #    feedme = tpse.            
        elif pickfeed == '5' or pickfeed == '6':
            picksite = 'DEM'
            #if pickfeed == '5':
            #    feedme = dem.
        elif pickfeed == '999':
            picksite = 'WWT'
        elif pickfeed == '1' or pickfeed == '4' or int(pickfeed) > 7:
            picksite = '32P'

        if all([pickfeed != '4', pickfeed != '3', pickfeed != '5', pickfeed != '999']):
            payload = None

            ddos_protection = round(random.uniform(0,15),2)
            time.sleep(ddos_protection)

            try:
                cf_cookievalue = None
                scraper = cfscrape.create_scraper()
                if pickfeed == '2':
                    cf_cookievalue, cf_user_agent = scraper.get_tokens(feed)
                    headers = {'Accept-encoding': 'gzip',
                               'User-Agent':       cf_user_agent}

                if cf_cookievalue:
                    r = scraper.get(feed, verify=verify, cookies=cf_cookievalue, headers=headers)
                else:
                    r = scraper.get(feed, verify=verify)#requests.get(feed, params=payload, verify=verify)
            except Exception as e:
                logger.warn('Error fetching RSS Feed Data from %s: %s' % (picksite, e))
                lp+=1
                continue

            feedme = feedparser.parse(r.content)
            #logger.info(feedme)   #<-- uncomment this to see what Mylar is retrieving from the feed

        i = 0

        if pickfeed == '4':
            for entry in searchresults['entries']:
                justdigits = entry['file_size'] #size not available in follow-list rss feed
                seeddigits = entry['seeders']  #number of seeders not available in follow-list rss feed

                if int(seeddigits) >= int(mylar.MINSEEDS):
                    torthe32p.append({
                                    'site':     picksite,
                                    'title':    entry['torrent_seriesname'].lstrip() + ' ' + entry['torrent_seriesvol'] + ' #' + entry['torrent_seriesiss'],
                                    'volume':   entry['torrent_seriesvol'],      # not stored by mylar yet.
                                    'issue':    entry['torrent_seriesiss'],    # not stored by mylar yet.
                                    'link':     entry['torrent_id'],  #just the id for the torrent
                                    'pubdate':  entry['pubdate'],
                                    'size':     entry['file_size'],
                                    'seeders':  entry['seeders'],
                                    'files':    entry['num_files']
                                    })
                i += 1
        elif pickfeed == '3':
            #TP.SE RSS FEED (parse)
            pass
        elif pickfeed == '5':
            #DEMONOID SEARCH RESULT (parse)
            pass
        elif pickfeed == "999":
            try:
                feedme = feedparser.parse(feed)
            except Exception as e:
                logger.warn('Error fetching RSS Feed Data from %s: %s' % (picksite, e))
                lp+=1
                continue

            #WWT / FEED
            for entry in feedme.entries:
                tmpsz = entry.description
                tmpsz_st = tmpsz.find('Size:') + 6
                if 'GB' in tmpsz[tmpsz_st:]:
                    szform = 'GB'
                    sz = 'G'
                elif 'MB' in tmpsz[tmpsz_st:]:
                    szform = 'MB'
                    sz = 'M'
                linkwwt = urlparse.parse_qs(urlparse.urlparse(entry.link).query)['id']
                feeddata.append({
                                'site':     picksite,
                                'title':    entry.title,
                                'link':     ''.join(linkwwt),
                                'pubdate':  entry.updated,
                                'size':     helpers.human2bytes(str(tmpsz[tmpsz_st:tmpsz.find(szform, tmpsz_st) -1]) + str(sz))   #slice off the trailing ' MB'/' GB' and append the short unit for human2bytes
                                })
                i+=1
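
The WWT feed branch above pulls the size out of each entry's description by slicing between the 'Size:' label and the MB/GB unit, then converts it to bytes with helpers.human2bytes. A standalone sketch of that step, assuming a binary (1024-based) conversion and an illustrative parse_wwt_size name:

def parse_wwt_size(description):
    # Mirrors the slicing used in the feed loop: skip past 'Size:' and
    # cut the number that precedes the MB/GB unit.
    start = description.find('Size:') + 6
    if 'GB' in description[start:]:
        unit, mult = 'GB', 1024 ** 3
    elif 'MB' in description[start:]:
        unit, mult = 'MB', 1024 ** 2
    else:
        return None
    number = description[start:description.find(unit, start) - 1]
    return int(float(number.strip()) * mult)

# parse_wwt_size('Category: Comics Size: 25.30 MB Seeders: 4') -> 26528972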
Example #3
                #DEMONOID / FEED
                elif pickfeed == "6":
                    tmpsz = feedme.entries[i].description
                    tmpsz_st = tmpsz.find('Size:') + 6
                    if 'GB' in tmpsz[tmpsz_st:]:
                        szform = 'GB'
                        sz = 'G'
                    elif 'MB' in tmpsz[tmpsz_st:]:
                        szform = 'MB'
                        sz = 'M'
                    feeddata.append({
                                    'site':     picksite,
                                    'title':    feedme.entries[i].title,
                                    'link':     str(urlparse.urlparse(feedme.entries[i].link)[2].rpartition('/')[0].rsplit('/',2)[1]),
                                    'pubdate':  feedme.entries[i].updated,
                                    'size':     helpers.human2bytes(str(tmpsz[tmpsz_st:tmpsz.find(szform, tmpsz_st) -1]) + str(sz)),
                                    })

                #32p / FEEDS
                elif pickfeed == "1" or int(pickfeed) > 7:
                    tmpdesc = feedme.entries[i].description
                    st_pub = feedme.entries[i].title.find('(')
                    st_end = feedme.entries[i].title.find(')')
                    pub = feedme.entries[i].title[st_pub +1:st_end] # +1 to not include (
                    #logger.fdebug('publisher: ' + re.sub("'",'', pub).strip())  #publisher sometimes is given within quotes for some reason, strip 'em.
                    vol_find = feedme.entries[i].title.find('vol.')
                    series = feedme.entries[i].title[st_end +1:vol_find].strip()
                    series = re.sub('&amp;', '&', series).strip()
                    #logger.fdebug('series title: ' + series)
                    iss_st = feedme.entries[i].title.find(' - ', vol_find)
                    vol = re.sub('\.', '', feedme.entries[i].title[vol_find:iss_st]).strip()
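
The 32P branch slices the publisher, series name and volume straight out of the feed title by locating the parentheses and the 'vol.' marker. A worked sketch on an assumed title layout (the sample string is illustrative; real 32P titles may differ):

import re

title = "(Marvel) Invincible Iron Man vol.2017 - 005 (2017)"  # assumed layout

st_pub = title.find('(')                 # position of the opening parenthesis
st_end = title.find(')')
pub = title[st_pub + 1:st_end]           # 'Marvel'
vol_find = title.find('vol.')
series = re.sub('&amp;', '&', title[st_end + 1:vol_find]).strip()  # 'Invincible Iron Man'
iss_st = title.find(' - ', vol_find)
vol = re.sub(r'\.', '', title[vol_find:iss_st]).strip()            # 'vol2017'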
Example #4
File: wwt.py  Project: xeddmc/mylar
    def wwt_data(self, data):

        resultw = data.find(
            "table", {"class": "w3-table w3-striped w3-bordered w3-card-4"})
        resultp = resultw.findAll("tr")

        #final = []
        results = []
        for res in resultp:
            if res.findNext(text=True) == 'Torrents Name':
                continue
            title = res.find('a')
            torrent = title['title']
            try:
                for link in res.find_all('a', href=True):
                    if link['href'].startswith('download.php'):
                        linkurl = urlparse.parse_qs(
                            urlparse.urlparse(link['href']).query)['id']
                        #results = {'torrent':  torrent,
                        #           'link':     link['href']}
                        break
                for td in res.findAll('td'):
                    try:
                        seed = td.find("font", {"color": "green"})
                        leech = td.find("font", {"color": "#ff0000"})
                        value = td.findNext(text=True)
                        if any(['MB' in value, 'GB' in value]):
                            if 'MB' in value:
                                szform = 'MB'
                                sz = 'M'
                            else:
                                szform = 'GB'
                                sz = 'G'
                            size = helpers.human2bytes(
                                str(re.sub(szform, '', value)).strip() + sz)
                        elif seed is not None:
                            seeders = value
                            #results['seeders'] = seeders
                        elif leech is not None:
                            leechers = value
                            #results['leechers'] = leechers
                        else:
                            age = value
                            #results['age'] = age
                    except Exception as e:
                        logger.warn('exception: %s' % e)

                logger.info('age: %s' % age)
                results.append({
                    'title': torrent,
                    'link': ''.join(linkurl),
                    'pubdate': self.string_to_delta(age),
                    'size': size,
                    'site': 'WWT'
                })
                logger.info('results: %s' % results)
            except Exception as e:
                logger.warn('Error: %s' % e)
                continue
            #else:
            #    final.append(results)

        return results
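
wwt_data() hands the scraped age string (the table cell that is neither a size nor a coloured seed/leech count) to self.string_to_delta() to build the pubdate. The project's own implementation is not shown in these examples; a minimal stand-in, assuming ages arrive as '<number> <unit>' strings such as '2 days':

from datetime import timedelta

def string_to_delta(agestring):
    # Illustrative stand-in only; the real method lives on the provider class.
    number, _, unit = agestring.strip().partition(' ')
    unit = unit.rstrip('s').lower()
    units = {'minute': 'minutes', 'hour': 'hours', 'day': 'days', 'week': 'weeks'}
    if unit not in units:
        return timedelta(0)
    return timedelta(**{units[unit]: float(number)})

# string_to_delta('2 days') -> timedelta(days=2)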