Example #1
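These provider snippets are excerpts of class methods, so their module-level imports are not shown. Judging from the calls they make (urllib.quote/urlencode, feedparser.parse, datetime.fromtimestamp, mktime, U.pretty_filesize), the enclosing module presumably begins with something along these lines; the name of the local utility module bound to U is a placeholder:

# Assumed module-level imports for these Python 2 snippets; not part of the
# original excerpts.
import urllib

import feedparser

from datetime import datetime
from time import mktime

import util as U  # placeholder: a project-local module providing pretty_filesize()
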
    def search(self, search_string, season=False, episode=False):
        #http://bitsnoop.com/search/all/supernatural+s01e01+OR+1x01/c/d/1/?fmt=rss

        if season and episode:
            search_string = self.se_ep(season, episode, search_string)

        query = search_string
        encoded_search = urllib.quote(query)
        url = 'http://bitsnoop.com/search/all/{}/c/d/1/?fmt=rss'
        full_url = url.format(encoded_search)

        parsed = feedparser.parse(full_url)
        header = [
            [search_string, full_url],
            ['Name', 'Size', 'Date', 'Seeds'],
            [0, 10, 6, 6],
            ['<', '>', '=', '>']]
        show_data = []

        for show in parsed['entries']:
            #pp(show)

            if show['published_parsed']:
                dt = datetime.fromtimestamp(mktime(show['published_parsed']))
                date = dt.strftime('%b %d/%Y')
            else:
                date = '-'
            size = U.pretty_filesize(show['size'])
            title = show['title']
            seeds = show['numseeders']
            magnet = show['magneturi']

            show_data.append([
                title,
                size,
                date,
                seeds,
                magnet
            ])

        show_data.sort(key=lambda x: int(x[3]), reverse=True) # sort by seeds
        return [header] + [show_data]
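
The structure returned above is a two-element list: a header block ([search_string, full_url], the column titles, the column widths, and the alignment flags) followed by the list of result rows, where the last field of each row is the magnet link. A minimal sketch of a consumer, assuming only that structure (print_results is not part of the original code):

# Minimal sketch of a consumer for the [header] + [show_data] structure
# returned by search(); not part of the original provider code.
def print_results(results):
    header, rows = results[0], results[1]
    search_string, full_url = header[0]   # [search_string, full_url]
    column_names = header[1]              # e.g. ['Name', 'Size', 'Date', 'Seeds']
    print('Results for %r (%s)' % (search_string, full_url))
    print(' | '.join(column_names))
    for row in rows:
        # the trailing field (magnet or nzb link) is the download id and is
        # normally kept out of the displayed table
        print(' | '.join(str(field) for field in row[:len(column_names)]))

The width and alignment rows in the header ([0, 10, 6, 6] and ['<', '>', '=', '>']) are presumably consumed by whatever renders the final table; they are not used by this sketch.
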
Example #2

    def search(self, search_string, season=False, episode=False):

        if season and episode:
            search_string = self.se_ep(season, episode, search_string)

        query = search_string
        encoded_search = urllib.quote(query)
        url = '{}/usearch/{}/?rss=1&field=seeders&sorder=desc'
        full_url = url.format(self.provider_url, encoded_search)

        parsed = feedparser.parse(full_url)
        header = [
            [search_string, full_url],
            ['Name', 'Size', 'Date', 'Seeds'],
            [0, 10, 12, 6],
            ['<', '>', '<', '>']]
        show_data = []

        for show in parsed['entries']:
            dt = datetime.fromtimestamp(mktime(show['published_parsed']))
            date = dt.strftime('%b %d/%Y')

            size = U.pretty_filesize(show['torrent_contentlength'])

            # title = U.snip (show['title'].ljust (title_w), title_w)
            # title = title.replace ('avi', U.fg_color ('green', 'avi'))
            title = show['title']

            show_data.append([
                title,                    # title
                size,                     # show size
                date,                     # date
                show['torrent_seeds'],    # seeds
                show['torrent_magneturi'] # id (download magnet url)
            ])

        #show_data.sort(key=lambda x: int(x[3]), reverse=True) # sort by seeds
        return [header] + [show_data]
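
Every provider above runs the season and episode through self.se_ep() before searching, but that helper is not shown in these excerpts. Judging from the commented example URL in the first snippet (supernatural+s01e01+OR+1x01), it presumably produces both the S01E01 and 1x01 forms; a hypothetical sketch:

# Hypothetical sketch of the se_ep() helper that all of these providers call.
# The real implementation is not shown in the excerpts; this version is only
# inferred from the example URL 'supernatural+s01e01+OR+1x01'.
def se_ep(season, episode, show_title):
    season = int(season)
    episode = int(episode)
    return '%s S%02dE%02d OR %dx%02d' % (
        show_title, season, episode, season, episode)

For example, se_ep(1, 1, 'Supernatural') would return 'Supernatural S01E01 OR 1x01', which matches the commented URL once URL-encoded and lowercased.
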
Example #3

    def search(self, search_string, season=False, episode=False):

        """
        Default Search: Our default is prefix match
        Search 123 will match 123, 1234, 1234abcdefg
        Search 123 will not match 0123, ab123, ab123yz

        AND search:
        -----------
        the words hello and world:
        hello world

        NOT search:
        -----------
        the word hello but NOT the word world:
        hello -world

        We can't do NOT only search
        -world

        OR search:
        ----------
        the words hello or world:
        hello or world

        Each "or" is treated as new query part
        hello abcd or hello efgh != hello abcd or efgh

        grouping:
        ---------
        the exact phrase hello world:
        "hello world"
        """

        if season and episode:
            search_string = "%s" % (self.se_ep(season, episode, search_string))

        # print search_string
        url = "http://www.nzbclub.com/nzbrss.aspx?"
        query = {
            "q": search_string,
            "ig": 2,  # hide adult: 1=yes, 2=no
            "szs": 15,  # min size: 15=75m, 16=100m,
            "sze": 24,  # max size: 24=2gig
            "st": 5,  # sort.  5=relevence, 4=size (smallest first)
            "ns": 1,  # no spam
            "sp": 1,  # don't show passworded files
            "nfo": 0,  # has to have nfo  1=yes, 0=no
        }
        full_url = url + urllib.urlencode(query)
        parsed = feedparser.parse(full_url)

        header = [[search_string, full_url], ["Name", "Date", "Size"], [0, 12, 10], ["<", "<", ">"]]

        show_data = []
        for show in parsed["entries"]:
            dt = datetime.fromtimestamp(mktime(show["published_parsed"]))
            date = dt.strftime("%b %d/%Y")

            size = U.pretty_filesize(show["links"][0]["length"])

            show_data.append([show["title"], date, size, show["links"][0]["href"]])  # id

        return [header] + [show_data]
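
All of these methods use the Python 2 urllib API (urllib.quote, urllib.urlencode). On Python 3 the same functions live in urllib.parse; a small compatibility sketch, not part of the original code:

# Python 2 / Python 3 compatibility sketch for the urllib calls used above;
# not part of the original code.
try:
    from urllib import quote, urlencode          # Python 2
except ImportError:
    from urllib.parse import quote, urlencode    # Python 3

encoded = quote('american horror story S02E09')
rss_url = 'http://www.nzbclub.com/nzbrss.aspx?' + urlencode({'q': 'foo bar', 'ig': 2})
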
Example #4

    def search(self, search_string, season=False, episode=False):
        """
        Search options:
        ---------------
        Minimum age:	 days
        Sort by:         agedesc, age, sizedesc, size
        Minimum size:	 MB
        Maximum size:	 MB
        Default query:
        Poster:
        NFO content:
        Has NFO file
        Hide crossposts
        Show complete releases only  1
        Hide possible spam           1


        Searching
        ---------

        In the default search form you can enter search terms you would like
        to search for. After searching the results shown will match ALL search
        terms entered. For example, if you search for

        foo bar

        all search results will contain both 'foo' AND 'bar'.

        Instead of using ALL terms you can also search for only one (or a few)
        of the terms. To do this you need to use the '|' symbol. For example,
        to search for either 'foo' or 'bar' you can enter the search term

        foo | bar

        On some occasions you would like to exclude results with certain words
        from your search. For example, if you want the word 'foo', but not
        'bar' in your search results you can enter

        foo -bar

        The last option we offer is to search for a sentence that should match
        exactly. In case of 'foo bar' these two words will not necessarily
        turn up next to each other in the search results. If you would like to
        search for some group of words you can put double-quotes around the
        terms. When you need 'foo' and 'bar' to be next to each other the
        correct search would be:

        "foo bar"

        You can't use more than 20 keywords when searching.



        Top level of data structure returned from NZBIndex:
        ---------------------------------------------------
        bozo        ?
        encoding    u'UTF-8'
        entries     all shows                           results['entries'][0...x]
        feed        info about the feed                 results['feed']
        headers
        href        the rss link for these results
        namespaces  ?
        status      200
        version     u'rss20'


        Example of one show entry (results['entries'][0]):
        --------------------------------------------------

          summary_detail
          ----------------
          {'base': u'http://nzbindex.com/rss/?q=American+Horror+Story+S02E09&sort=agedesc&maxsize=5000&minage=0&complete=1&minsize=100&max=100&more=1',
           'type': u'text/html', 'value': u'<p><font color="gray">alt.binaries.boneless, alt.binaries.cores, alt.binaries.multimedia, alt.binaries.town</font><br />\n<b>1.15 GB</b><br />\n19.6 dagen<br />\n<font color="#3DA233">16 bestanden (3098 delen)</font>\n<font color="gray">door Profess0r &lt;[email protected]&gt;</font><br />\n<font color="#E2A910">\n5 PAR2 | 11 ARCHIEF</font>\n</p>',
           'language': None}

          published_parsed
          ----------------
          time.struct_time(tm_year=2012, tm_mon=12, tm_mday=13, tm_hour=4, tm_min=58, tm_sec=19, tm_wday=3, tm_yday=348, tm_isdst=0)

          links
          ----------------
          [{'href': u'http://nzbindex.com/release/80986743/TOWNwww.town.ag-partner-of-www.ssl-news.info-American.Horror.Story.S02E09.720p.HDTV.X264-DIMENSION-0116-American.Horror.Story.S02E09.720p.HDTV.X2.nzb',
            'type': u'text/html',
            'rel': u'alternate'},
           {'length': u'1233191350',
            'href': u'http://nzbindex.com/download/80986743/TOWNwww.town.ag-partner-of-www.ssl-news.info-American.Horror.Story.S02E09.720p.HDTV.X264-DIMENSION-0116-American.Horror.Story.S02E09.720p.HDTV.X2.nzb',
            'type': u'text/xml',
            'rel': u'enclosure'}]

          title
          ----------------
          <TOWN><www.town.ag > <partner of www.ssl-news.info > American.Horror.Story.S02E09.720p.HDTV.X264-DIMENSION [01/16] - "American.Horror.Story.S02E09.720p.HDTV.X264-DIMENSION.par2" - 1,11 GB - yEnc

          tags
          ----------------
          [{'term': u'alt.binaries.boneless', 'scheme': None, 'label': None},
           {'term': u'alt.binaries.cores', 'scheme': None, 'label': None},
           {'term': u'alt.binaries.multimedia', 'scheme': None, 'label': None},
           {'term': u'alt.binaries.town', 'scheme': None, 'label': None}]

          summary
          ----------------
          <p><font color="gray">alt.binaries.boneless, alt.binaries.cores, alt.binaries.multimedia, alt.binaries.town</font><br />
          <b>1.15 GB</b><br />
          19.6 dagen<br />
          <font color="#3DA233">16 bestanden (3098 delen)</font>
          <font color="gray">door Profess0r &lt;[email protected]&gt;</font><br />
          <font color="#E2A910">
          5 PAR2 | 11 ARCHIEF</font>
          </p>

          guidislink
          ----------------
          False

          title_detail
          ----------------
          {'base': u'http://nzbindex.com/rss/?q=American+Horror+Story+S02E09&sort=agedesc&maxsize=5000&minage=0&complete=1&minsize=100&max=100&more=1',
           'type': u'text/plain',
           'value': u'<TOWN><www.town.ag > <partner of www.ssl-news.info > American.Horror.Story.S02E09.720p.HDTV.X264-DIMENSION [01/16] - "American.Horror.Story.S02E09.720p.HDTV.X264-DIMENSION.par2" - 1,11 GB - yEnc',
           'language': None}

          link
          ----------------
          http://nzbindex.com/release/80986743/TOWNwww.town.ag-partner-of-www.ssl-news.info-American.Horror.Story.S02E09.720p.HDTV.X264-DIMENSION-0116-American.Horror.Story.S02E09.720p.HDTV.X2.nzb

          published
          ----------------
          Thu, 13 Dec 2012 05:58:19 +0100

          id
          ----------------
          http://nzbindex.com/release/80986743/TOWNwww.town.ag-partner-of-www.ssl-news.info-American.Horror.Story.S02E09.720p.HDTV.X264-DIMENSION-0116-American.Horror.Story.S02E09.720p.HDTV.X2.nzb


          """

        # search_template = ('nzbindex.com/rss/?q=%s&minage=%s&sort=%s' +
        # '&minsize=%s&maxsize=%s&complete=%s&max=%s&more=1')

        if season and episode:
            search_string = self.se_ep(season, episode, search_string)

        min_age = '0'         # days
        sort = 'agedesc'      # age, agedesc, size, sizedesc
        min_size = '100'      # MB
        max_size = '5000'     # MB
        complete_only = '1'   # return only complete posts
        max_results = '100'   # results per page

        url = 'http://nzbindex.com/rss/?'
        query = {
            'q': search_string,
            'minage': min_age,
            'sort': sort,
            'minsize': min_size,
            'maxsize': max_size,
            'complete': complete_only,
            'max': max_results,
            'more': '1',
        }

        full_url = url + urllib.urlencode(query)

        # print 'searching...'
        parsed = feedparser.parse(full_url)

        show_data = []
        for show in parsed['entries']:
            dt = datetime.fromtimestamp(mktime(show['published_parsed']))
            date = dt.strftime('%b %d/%Y')

            size = U.pretty_filesize(show['links'][1]['length'])

            show_data.append([
                show['title'],
                date,
                size,
                show['links'][1]['href']
            ])

        header = [
            #'%s  (%s)' % (search_string, self.provider_url),
            [search_string, full_url],
            ['Name', 'Date', 'Size'],
            [0, 12, 10],
            ['<', '<', '>']
        ]

        return [header] + [show_data]
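
The docstring above shows that each NZBIndex entry carries two links: an 'alternate' link to the release page and an 'enclosure' link holding the NZB href and its length. The loop relies on the enclosure always being at show['links'][1]; an alternative, more defensive lookup (not the original behaviour) could select it by its rel attribute:

# Alternative sketch: pick the NZB enclosure by its rel attribute instead of
# assuming it is always show['links'][1]; not the original behaviour.
def nzb_enclosure(show):
    for link in show['links']:
        if link.get('rel') == 'enclosure':
            return link['href'], link.get('length', '0')
    # fall back to the first link if nothing is flagged as an enclosure
    return show['links'][0]['href'], show['links'][0].get('length', '0')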