Example #1
0
    def format_data(self, xml_fragment, idx):
        """Convert a parsed newznab RSS fragment into display rows.

        Args:
            xml_fragment: iterable of <item> Elements; each is expected to
                have <title> and <pubDate> children and usually some
                <newznab:attr name="..." value="..."> children.
            idx: provider index, embedded in the row's id field.

        Returns:
            list of [title, size, date, shortname, 'idx|guid'] rows.
        """
        show_data = []
        for show in xml_fragment:
            title = show.find('title').text
            date = show.find('pubDate').text
            date = datetime.datetime.strptime(date, '%a, %d %b %Y %X %z')
            date = date.strftime('%b %d/%Y')

            size = guid = None
            # extract the elements that look like:
            # <newznab:attr name="XXXX" value="XXXX">
            attrs = [dict(i.items()) for i in show if 'name' in i.keys()]
            for attr in attrs:
                if attr['name'] == 'size':
                    size = U.pretty_filesize(attr['value'])
                if attr['name'] == 'guid':
                    guid = attr['value']

            # the guid is sometimes not in a <newznab:attr.../> tag
            if not guid:
                # BUG FIX: <guid> can be missing entirely, which used to
                # raise AttributeError on `.text`; guard before using it.
                guid_el = show.find('guid')
                if guid_el is not None and guid_el.text:
                    guid = guid_el.text.split('/')[-1]

            show_data.append([
                title,
                size,
                date,
                self.shortname,
                '%s|%s' % (idx, guid),
            ])
        return show_data
Example #2
0
    def format_data(self, xml_fragment, idx):
        """Turn newznab RSS <item> elements into table rows of
        [title, size, date, shortname, 'idx|guid']."""
        rows = []
        for item in xml_fragment:
            name = item.find('title').text
            stamp = datetime.datetime.strptime(
                item.find('pubDate').text, '%a, %d %b %Y %X %z')
            when = stamp.strftime('%b %d/%Y')

            size = None
            guid = None
            # pick apart the <newznab:attr name="..." value="..."> children
            for child in item:
                if 'name' not in child.keys():
                    continue
                info = dict(child.items())
                if info['name'] == 'size':
                    size = U.pretty_filesize(info['value'])
                elif info['name'] == 'guid':
                    guid = info['value']

            # the guid is sometimes not in a <newznab:attr.../> tag;
            # fall back to the plain <guid> element
            if not guid:
                guid = item.find('guid').text.split('/')[-1]

            rows.append([
                name,
                size,
                when,
                self.shortname,
                '%s|%s' % (idx, guid),
            ])
        return rows
Example #3
0
    def search(self, search_string, season=False, episode=False):
        """Search ExtraTorrent's RSS feed across the provider mirrors.

        Returns rows of [title, size, date, seeds, shortname, magnet];
        stops at the first mirror that answers.
        """
        if season and episode:
            search_string = "%s" % (self.se_ep(season, episode, search_string))

        encoded_search = urllib.parse.quote(search_string)
        words = search_string.split(" ")

        show_data = []
        for try_url in self.provider_urls:
            # cid=0 searches everything, cid=8 restricts to tv shows
            category = 8 if (season and episode) else 0

            self.url = "{}/rss.xml?type=search&cid={}&search={}".format(
                try_url, category, encoded_search)

            parsed = feedparser.parse(self.url)
            if "bozo_exception" in parsed:
                continue
            if not parsed["entries"]:
                continue

            for entry in parsed["entries"]:
                try:
                    stamp = datetime.fromtimestamp(
                        mktime(entry["published_parsed"]))
                    date = stamp.strftime("%b %d/%Y")
                    size = U.pretty_filesize(entry["size"])
                    title = entry["title"]
                except KeyError:
                    continue

                # extratorrent returns results that match any word in the
                # search, so drop entries missing any of the search words
                if any(w.lower() not in title.lower() for w in words):
                    continue

                # the ExtraTorrent rss feed doesn't supply the magnet link, or
                # any usable links (They must be downloaded from the site).
                # But the feed has the URN hash, so build a magnet from that.
                magnet = "magnet:?xt=urn:btih:{}&dn={}".format(
                    entry["info_hash"], urllib.parse.quote(title))
                seeds = entry["seeders"]
                if seeds == "---":
                    seeds = "0"

                show_data.append(
                    [title, size, date, seeds, self.shortname, magnet])

            return show_data

        return show_data
    def search(self, search_string, season=False, episode=False):
        """Search torrentdownloads' RSS feed, one query per search variant.

        Returns rows of [title, size, date, seeds, short_name, magnet].
        """
        if season and episode:
            searches = self.se_ep(search_string, season, episode)
        else:
            searches = [search_string]

        # http://www.torrentdownloads.me/rss.xml?type=search&search=doctor+who+s05e01
        base_url = '%s/rss.xml?type=search&search={}' % self.provider_urls[0]
        show_data = []
        for search in searches:
            encoded_search = urllib.parse.quote(search)
            url = base_url.format(encoded_search)
            parsed = feedparser.parse(url)

            # BUG FIX: `len(parsed) == 0` counted the keys of the result
            # dict, which is never empty; check the entry list instead.
            if len(parsed['entries']) == 0:
                continue

            for show in parsed['entries']:
                dt = datetime.fromtimestamp(mktime(show['published_parsed']))
                date = dt.strftime('%b %d/%Y')
                size = U.pretty_filesize(show['size'])
                title = show['title']
                seeds = show['seeders']
                magnet_url = 'magnet:?xt=urn:btih:{}&dn={}'
                magnet_hash = show['info_hash']
                magnet = magnet_url.format(magnet_hash,
                                           urllib.parse.quote(title))

                # torrentdownloads returns results that match any word in the
                # search, so the results end up with a bunch of stuff we
                # aren't interested in; skip entries missing a search word.
                if any(i.lower() not in title.lower()
                       for i in search.split(' ')):
                    continue

                show_data.append([
                    title,
                    size,
                    date,
                    seeds,
                    self.short_name,
                    magnet,
                ])

        return show_data
Example #5
0
    def search(self, search_string, season=False, episode=False):
        """Search torrentdownloads' RSS feed, one query per search variant.

        Returns rows of [title, size, date, seeds, short_name, magnet].
        """
        if season and episode:
            searches = self.se_ep(search_string, season, episode)
        else:
            searches = [search_string]

        # http://www.torrentdownloads.me/rss.xml?type=search&search=doctor+who+s05e01
        base_url = '%s/rss.xml?type=search&search={}' % self.provider_urls[0]
        show_data = []
        for search in searches:
            encoded_search = urllib.parse.quote(search)
            url = base_url.format(encoded_search)
            parsed = feedparser.parse(url)

            # BUG FIX: `len(parsed) == 0` counted the keys of the result
            # dict, which is never empty; check the entry list instead.
            if len(parsed['entries']) == 0:
                continue

            for show in parsed['entries']:

                dt = datetime.fromtimestamp(mktime(show['published_parsed']))
                date = dt.strftime('%b %d/%Y')
                size = U.pretty_filesize(show['size'])
                title = show['title']
                seeds = show['seeders']
                magnet_url = 'magnet:?xt=urn:btih:{}&dn={}'
                magnet_hash = show['info_hash']
                magnet = magnet_url.format(magnet_hash,
                                           urllib.parse.quote(title))

                # torrentdownloads returns results that match any word in the
                # search, so the results end up with a bunch of stuff we
                # aren't interested in and we need to filter them out.
                stop = False
                for i in search.split(' '):
                    if i.lower() not in title.lower():
                        stop = True
                if stop:
                    continue

                show_data.append(
                    [title, size, date, seeds, self.short_name, magnet])

        return show_data
Example #6
0
    def search(self, search_string, season=False, episode=False):
        """Search each provider mirror's RSS endpoint, returning rows of
        [title, size, date, seeds, shortname, magnet] from the first
        mirror that yields entries."""
        if season and episode:
            search_string = '%s' % (
                self.se_ep(season, episode, search_string))

        encoded_search = urllib.parse.quote(search_string)

        socket.setdefaulttimeout(Config.timeout)
        show_data = []
        for mirror in self.provider_urls:
            self.url = '{}/search/all/{}/c/d/1/?fmt=rss'.format(
                mirror, encoded_search)

            parsed = feedparser.parse(self.url)
            if not parsed['entries']:
                continue

            for entry in parsed['entries']:
                # some entries carry no publish timestamp at all
                if entry['published_parsed']:
                    stamp = datetime.fromtimestamp(
                        mktime(entry['published_parsed']))
                    date = stamp.strftime('%b %d/%Y')
                else:
                    date = '-'

                show_data.append([
                    entry['title'],
                    U.pretty_filesize(entry['size']),
                    date,
                    entry['numseeders'],
                    self.shortname,
                    entry['magneturi'],
                ])

            return show_data

        return show_data
Example #7
0
    def search(self, search_string, season=False, episode=False):
        """Query the provider mirrors' RSS search and return rows of
        [title, size, date, seeds, shortname, magnet]."""
        if season and episode:
            search_string = '%s' % (
                self.se_ep(season, episode, search_string))

        quoted = urllib.parse.quote(search_string)

        show_data = []
        for base in self.provider_urls:
            full_url = '{}/search/all/{}/c/d/1/?fmt=rss'.format(base, quoted)
            self.url = full_url

            parsed = feedparser.parse(full_url)
            if not parsed['entries']:
                continue

            for entry in parsed['entries']:
                # publish time may be absent; fall back to a dash
                when = '-'
                if entry['published_parsed']:
                    when = datetime.fromtimestamp(
                        mktime(entry['published_parsed'])).strftime('%b %d/%Y')

                show_data.append([
                    entry['title'],
                    U.pretty_filesize(entry['size']),
                    when,
                    entry['numseeders'],
                    self.shortname,
                    entry['magneturi'],
                ])

            return show_data

        return show_data
Example #8
0
    def search(self, search_string, season=False, episode=False):
        """Search a KAT-style mirror's RSS feed, sorted by seeders.

        Returns rows of [title, size, date, seeds, shortname, magnet].
        """
        if season and episode:
            search_string = '%s' % (
                self.se_ep(
                    season, episode, search_string))

        encoded_search = urllib.parse.quote(search_string)
        show_data = []
        for mirror in self.provider_urls:
            full_url = '{}/usearch/{}/?rss=1&field=seeders&sorder=desc'.format(
                mirror, encoded_search)
            self.url = full_url

            parsed = feedparser.parse(full_url)
            if not parsed['entries']:
                continue

            for entry in parsed['entries']:
                stamp = datetime.fromtimestamp(
                    mktime(entry['published_parsed']))
                date = stamp.strftime('%b %d/%Y')
                size = U.pretty_filesize(entry['torrent_contentlength'])

                show_data.append([
                    entry['title'],             # title
                    size,                       # show size
                    date,                       # date
                    entry['torrent_seeds'],     # seeds
                    self.shortname,
                    entry['torrent_magneturi']  # id (download magnet url)
                ])

            return show_data
        return show_data
Example #9
0
    def search(self, search_string, season=False, episode=False):
        """Search a KAT-style mirror's RSS feed, seeders first."""
        if season and episode:
            search_string = '%s' % (self.se_ep(season, episode, search_string))

        quoted = urllib.parse.quote(search_string)
        show_data = []
        for base in self.provider_urls:
            self.url = '{}/usearch/{}/?rss=1&field=seeders&sorder=desc'.format(
                base, quoted)

            parsed = feedparser.parse(self.url)
            if not parsed['entries']:
                continue

            for entry in parsed['entries']:
                when = datetime.fromtimestamp(
                    mktime(entry['published_parsed'])).strftime('%b %d/%Y')

                show_data.append([
                    entry['title'],  # title
                    U.pretty_filesize(entry['torrent_contentlength']),  # size
                    when,  # date
                    entry['torrent_seeds'],  # seeds
                    self.shortname,
                    entry['torrent_magneturi']  # id (download magnet url)
                ])

            return show_data
        return show_data
Example #10
0
    def search(self, search_string, season=False, episode=False):
        """Search the torrentapi JSON api, one query per search variant.

        Returns rows of [title, size, date, seeds, shortname, magnet];
        returns [] if the api token can't be fetched.
        """
        if season and episode:
            searches = self.se_ep(search_string, season, episode)
        else:
            searches = [search_string]

        # get token for api
        url = '{}?get_token=get_token&app_id=tvoverlord'.format(self.baseurl)
        try:
            r = requests.get(url)
        except requests.exceptions.ConnectionError:
            return []

        if r.status_code == 403:
            self.url = url
            return []

        # BUG FIX: the api sometimes answers with a non-json error page,
        # which made r.json() raise ValueError and crash the search.
        try:
            j = r.json()
        except ValueError:
            return []  # not json

        token = j['token']

        search_data = []
        count = 0
        for search in searches:
            # the torrentapi only allows one query every two seconds
            if count > 0:
                time.sleep(2)
            count = count + 1

            search_tpl = '{}?mode=search&search_string={}&token={}&format=json_extended&sort=seeders&limit=100&app_id=tvoverlord'
            search_string = urllib.parse.quote(search)
            url = search_tpl.format(self.baseurl, search_string, token)
            # click.echo(url)
            self.url = self.url + ' ' + url

            try:
                r = requests.get(url)
            except requests.exceptions.ConnectionError:
                # can't connect, go to next url
                continue

            # guard the per-search response the same way as the token
            try:
                results = r.json()
            except ValueError:
                continue  # not json

            if 'error_code' in results.keys() and results['error_code'] == 20:
                continue  # no results found

            try:
                shows = results['torrent_results']
            except KeyError:
                # no results
                continue

            for show in shows:
                title = show['title']
                date = show['pubdate']
                date = date.split(' ')[0]
                size = show['size']
                size = U.pretty_filesize(size)
                seeds = show['seeders']
                magnet = show['download']

                search_data.append(
                    [title, size, date, seeds, self.shortname, magnet])

        return search_data
Example #11
0
    def search(self, search_string, season=False, episode=False):
        """
        Default Search: Our default is prefix match
        Search 123 will match 123, 1234, 1234abcdefg
        Search 123 will not match 0123, ab123, ab123yz

        AND search:
        -----------
        the words hello and world:
        hello world

        NOT search:
        -----------
        the word hello but NOT the word world:
        hello -world

        We can't do NOT only search
        -world

        OR search:
        ----------
        the words hello or world:
        hello or world

        Each "or" is treated as new query part
        hello abcd or hello efgh != hello abcd or efgh

        grouping:
        ---------
        the exact phrase hello world:
        "hello world"
        """

        if season and episode:
            search_string = '%s' % (self.se_ep(season, episode, search_string))

        url = '%s/nzbrss.aspx?' % self.provider_urls[0]
        query = {
            'q': search_string,
            'ig': 2,  # hide adult: 1=yes, 2=no
            'szs': 15,  # min size: 15=75m, 16=100m,
            'sze': 24,  # max size: 24=2gig
            'st': 5,  # sort.  5=relevence, 4=size (smallest first)
            'ns': 1,  # no spam
            'sp': 1,  # don't show passworded files
            'nfo': 0,  # has to have nfo  1=yes, 0=no
        }
        full_url = url + urllib.parse.urlencode(query)
        self.url = full_url

        parsed = feedparser.parse(full_url)

        # NOTE: an unused `header` table-layout list was removed here; it
        # was computed but never read or returned.

        # rows of [title, size, date, shortname, nzbfile_url]
        show_data = []
        for show in parsed['entries']:
            dt = datetime.fromtimestamp(mktime(show['published_parsed']))
            date = dt.strftime('%b %d/%Y')

            size = U.pretty_filesize(show['links'][0]['length'])

            # the nzb urls sometimes contain spaces; normalize them
            nzbfile_url = show['links'][0]['href']
            nzbfile_url = nzbfile_url.replace(' ', '_')

            show_data.append([
                show['title'],
                size,
                date,
                self.shortname,
                nzbfile_url,
            ])

        return show_data
Example #12
0
    def generate(self):
        """Render the table (title, header row, body rows) to the console,
        then prompt the user and return their choice."""
        colors = Config.color.table
        title_bar = style(
            '|',
            fg=colors.bar.fg,
            bg=colors.header.bg,
        )
        bar = style(
            '|',
            fg=colors.bar.fg,
        )

        # TITLE --------------------------------------------
        title = '  %s' % self.table.title.text
        title = title.ljust(Config.console_columns)
        title = style(title, bold=True, fg=colors.title.fg, bg=colors.title.bg)
        click.echo(title)

        # HEADER ROW ---------------------------------------
        header = self.table.header
        header_row = [style(' ', bg=colors.header.bg, fg=colors.header.fg)]
        NUMBER_SPACE = 1
        BAR_COUNT = len(header.widths)
        # a width of 0 means "flex": stretch to the remaining console space
        flex_width = (Config.console_columns - sum(header.widths) -
                      NUMBER_SPACE - BAR_COUNT)

        for title, width, alignment in zip(header.titles, header.widths,
                                           header.alignments):
            if width == 0:
                width = flex_width
            if alignment == '<':
                title = title[:width].ljust(width)
            elif alignment == '>':
                title = title[:width].rjust(width)
            elif alignment == '=':
                title = title[:width].center(width)
            else:
                title = title[:width].ljust(width)

            header_row.append(
                style(
                    title,
                    bg=colors.header.bg,
                    fg=colors.header.fg,
                ))

        header_row = title_bar.join(header_row)

        click.echo(header_row)

        # BODY ROWS -----------------------------------------

        # key has the s, r, q, m removed to not interfere with the
        # ask_user options.  This list can have anything, as long as
        # they are single characters.  This is aprox 90 characters.
        key = """abcdefghijklnoptuvwxyzABCDEFGHIJKLMNOPQRSTUVW"""
        key += """XYZ0123456789!#$%&()*+-./:;<=>?@[\\]^_`{|}"'~"""
        key = list(key)

        self.table.body = self.table.body[:self.display_count]
        for row, counter in zip(self.table.body, key):
            # look through the title cell to see if any have 720 or
            # 1080 in the string and mark this row as high def.
            is_hidef = False
            if '720p' in row[0] or '1080p' in row[0]:
                is_hidef = True

            row_arr = [counter]
            for i, width, align in zip(row, header.widths, header.alignments):
                i = str(i)
                if width == 0:
                    width = flex_width
                row_item = i
                row_item = U.snip(row_item, width)
                row_item = row_item.strip()

                # BUG FIX: this was a chain of independent `if`s whose
                # trailing `else` re-ljust'ed '<' and '>' items; use an
                # elif chain like the header loop above.
                if align == '<':
                    row_item = row_item.ljust(width)
                elif align == '>':
                    row_item = row_item.rjust(width)
                elif align == '=':
                    row_item = row_item.center(width)
                else:
                    row_item = row_item.ljust(width)

                # if hi def, set the foreground to green
                if is_hidef:
                    row_item = style(row_item, fg=colors.hidef.fg)

                row_arr.append(row_item)
            click.echo(bar.join(row_arr))

        # USER INPUT ---------------------------------------
        choice = False
        while not choice:
            if self.is_postdownload:
                choice = self.ask_simple(key)
            else:
                if self.nondb:
                    choice = self.ask_simple(key)
                else:
                    choice = self.ask(key)
        return choice
Example #13
0
    def search(self, search_string, season=False, episode=False):
        """Search ExtraTorrent's RSS feed across the provider mirrors,
        returning rows of [title, size, date, seeds, shortname, magnet]."""
        if season and episode:
            search_string = '%s' % (self.se_ep(season, episode, search_string))

        encoded_search = urllib.parse.quote(search_string)
        words = search_string.split(' ')

        show_data = []
        for mirror in self.provider_urls:
            # cid=0 everything, cid=8 tv shows only
            category = 8 if (season and episode) else 0

            self.url = '{}/rss.xml?type=search&cid={}&search={}'.format(
                mirror, category, encoded_search)

            parsed = feedparser.parse(self.url)
            if 'bozo_exception' in parsed:
                continue
            if not parsed['entries']:
                continue

            for entry in parsed['entries']:
                try:
                    stamp = datetime.fromtimestamp(
                        mktime(entry['published_parsed']))
                    date = stamp.strftime('%b %d/%Y')
                    size = U.pretty_filesize(entry['size'])
                    title = entry['title']
                except KeyError:
                    continue

                # extratorrent returns results that match any word in the
                # search; skip any entry missing one of the search words
                if any(w.lower() not in title.lower() for w in words):
                    continue

                # the ExtraTorrent rss feed doesn't supply the magnet link, or
                # any usable links (They must be downloaded from the site).
                # But the feed has the URN hash, so build a magnet from it.
                magnet = 'magnet:?xt=urn:btih:{}&dn={}'.format(
                    entry['info_hash'], urllib.parse.quote(title))
                seeds = entry['seeders']
                if seeds == '---':
                    seeds = '0'

                show_data.append([
                    title,
                    size,
                    date,
                    seeds,
                    self.shortname,
                    magnet,
                ])

            return show_data

        return show_data
Example #14
0
def style(text, fg=None, bg=None, bold=None, strike=None, ul=None):
    """Apply terminal styling to *text*, dispatching by platform:
    click.style on Windows, U.style elsewhere."""
    if Config.is_win:
        return click.style(text, fg=fg, bg=bg, bold=bold, underline=ul)
    return U.style(text, fg=fg, bg=bg, bold=bold, strike=strike, ul=ul)
Example #15
0
    def search(self, search_string, season=False, episode=False, idx=None):

        """
        Default Search: Our default is prefix match
        Search 123 will match 123, 1234, 1234abcdefg
        Search 123 will not match 0123, ab123, ab123yz

        AND search:
        -----------
        the words hello and world:
        hello world

        NOT search:
        -----------
        the word hello but NOT the word world:
        hello -world

        We can't do NOT only search
        -world

        OR search:
        ----------
        the words hello or world:
        hello or world

        Each "or" is treated as new query part
        hello abcd or hello efgh != hello abcd or efgh

        grouping:
        ---------
        the exact phrase hello world:
        "hello world"
        """

        if season and episode:
            search_string = '%s' % (
                self.se_ep(
                    season, episode, search_string))

        url = '%s/nzbrss.aspx?' % self.provider_urls[0]
        query = {
            'q': search_string,
            'ig': 2,    # hide adult: 1=yes, 2=no
            'szs': 15,  # min size: 15=75m, 16=100m,
            'sze': 24,  # max size: 24=2gig
            'st': 5,    # sort.  5=relevence, 4=size (smallest first)
            'ns': 1,    # no spam
            'sp': 1,    # don't show passworded files
            'nfo': 0,   # has to have nfo  1=yes, 0=no
        }
        full_url = url + urllib.parse.urlencode(query)
        self.url = full_url

        parsed = feedparser.parse(full_url)

        # NOTE: an unused `header` table-layout list was removed here; it
        # was computed but never read or returned.

        # rows of [title, size, date, shortname, 'idx|nzbfile_url']
        show_data = []
        for show in parsed['entries']:
            dt = datetime.fromtimestamp(mktime(show['published_parsed']))
            date = dt.strftime('%b %d/%Y')

            size = U.pretty_filesize(show['links'][0]['length'])

            # the nzb urls sometimes contain spaces; normalize them
            nzbfile_url = show['links'][0]['href']
            nzbfile_url = nzbfile_url.replace(' ', '_')

            show_data.append([
                show['title'],
                size,
                date,
                self.shortname,
                '%s|%s' % (idx, nzbfile_url),
            ])

        return show_data
Example #16
0
    def search(self, search_string, season=False, episode=False):
        """Search the torrentapi JSON api, one query per search variant.

        Returns rows of [title, size, date, seeds, shortname, magnet];
        returns [] if the api token can't be fetched.
        """
        if season and episode:
            searches = self.se_ep(search_string, season, episode)
        else:
            searches = [search_string]

        # get token for api
        url = '{}?get_token=get_token&app_id=tvoverlord'.format(self.baseurl)

        try:
            r = requests.get(url)
        except requests.exceptions.ConnectionError:
            return []

        if r.status_code == 403:
            self.url = url
            return []

        try:
            j = r.json()
        except ValueError:
            return []  # not json

        token = j['token']

        search_data = []
        count = 0
        for search in searches:
            # the torrentapi only allows one query every two seconds
            if count > 0:
                time.sleep(2)
            count = count + 1

            search_tpl = '{}?mode=search&search_string={}&token={}&format=json_extended&sort=seeders&limit=100&app_id=tvoverlord'
            search_string = urllib.parse.quote(search)
            url = search_tpl.format(self.baseurl, search_string, token)
            # click.echo(url)
            self.url = self.url + ' ' + url

            try:
                r = requests.get(url)
            except requests.exceptions.ConnectionError:
                # can't connect, go to next url
                continue

            # BUG FIX: the token response is guarded against non-json
            # bodies above, but this one wasn't; apply the same guard.
            try:
                results = r.json()
            except ValueError:
                continue  # not json

            if 'error_code' in results.keys() and results['error_code'] == 20:
                continue  # no results found

            try:
                shows = results['torrent_results']
            except KeyError:
                # no results
                continue

            for show in shows:
                title = show['title']
                date = show['pubdate']
                date = date.split(' ')[0]
                size = show['size']
                size = U.pretty_filesize(size)
                seeds = show['seeders']
                magnet = show['download']

                search_data.append([title, size, date, seeds,
                                    self.shortname, magnet])

        return search_data
Example #17
0
    def generate(self):
        """Render the table (title, header row, body rows) to the console,
        then prompt the user and return their choice."""
        colors = Config.color.table
        title_bar = style(
            '|',
            fg=colors.bar.fg,
            bg=colors.header.bg,)
        bar = style(
            '|',
            fg=colors.bar.fg,
        )

        # TITLE --------------------------------------------
        title = '  %s' % self.table.title.text
        title = title.ljust(Config.console_columns)
        title = style(title,
                      bold=True,
                      fg=colors.title.fg,
                      bg=colors.title.bg)
        click.echo(title)

        # HEADER ROW ---------------------------------------
        header = self.table.header
        header_row = [style(' ', bg=colors.header.bg,
                                 fg=colors.header.fg)]
        NUMBER_SPACE = 1
        BAR_COUNT = len(header.widths)
        # a width of 0 means "flex": stretch to the remaining console space
        flex_width = (Config.console_columns - sum(header.widths) -
                      NUMBER_SPACE - BAR_COUNT)

        for title, width in zip(header.titles,
                                header.widths):
            if width == 0:
                width = flex_width
            title = title[:width].ljust(width)
            header_row.append(
                style(title,
                      bg=colors.header.bg,
                      fg=colors.header.fg,
                )
            )

        header_row = title_bar.join(header_row)

        click.echo(header_row)

        # BODY ROWS -----------------------------------------

        key = """abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVW"""
        key += """XYZ0123456789!#$%&()*+-./:;<=>?@[\\]^_`|}{"'~"""

        # BUG FIX: `options` was left unbound (NameError at the prompt)
        # when self.table_type matched none of the branches; default it.
        options = '\nLetter or [q]uit: '
        if self.table_type == 'download':
            options = '\nLetter, [s]kip, skip [r]est of show, [q]uit or [m]ark as downloaded: '
            key = re.sub('[srqm]', '', key)

        elif self.table_type == 'nondb':
            options = '\nLetter or [q]uit: '
            key = re.sub('[q]', '', key)

        elif self.table_type == 'copy':
            options = '\nLetter, [a]ll or [q]uit: '
            key = re.sub('[aq]', '', key)

        elif self.table_type == 'redownload':
            options = '\nLetter or [q]uit: '
            key = re.sub('[q]', '', key)

        self.table.body = self.table.body[:self.display_count]
        for row, counter in zip(self.table.body, key):
            # look through the title cell to see if any have 720 or
            # 1080 in the string and mark this row as high def.
            is_hidef = False
            if '720p' in row[0] or '1080p' in row[0]:
                is_hidef = True

            row_arr = [counter]
            for i, width, align in zip(row, header.widths, header.alignments):
                i = str(i)
                if width == 0:
                    width = flex_width
                row_item = i
                row_item = U.snip(row_item, width)
                row_item = row_item.strip()

                # BUG FIX: this was a chain of independent `if`s whose
                # trailing `else` re-ljust'ed '<' and '>' items; use elif.
                if align == '<':
                    row_item = row_item.ljust(width)
                elif align == '>':
                    row_item = row_item.rjust(width)
                elif align == '=':
                    row_item = row_item.center(width)
                else:
                    row_item = row_item.ljust(width)

                # if hi def, set the foreground to green
                if is_hidef:
                    row_item = style(row_item, fg=colors.hidef.fg)

                row_arr.append(row_item)
            click.echo(bar.join(row_arr))

        # USER INPUT ---------------------------------------
        choice = False
        while not choice:
            choice = self.ask(options, key)
        return choice
Example #18
0
    def search(self, search_string, season=False, episode=False):
        """Search nzbindex.com's RSS feed and return matching releases.

        Parameters
        ----------
        search_string : str
            Free-text query.  nzbindex ANDs all terms; supports
            ``foo | bar`` (OR), ``foo -bar`` (exclude) and
            ``"foo bar"`` (exact phrase).  Max 20 keywords.
        season, episode : int or False
            When both are given, the query is rewritten through
            ``self.se_ep`` into an "S01E02"-style search first.

        Returns
        -------
        list of [title, size, date, shortname, nzb_url] rows, one per
        entry in the RSS result feed.

        Notes
        -----
        Top level of the feedparser structure returned from NZBIndex:
        bozo, encoding, entries (the shows), feed (feed info), headers,
        href (rss link for these results), namespaces, status, version.
        """
        if season and episode:
            # se_ep already returns the rewritten query string.
            search_string = self.se_ep(season, episode, search_string)

        url = 'http://nzbindex.com/rss/?'
        query = {
            'q': search_string,
            'minage': '0',
            'sort': 'agedesc',  # age, agedesc, size, sizedesc
            'minsize': '100',   # mb
            'maxsize': '5000',  # mb
            'complete': '1',  # return only complete posts
            'max': '100',  # results per page
            'more': '1',
        }

        full_url = url + urllib.parse.urlencode(query)

        parsed = feedparser.parse(full_url)

        show_data = []
        for show in parsed['entries']:
            # published_parsed is a time.struct_time; render it in the
            # same "Mon DD/YYYY" format used by the other providers.
            dt = datetime.fromtimestamp(mktime(show['published_parsed']))
            date = dt.strftime('%b %d/%Y')

            # links[1] is the NZB enclosure; its 'length' is the size
            # in bytes -- TODO confirm index 1 is stable across feeds.
            size = U.pretty_filesize(show['links'][1]['length'])

            show_data.append([
                show['title'],
                size,
                date,
                self.shortname,
                show['links'][1]['href'],
            ])

        return show_data