def on_task_input(self, task, config):

        url = "http://hummingbird.me/api/v1/users/%s/library" % (config['username'])
        try:
            data = json.load(urllib2.urlopen(url))
        except ValueError:
            raise plugin.PluginError('Error getting list from hummingbird.')

        if not data:
            return

        entries = []
        chosen_lists = config['lists']
        for item in data:
            if item['status'] not in chosen_lists:
                continue
            if item['anime']['show_type'] == 'Movie':
                continue
            if config.get('currentonly') and item['anime']['status'] != 'Currently Airing':
                continue
            if config.get('finishedonly') and item['anime']['status'] != 'Finished Airing':
                continue    
            entry = Entry()
            entry['title'] = item['anime']['title']
            entry['url'] = ''
            if entry.isvalid():
                if config.get('latest'):
                    entry['series_episode'] = item['episodes_watched']
                    entry['series_id_type'] = 'sequence'
                    entry['title'] += ' ' + str(entry['series_episode'])
                entries.append(entry)

        return entries
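With the latest option above, the entry title is just the series title plus the number of episodes watched, used later as a sequence identifier. A minimal plain-Python sketch with a hypothetical library item:

item = {'anime': {'title': 'Example Show'}, 'episodes_watched': 12}  # hypothetical item

title = item['anime']['title']
series_episode = item['episodes_watched']
title += ' ' + str(series_episode)
print(title)  # -> "Example Show 12"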
Example #2
    def on_task_input(self, task, config):

        url = "http://hummingbird.me/api/v1/users/%s/library" % (config['username'])
        try:
            data = requests.get(url).json()
        except ValueError:
            raise plugin.PluginError('Error getting list from hummingbird.')

        if not data:
            return

        entries = []
        chosen_lists = config['lists']
        for item in data:
            if item['status'] not in chosen_lists:
                continue
            if item['anime']['show_type'] != 'Movie':
                continue 
            entry = Entry()
            entry['title'] = item['anime']['title']
            entry['url'] = ''
            if entry.isvalid():
                entries.append(entry)

        return entries
Example #3
 def on_task_input(self, task, config):
     json = self.get_page(task, config, 1)
     entries = []
     pages = int(math.ceil(json['totalRecords'] / config.get('page_size')))  # Sets number of requested pages
     current_series_id = 0  # Initializes current series parameter
     for page in range(1, pages + 1):
         json = self.get_page(task, config, page)
         for record in json['records']:
             # Verifies that we only get the first missing episode from a series
             if current_series_id != record['seriesId']:
                 current_series_id = record['seriesId']
                 season = record['seasonNumber']
                 episode = record['episodeNumber']
                 entry = Entry(url='',
                               series_name=record['series']['title'],
                               series_season=season,
                               series_episode=episode,
                               series_id='S%02dE%02d' % (season, episode),
                               tvdb_id=record['series'].get('tvdbId'),
                               tvrage_id=record['series'].get('tvRageId'),
                               tvmaze_id=record['series'].get('tvMazeId'),
                               title=record['series']['title'] + ' ' + 'S%02dE%02d' % (season, episode))
                 if entry.isvalid():
                     entries.append(entry)
                 else:
                     log.error('Invalid entry created? {}'.format(entry))
                 # Test mode logging
                 if entry and task.options.test:
                     log.verbose("Test mode. Entry includes:")
                     for key, value in list(entry.items()):
                         log.verbose('     {}: {}'.format(key.capitalize(), value))
     return entries
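The page count above comes from totalRecords and the configured page_size. A minimal arithmetic sketch with assumed values (the real numbers come from the API response and the plugin config):

import math

total_records = 95  # assumed value; the plugin reads json['totalRecords']
page_size = 50      # assumed value; the plugin reads config.get('page_size')
# float() keeps the division exact under Python 2 as well
pages = int(math.ceil(total_records / float(page_size)))
print(pages)  # -> 2, so pages 1..2 are requested via get_page()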
Example #4
    def _get_watchlist_entries(self, task, config):
        email = config.get("email")
        log.info("Retrieving npo.nl episode watchlist for %s", email)

        response = self._get_page(task, config, "https://mijn.npo.nl/profiel/kijklijst")
        page = get_soup(response.content)

        self.csrf_token = page.find("meta", attrs={"name": "csrf-token"})["content"]

        entries = list()
        for listItem in page.findAll("div", class_="watch-list-item"):
            url = listItem.find("a")["href"]
            series_name = next(listItem.find("h3").stripped_strings)
            remove_url = listItem.find("a", class_="unwatch-confirm")["href"]
            entry_date = self._parse_date(listItem.find("span", class_="global__content-info").text)

            episode_id = url.split("/")[-1]
            title = "{} ({})".format(series_name, episode_id)

            e = Entry()
            e["url"] = self._prefix_url("https://mijn.npo.nl", url)
            e["title"] = title
            e["series_name"] = series_name
            e["series_name_plain"] = self._strip_accents(series_name)
            e["series_date"] = entry_date
            e["series_id_type"] = "date"
            e["description"] = listItem.find("p").text
            e["remove_url"] = self._prefix_url("https://mijn.npo.nl", remove_url)

            if config.get("remove_accepted"):
                e.on_complete(self.entry_complete, task=task)

            entries.append(e)

        return entries
Example #5
 def create_entry(self, filepath, test_mode):
     """
     Creates a single entry using a filepath and a type (file/dir)
     """
     entry = Entry()
     entry["location"] = filepath
     entry["url"] = "file://{}".format(filepath)
     entry["filename"] = filepath.name
     if filepath.isfile():
         entry["title"] = filepath.namebase
     else:
         entry["title"] = filepath.name
     try:
         entry["timestamp"] = os.path.getmtime(filepath)
     except Exception as e:
         log.warning("Error setting timestamp for %s: %s" % (filepath, e))
         entry["timestamp"] = None
     if entry.isvalid():
         if test_mode:
             log.info("Test mode. Entry includes:")
             log.info("    Title: %s" % entry["title"])
             log.info("    URL: %s" % entry["url"])
             log.info("    Filename: %s" % entry["filename"])
             log.info("    Location: %s" % entry["location"])
             log.info("    Timestamp: %s" % entry["timestamp"])
         return entry
     else:
         log.error("Non valid entry created: {}".format(entry))
         return
Example #6
def lookup_movie(title, session, identifiers=None):
    try:
        imdb_lookup = plugin.get('imdb_lookup', 'movie_list').lookup
    except DependencyError:
        imdb_lookup = None

    try:
        tmdb_lookup = plugin.get('tmdb_lookup', 'movie_list').lookup
    except DependencyError:
        tmdb_lookup = None

    if not (imdb_lookup or tmdb_lookup):
        return

    entry = Entry(title=title)
    if identifiers:
        for identifier in identifiers:
            for key, value in identifier.items():
                entry[key] = value
    try:
        imdb_lookup(entry, session=session)
    # IMDB lookup raises PluginError instead of the normal ValueError
    except PluginError:
        tmdb_lookup(entry)

    # Return only if lookup was successful
    if entry.get('movie_name'):
        return entry
    return
Example #7
    def entries_from_lines(self, lines):
        """

        :param lines: list of lines
        :return list: list of entries generated from lines
        """
        entries = []
        for line in lines:
            entry = Entry(irc_raw_message=line)

            # Use the message as title
            entry['title'] = line

            # find a url...
            url_match = URL_MATCHER.findall(line)
            if url_match:
                # We have a URL(s)!, generate an entry
                urls = list(url_match)
                url = urls[-1]
                entry.update({'urls': urls, 'url': url})

            if not entry.get('url'):
                log.error('Parsing message failed. No url found.')
                continue

            entries.append(entry)

        return entries
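URL_MATCHER is defined elsewhere in the plugin module, so the pattern below is only an assumption; the sketch shows how the last URL found in a message becomes the entry's url:

import re

URL_MATCHER = re.compile(r'https?://\S+')  # assumed pattern; the real one lives in the plugin module
line = 'New release! http://example.com/a.torrent http://example.com/b.torrent'
urls = URL_MATCHER.findall(line)
if urls:
    print({'urls': urls, 'url': urls[-1]})  # the last URL is used as the entry's 'url'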
Example #8
    def on_task_input(self, task, config):
        email = config.get('email')
        log.info('Retrieving npo.nl watchlist for %s', email)

        profile_response = self._get_profile(task, config)
        profile_page = BeautifulSoup(profile_response.content, 'html5lib')

        self.csrf_token = profile_page.find('meta', attrs={'name': 'csrf-token'})['content']

        entries = list()
        for listItem in profile_page.findAll('div', class_='list-item'):
            title = next(listItem.find('h4').stripped_strings)
            subtitle = listItem.find('h5').text
            url = listItem.find('a')['href']
            remove_url = listItem.find('a', class_='remove-button')['href']

            if subtitle == 'Serie':
                log.debug('Skipping series entry for %s', title)
                continue

            e = Entry()
            e['title'] = '{} ({})'.format(title, subtitle)
            e['url'] = url

            e['series_name'] = title
            e['series_name_plain'] = self._strip_accents(title)
            e['description'] = listItem.find('p').text
            e['remove_url'] = remove_url

            if config.get('remove_accepted'):
                e.on_complete(self.entry_complete, task=task)

            entries.append(e)

        return entries
Example #9
    def _get_watchlist_entries(self, task, config):
        email = config.get('email')
        log.info('Retrieving npo.nl episode watchlist for %s', email)

        response = self._get_page(task, config, 'https://mijn.npo.nl/profiel/kijklijst')
        page = get_soup(response.content)

        self.csrf_token = page.find('meta', attrs={'name': 'csrf-token'})['content']

        entries = list()
        for list_item in page.findAll('div', class_='watch-list-item'):
            url = list_item.find('a')['href']
            series_name = next(list_item.find('h3').stripped_strings)
            remove_url = list_item.find('a', class_='unwatch-confirm')['href']
            entry_date = self._parse_date(list_item.find('span', class_='global__content-info').text)

            episode_id = url.split('/')[-1]
            title = '{} ({})'.format(series_name, episode_id)

            e = Entry()
            e['url'] = self._prefix_url('https://mijn.npo.nl', url)
            e['title'] = title
            e['series_name'] = series_name
            e['series_name_plain'] = self._convert_plain(series_name)
            e['series_date'] = entry_date
            e['series_id_type'] = 'date'
            e['description'] = list_item.find('p').text
            e['remove_url'] = self._prefix_url('https://mijn.npo.nl', remove_url)

            if config.get('remove_accepted'):
                e.on_complete(self.entry_complete, task=task)

            entries.append(e)

        return entries
Example #10
    def entries_from_linepatterns(self, lines):
        """

        :param lines: list of lines from irc
        :return list: list of entries generated from lines
        """
        entries = []
        for line in lines:
            # If it's listed in ignore lines, skip it
            ignore = False
            for rx, expected in self.ignore_lines:
                if rx.match(line) and expected:
                    log.debug('Ignoring message: matched ignore line')
                    ignore = True
                    break
            if ignore:
                continue

            entry = Entry(irc_raw_message=line)

            match = self.match_message_patterns(self.linepatterns, line)

            # Generate the entry and process it through the linematched rules
            if not match:
                log.error('Failed to parse message. Skipping: %s', line)
                continue

            entry.update(match)

            entries.append(entry)

        return entries
Example #11
 def search_entry(self, series, season, episode, task, rerun=True):
     # Extract the alternate names for the series
     alts = [alt.alt_name for alt in series.alternate_names]
     # Also consider series name without parenthetical (year, country) an alternate name
     paren_match = re.match(r'(.+?)( \(.+\))?$', series.name)
     if paren_match.group(2):
         alts.append(paren_match.group(1))
     if series.identified_by == 'ep':
         search_strings = ['%s %s' % (series.name, id) for id in self.ep_identifiers(season, episode)]
         series_id = 'S%02dE%02d' % (season, episode)
         for alt in alts:
             search_strings.extend(['%s %s' % (alt, id) for id in self.ep_identifiers(season, episode)])
     else:
         search_strings = ['%s %s' % (series.name, id) for id in self.sequence_identifiers(episode)]
         series_id = episode
         for alt in alts:
             search_strings.extend(['%s %s' % (alt, id) for id in self.sequence_identifiers(episode)])
     entry = Entry(title=search_strings[0], url='',
                   search_strings=search_strings,
                   series_name=series.name,
                   series_alternate_names=alts,  # Not sure if this field is useful down the road.
                   series_season=season,
                   series_episode=episode,
                   season_pack_lookup=False,
                   series_id=series_id,
                   series_id_type=series.identified_by)
     if rerun:
         entry.on_complete(self.on_search_complete, task=task, identified_by=series.identified_by)
     return entry
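The parenthetical-stripping regex above treats a trailing "(year)" or "(country)" as optional, so the bare name gets added as an alternate. A quick sketch with a hypothetical series name:

import re

name = 'The Office (US)'  # hypothetical series name
paren_match = re.match(r'(.+?)( \(.+\))?$', name)
if paren_match.group(2):
    print(paren_match.group(1))  # -> "The Office", added to the alternate names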
Example #12
def do_cli(manager, options):
    if not options.url:
        # Determine if first positional argument is a URL or a title
        if '://' in options.title:
            options.url = options.title
            options.title = None

    if options.url and not options.title:
        # Attempt to get a title from the URL response's headers
        try:
            value, params = cgi.parse_header(requests.head(options.url).headers['Content-Disposition'])
            options.title = params['filename']
        except KeyError:
            console('No title given, and couldn\'t get one from the URL\'s HTTP response. Aborting.')
            return

    entry = Entry(title=options.title)
    if options.url:
        entry['url'] = options.url
    else:
        entry['url'] = 'http://localhost/inject/%s' % ''.join(random.sample(string.letters + string.digits, 30))
    if options.force:
        entry['immortal'] = True
    if options.accept:
        entry.accept(reason='accepted by CLI inject')
    if options.fields:
        for key, value in options.fields:
            entry[key] = value
    options.inject = [entry]
    manager.execute_command(options)
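The title fallback above relies on a Content-Disposition header. A minimal sketch with a hypothetical header value (note the cgi module is deprecated in newer Python 3 releases):

import cgi

header = 'attachment; filename="Example.S01E01.720p.torrent"'  # hypothetical header value
value, params = cgi.parse_header(header)
print(params['filename'])  # -> "Example.S01E01.720p.torrent"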
Example #13
 def create_entry(self, filepath, test_mode):
     """
     Creates a single entry using a filepath and a type (file/dir)
     """
     filepath = filepath.abspath()
     entry = Entry()
     entry['location'] = filepath
     entry['url'] = urlparse.urljoin('file:', urllib.pathname2url(filepath.encode('utf8')))
     entry['filename'] = filepath.name
     if filepath.isfile():
         entry['title'] = filepath.namebase
     else:
         entry['title'] = filepath.name
     try:
         entry['timestamp'] = datetime.fromtimestamp(filepath.getmtime())
     except Exception as e:
         log.warning('Error setting timestamp for %s: %s' % (filepath, e))
         entry['timestamp'] = None
     if entry.isvalid():
         if test_mode:
             log.info("Test mode. Entry includes:")
             log.info("    Title: %s" % entry["title"])
             log.info("    URL: %s" % entry["url"])
             log.info("    Filename: %s" % entry["filename"])
             log.info("    Location: %s" % entry["location"])
             log.info("    Timestamp: %s" % entry["timestamp"])
         return entry
     else:
         log.error('Non valid entry created: %s ' % entry)
         return
Example #14
    def on_task_input(self, task, config):
        """Creates an entry for each item in your couchpotato wanted list.

        Syntax:

        couchpotato:
          base_url: <value>
          port: <value>
          api_key: <value>

        Options base_url, port and api_key are required.
        """

        url = '%s:%s/api/%s/movie.list?status=active' \
              % (config['base_url'], config['port'], config['api_key'])
        json = task.requests.get(url).json()
        entries = []
        for movie in json['movies']:
            if movie['status'] == 'active':
                title = movie["title"]
                imdb = movie['info']['imdb']
                tmdb = movie['info']['tmdb_id']
                entry = Entry(title=title,
                              url='',
                              imdb_id=imdb,
                              tmdb_id=tmdb)
                if entry.isvalid():
                    entries.append(entry)
                else:
                    log.debug('Invalid entry created? %s' % entry)

        return entries
Example #15
    def on_task_input(self, task, config):
        '''
        This plugin returns ALL of the shows monitored by Sonarr.
        This includes both ongoing and ended.
        Syntax:

        sonarr:
          base_url: <value>
          port: <value>
          api_key: <value>
          include_ended: <yes|no>
          only_monitored: <yes|no>

        Options base_url, port and api_key are required.

        Use with an input plugin like discover and/or configure_series.
        Example:

        download-tv-task:
          configure_series:
            settings:
              quality:
                - 720p
            from:
              sonarr:
                base_url: http://localhost
                port: 8989
                api_key: MYAPIKEY1123
          discover:
            what:
              - emit_series: yes
            from:
              torrentz: any
          download:
            /download/tv

        Note that when using the configure_series plugin with Sonarr
        you are basically synced to it, so removing a show in Sonarr will
        remove it in FlexGet as well, which could be positive or negative,
        depending on your usage.
        '''
        url = '%s:%s/api/series' % (config['base_url'], config['port'])
        headers = {'X-Api-Key': config['api_key']}
        json = task.requests.get(url, headers=headers).json()
        entries = []
        for show in json:
            if show['monitored'] or not config.get('only_monitored'):
                if config.get('include_ended') or show['status'] != 'ended':
                    entry = Entry(title=show['title'],
                                  url='',
                                  series_name=show['title'],
                                  tvdb_id=show['tvdbId'],
                                  tvrage_id=show['tvRageId'])
                    if entry.isvalid():
                        entries.append(entry)
                    else:
                        log.debug('Invalid entry created? %s' % entry)

        return entries
Example #16
    def items(self):
        if self._items is None:
            endpoint = self.get_list_endpoint()

            log.verbose('Retrieving `%s` list `%s`' % (self.config['type'], self.config['list']))
            try:
                result = self.session.get(get_api_url(endpoint))
                try:
                    data = result.json()
                except ValueError:
                    log.debug('Could not decode json from response: %s', result.text)
                    raise plugin.PluginError('Error getting list from trakt.')
            except RequestException as e:
                raise plugin.PluginError('Could not retrieve list from trakt (%s)' % e.args[0])

            if not data:
                log.warning('No data returned from trakt for %s list %s.' % (self.config['type'], self.config['list']))
                return []

            entries = []
            list_type = (self.config['type']).rstrip('s')
            for item in data:
                if self.config['type'] == 'auto':
                    list_type = item['type']
                # Collection and watched lists don't return 'type' along with the items (right now)
                if 'type' in item and item['type'] != list_type:
                    log.debug('Skipping %s because it is not a %s' % (item[item['type']].get('title', 'unknown'),
                                                                      list_type))
                    continue
                if list_type != 'episode' and not item[list_type]['title']:
                    # Skip shows/movies with no title
                    log.warning('Item in trakt list does not appear to have a title, skipping.')
                    continue
                entry = Entry()
                if list_type == 'episode':
                    entry['url'] = 'http://trakt.tv/shows/%s/seasons/%s/episodes/%s' % (
                        item['show']['ids']['slug'], item['episode']['season'], item['episode']['number'])
                else:
                    entry['url'] = 'http://trakt.tv/%s/%s' % (list_type, item[list_type]['ids'].get('slug'))
                entry.update_using_map(field_maps[list_type], item)
                # Override the title if strip_dates is on. TODO: a better way?
                if self.config.get('strip_dates'):
                    if list_type in ['show', 'movie']:
                        entry['title'] = item[list_type]['title']
                    elif list_type == 'episode':
                        entry['title'] = '{show[title]} S{episode[season]:02}E{episode[number]:02}'.format(**item)
                        if item['episode']['title']:
                            entry['title'] += ' {episode[title]}'.format(**item)
                if entry.isvalid():
                    if self.config.get('strip_dates'):
                        # Remove year from end of name if present
                        entry['title'] = re.sub(r'\s+\(\d{4}\)$', '', entry['title'])
                    entries.append(entry)
                else:
                    log.debug('Invalid entry created? %s' % entry)

            self._items = entries
        return self._items
Example #17
    def list_entries(self):
        series_url, series_headers = self.request_builder(
            self.config.get("base_url"), "series", self.config.get("port"), self.config["api_key"]
        )
        json = self.get_json(series_url, series_headers)

        # Retrieves Sonarr's profile list if include_data is set to true
        if self.config.get("include_data"):
            profile_url, profile_headers = self.request_builder(
                self.config.get("base_url"), "profile", self.config.get("port"), self.config["api_key"]
            )
            profiles_json = self.get_json(profile_url, profile_headers)

        entries = []
        for show in json:
            fg_qualities = ""  # Initializes the quality parameter
            fg_cutoff = ""
            path = None
            if not show["monitored"] and self.config.get(
                "only_monitored"
            ):  # Checks if to retrieve just monitored shows
                continue
            if show["status"] == "ended" and not self.config.get("include_ended"):  # Checks if to retrieve ended shows
                continue
            if self.config.get("include_data") and profiles_json:  # Check if to retrieve quality & path
                path = show.get("path")
                for profile in profiles_json:
                    if profile["id"] == show["profileId"]:  # Get show's profile data from all possible profiles
                        fg_qualities, fg_cutoff = self.quality_requirement_builder(profile)
            entry = Entry(
                title=show["title"],
                url="",
                series_name=show["title"],
                tvdb_id=show.get("tvdbId"),
                tvrage_id=show.get("tvRageId"),
                tvmaze_id=show.get("tvMazeId"),
                imdb_id=show.get("imdbid"),
                slug=show.get("titleSlug"),
                sonarr_id=show.get("id"),
                configure_series_target=fg_cutoff,
            )
            if len(fg_qualities) > 1:
                entry["configure_series_qualities"] = fg_qualities
            elif len(fg_qualities) == 1:
                entry["configure_series_quality"] = fg_qualities[0]
            else:
                entry["configure_series_quality"] = fg_qualities
            if path:
                entry["configure_series_path"] = path
            if entry.isvalid():
                log.debug("returning entry %s", entry)
                entries.append(entry)
            else:
                log.error("Invalid entry created? %s" % entry)
                continue

        return entries
Example #18
    def entries_from_multilinepatterns(self, lines):
        """

        :param lines: list of lines
        :return list: list of entries generated from lines
        """
        entries = []
        rest = []  # contains the rest of the lines
        while len(lines) > 0:
            entry = Entry()
            raw_message = ''
            matched_lines = []
            for idx, (rx, vals, optional) in enumerate(self.multilinepatterns):
                log.debug('Using pattern %s to parse message vars', rx.pattern)
                # find the next candidate line
                line = ''
                for l in list(lines):
                    # skip ignored lines
                    for ignore_rx, expected in self.ignore_lines:
                        if ignore_rx.match(l) and expected:
                            log.debug('Ignoring message: matched ignore line')
                            lines.remove(l)
                            break
                    else:
                        line = l
                        break

                raw_message += '\n' + line
                match = self.match_message_patterns([(rx, vals, optional)], line)
                if match:
                    entry.update(match)
                    matched_lines.append(line)
                    lines.remove(line)
                elif optional:
                    log.debug('No match for optional extract pattern found.')
                elif not line:
                    rest = matched_lines + lines
                    break
                elif (
                    idx == 0
                ):  # if it's the first regex that fails, then it's probably just garbage
                    log.error('No matches found for pattern %s', rx.pattern)
                    lines.remove(line)
                    rest = lines
                    break
                else:
                    log.error('No matches found for pattern %s', rx.pattern)
                    rest = lines
                    break

            else:
                entry['irc_raw_message'] = raw_message

                entries.append(entry)
                continue

        return entries, rest
Example #19
    def _parse_tiles(self, task, config, tiles, series_info=None):
        max_age = config.get('max_episode_age_days')
        entries = list()

        if tiles is not None:
            for list_item in tiles.findAll('div', class_='npo-asset-tile-container'):
                url = list_item.find('a')['href']
                episode_name = next(list_item.find('h2').stripped_strings)
                timer = next(list_item.find('div', class_='npo-asset-tile-timer').stripped_strings)
                remove_url = list_item.find('div', class_='npo-asset-tile-delete')

                episode_id = url.split('/')[-1]
                title = '{} ({})'.format(episode_name, episode_id)

                not_available = list_item.find('div', class_='npo-asset-tile-availability')['data-to']
                if not_available:
                    log.debug('Skipping %s, no longer available', title)
                    continue

                entry_date = url.split('/')[4]
                entry_date = self._parse_date(entry_date)

                if max_age >= 0 and (date.today() - entry_date) > timedelta(days=max_age):
                    log.debug('Skipping %s, aired on %s', title, entry_date)
                    continue

                if not series_info:
                    tile_series_info = self._get_series_info(task, config, episode_name, url)
                else:
                    tile_series_info = series_info

                series_name = tile_series_info['npo_name']

                e = Entry()
                e['url'] = url
                e['title'] = title
                e['series_name'] = series_name
                e['series_name_plain'] = self._convert_plain(series_name)
                e['series_date'] = entry_date
                e['series_id_type'] = 'date'
                e['npo_url'] = tile_series_info['npo_url']
                e['npo_name'] = tile_series_info['npo_name']
                e['npo_description'] = tile_series_info['npo_description']
                e['npo_language'] = tile_series_info['npo_language']
                e['npo_runtime'] = timer.strip('min').strip()
                e['language'] = tile_series_info['npo_language']

                if remove_url and remove_url['data-link']:
                    e['remove_url'] = remove_url['data-link']

                    if config.get('remove_accepted'):
                        e.on_complete(self.entry_complete, task=task)

                entries.append(e)

        return entries
Example #20
    def on_task_input(self, task, config):
        url = config['url']
        if '://' in url:
            lines = task.requests.get(url).iter_lines()
        else:
            lines = open(url, 'rb').readlines()

        entry_config = config.get('entry')
        format_config = config.get('format', {})

        entries = []
        # keep track what fields have been found
        used = {}
        entry = Entry()

        # now parse text
        for line in lines:
            for field, regexp in entry_config.iteritems():
                #log.debug('search field: %s regexp: %s' % (field, regexp))
                match = re.search(regexp, line)
                if match:
                    # check if used field detected, in such case start with new entry
                    if field in used:
                        if entry.isvalid():
                            log.info('Found field %s again before entry was completed. \
                                      Adding current incomplete, but valid entry and moving to next.' % field)
                            self.format_entry(entry, format_config)
                            entries.append(entry)
                        else:
                            log.info('Invalid data, entry field %s is already found once. Ignoring entry.' % field)
                        # start new entry
                        entry = Entry()
                        used = {}

                    # add field to entry
                    try:
                        entry[field] = match.group(1)
                    except IndexError:
                        log.error('regex for field `%s` must contain a capture group' % field)
                        raise plugin.PluginError('Your text plugin config contains errors, please correct them.')
                    used[field] = True
                    log.debug('found field: %s value: %s' % (field, entry[field]))

                # if all fields have been found
                if len(used) == len(entry_config):
                    # check that entry has atleast title and url
                    if not entry.isvalid():
                        log.info('Invalid data, constructed entry is missing mandatory fields (title or url)')
                    else:
                        self.format_entry(entry, format_config)
                        entries.append(entry)
                        log.debug('Added entry %s' % entry)
                        # start new entry
                        entry = Entry()
                        used = {}
        return entries
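Each field regexp in the text plugin config must contain a capture group, as the IndexError branch above enforces. A small sketch with a hypothetical entry config:

import re

entry_config = {'title': r'Title:\s*(.+)', 'url': r'URL:\s*(\S+)'}  # hypothetical config
line = 'Title: Example Release 720p'
for field, regexp in entry_config.items():
    match = re.search(regexp, line)
    if match:
        print(field, match.group(1))  # only the 'title' field matches this line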
Example #21
    def on_task_input(self, task, config):
        if not task.requests.cookies:
            username = config['username']
            password = config['password']

            log.debug("Logging in to %s ..." % URL)
            params = {
                'username': username,
                'password': password,
                'action': 'Login'
            }
            try:
                loginsrc = task.requests.post(URL + 'login.php', data=params)
                if 'login' in loginsrc.url:
                    raise plugin.PluginWarning(('Login to myepisodes.com failed, please check '
                                                'your account data or see if the site is down.'), log)
            except RequestException as e:
                raise plugin.PluginError("Error logging in to myepisodes: %s" % e)

        page = task.requests.get(URL + "myshows/manage/").content
        try:
            soup = get_soup(page)
        except Exception as e:
            raise plugin.PluginError("Unable to parse myepisodes.com page: %s" % (e,))

        entries = []

        def show_list(select_id):
            return soup.find('select', {'id': select_id}).findAll('option')

        options = show_list('shows')
        if config['include_ignored']:
            options = chain(options, show_list('ignored_shows'))
        for option in options:
            name = option.text
            if config.get('strip_dates'):
                # Remove year from end of name if present
                name = re.sub(r'\s+\(\d{4}\)$', '', name)
            showid = option.get('value')
            url = '%sviews.php?type=epsbyshow&showid=%s' % (URL, showid)

            entry = Entry()
            entry['title'] = name
            entry['url'] = url
            entry['series_name'] = name
            entry['myepisodes_id'] = showid

            if entry.isvalid():
                entries.append(entry)
            else:
                log.debug('Invalid entry created? %s' % entry)

        if not entries:
            log.warning("No shows found on myepisodes.com list. Maybe you need to add some first?")

        return entries
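The strip_dates option above removes a trailing year with a small regex; for example (hypothetical show name):

import re

name = 'Fargo (2014)'  # hypothetical show name
print(re.sub(r'\s+\(\d{4}\)$', '', name))  # -> "Fargo"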
Example #22
 def make_entry(self, fields, season, episode):
     entry = Entry()
     entry.update(fields)
     entry['series_season'] = season
     entry['series_episode'] = episode
     entry['series_id_type'] = 'ep'
     entry['series_id'] = 'S%02dE%02d' % (season, episode)
     entry['title'] = entry['series_name'] + ' ' + entry['series_id']
     entry['url'] = 'http://trakt.tv/shows/%s/seasons/%s/episodes/%s' % (fields['trakt_id'], season, episode)
     return entry
Example #23
 def make_entry(self, fields, season, episode):
     entry = Entry()
     entry.update(fields)
     entry["series_season"] = season
     entry["series_episode"] = episode
     entry["series_id_type"] = "ep"
     entry["series_id"] = "S%02dE%02d" % (season, episode)
     entry["title"] = entry["series_name"] + " " + entry["series_id"]
     entry["url"] = "http://trakt.tv/shows/%s/seasons/%s/episodes/%s" % (fields["trakt_id"], season, episode)
     return entry
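Both make_entry variants above compose the same series_id, title, and trakt URL. A plain-Python sketch with hypothetical field values:

fields = {'series_name': 'Example Show', 'trakt_id': 12345}  # hypothetical values
season, episode = 2, 5
series_id = 'S%02dE%02d' % (season, episode)
title = fields['series_name'] + ' ' + series_id
url = 'http://trakt.tv/shows/%s/seasons/%s/episodes/%s' % (fields['trakt_id'], season, episode)
print(title)  # -> "Example Show S02E05"
print(url)    # -> "http://trakt.tv/shows/12345/seasons/2/episodes/5"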
Example #24
    def _parse_tiles(self, task, config, tiles, series_info):
        max_age = config.get('max_episode_age_days')
        entries = []

        if tiles is not None:
            for tile in tiles:
                # there is only one list_item per tile
                for list_item in get_soup(tile).findAll('div', class_='npo-asset-tile-container'):
                    episode_id = list_item['id']
                    log.debug('Parsing episode: %s', episode_id)

                    url = list_item.find('a')['href']
                    # Check if the URL found to the episode matches the expected pattern
                    if len(url.split('/')) != 6:
                        log.verbose('Skipping %s, the URL has an unexpected pattern: %s', episode_id, url)
                        continue  # something is wrong; skip this episode

                    episode_name = list_item.find('h2')
                    if episode_name:
                        title = '{} ({})'.format(next(episode_name.stripped_strings), episode_id)
                    else:
                        title = '{}'.format(episode_id)
                    timer = next(list_item.find('div', class_='npo-asset-tile-timer').stripped_strings)
                    remove_url = list_item.find('div', class_='npo-asset-tile-delete')

                    not_available = list_item.find('div', class_='npo-asset-tile-availability')['data-to']
                    if not_available:
                        log.debug('Skipping %s, no longer available', title)
                        continue

                    entry_date = url.split('/')[-2]
                    entry_date = self._parse_date(entry_date)

                    if max_age >= 0 and date.today() - entry_date > timedelta(days=max_age):
                        log.debug('Skipping %s, aired on %s', title, entry_date)
                        continue

                    e = Entry(series_info)
                    e['url'] = url
                    e['title'] = title
                    e['series_name'] = series_info['npo_name']
                    e['series_name_plain'] = self._convert_plain(series_info['npo_name'])
                    e['series_date'] = entry_date
                    e['series_id_type'] = 'date'
                    e['npo_id'] = episode_id
                    e['npo_runtime'] = timer.strip('min').strip()
                    e['language'] = series_info['npo_language']

                    if remove_url and remove_url['data-link']:
                        e['remove_url'] = remove_url['data-link']
                        if config.get('remove_accepted'):
                            e.on_complete(self.entry_complete, task=task)
                    entries.append(e)

        return entries
Example #25
    def on_task_input(self, task, config):
        if config.get('account') and not config.get('username'):
            config['username'] = '******'
        session = get_session(account=config.get('account'))
        endpoint = ['users', config['username']]
        if type(config['list']) is dict:
            endpoint += ('ratings', config['type'], config['list']['rating'])
        elif config['list'] in ['collection', 'watchlist', 'watched', 'ratings']:
            endpoint += (config['list'], config['type'])
        else:
            endpoint += ('lists', make_list_slug(config['list']), 'items')

        log.verbose('Retrieving `%s` list `%s`' % (config['type'], config['list']))
        try:
            result = session.get(get_api_url(endpoint))
            try:
                data = result.json()
            except ValueError:
                log.debug('Could not decode json from response: %s', result.text)
                raise plugin.PluginError('Error getting list from trakt.')
        except RequestException as e:
            raise plugin.PluginError('Could not retrieve list from trakt (%s)' % e.args[0])

        if not data:
            log.warning('No data returned from trakt for %s list %s.' % (config['type'], config['list']))
            return

        entries = []
        list_type = (config['type']).rstrip('s')
        for item in data:
            # Collection and watched lists don't return 'type' along with the items (right now)
            if 'type' in item and item['type'] != list_type:
                log.debug('Skipping %s because it is not a %s' % (item[item['type']].get('title', 'unknown'),
                                                                  list_type))
                continue
            if not item[list_type]['title']:
                # There seems to be some bad shows sometimes in lists with no titles. Skip them.
                log.warning('Item in trakt list does not appear to have a title, skipping.')
                continue
            entry = Entry()
            if list_type == 'episode':
                entry['url'] = 'http://trakt.tv/shows/%s/seasons/%s/episodes/%s' % (
                    item['show']['ids']['slug'], item['episode']['season'], item['episode']['number'])
            else:
                entry['url'] = 'http://trakt.tv/%s/%s' % (list_type, item[list_type]['ids'].get('slug'))
            entry.update_using_map(field_maps[list_type], item)
            if entry.isvalid():
                if config.get('strip_dates'):
                    # Remove year from end of name if present
                    entry['title'] = re.sub(r'\s+\(\d{4}\)$', '', entry['title'])
                entries.append(entry)
            else:
                log.debug('Invalid entry created? %s' % entry)

        return entries
Example #26
    def on_task_input(self, task, config):
        '''
        This plugin returns ALL of the shows monitored by Sickbeard.
        This includes both ongoing and ended.
        Syntax:

        sickbeard:
          base_url: <value>
          port: <value>
          api_key: <value>

        Use with an input plugin like discover and/or configure_series.
        Example:

        download-tv-task:
          configure_series:
            settings:
              quality:
                - 720p
            from:
              sickbeard:
                base_url: http://localhost
                port: 8531
                api_key: MYAPIKEY1123
          discover:
            what:
              - emit_series: yes
            from:
              torrentz: any
          download:
            /download/tv

        Note that when using the configure_series plugin with Sickbeard
        you are basically synced to it, so removing a show in Sickbeard will
        remove it in FlexGet as well, which could be positive or negative,
        depending on your usage.
        '''
        url = '%s:%s/api/%s/?cmd=shows' % (config['base_url'], config['port'], config['api_key'])
        json = task.requests.get(url).json()
        entries = []
        for id, show in json['data'].items():
            if not show['paused'] or not config.get('only_monitored'):
                if config.get('include_ended') or show['status'] != 'Ended':
                    entry = Entry(title=show['show_name'],
                                  url='',
                                  series_name=show['show_name'],
                                  tvdb_id=show['tvdbid'],
                                  tvrage_id=show['tvrage_id'])
                    if entry.isvalid():
                        entries.append(entry)
                    else:
                        log.debug('Invalid entry created? %s' % entry)

        return entries
Example #27
    def on_task_input(self, task, config):
        config = get_config(config)

        log.debug('Starting MyAnimeList plugin')
        # Retrieve username and remove invalid characters
        username = safe_username(config['username'])

        status = config.get('list', 'watching')

        url = self.API_URL % username
        log.verbose('Retrieving MyAnimeList on %s.', url)

        headers = {'User-Agent': config.get('user-agent', self.user_agent)}
        log.debug('Using %s', headers)

        resp = task.requests.get(url, headers=headers)
        if not resp or resp.status_code != 200:
            log.warning('No data returned from MyAnimeList.')
            return

        content_type = resp.headers.get('content-type')
        if content_type == 'application/xml; charset=UTF-8':
            data = parse_xml(resp.text.encode('utf-8'))
            log.debug('Parsed xml to list of dicts')
        else:
            log.warning('Content type not xml: %s' % content_type)
            data = ''

        if not isinstance(data, list):
            raise plugin.PluginError('Incompatible response: %r.' % data)

        entries = []
        for item in data:
            if item['my_status'] == maps['my_status'][status]:
                entry = Entry()
                entry.update_using_map(anime_map, item, ignore_none=True)

                names = item['series_synonyms']
                if names and ';' in names:
                    log.debug('Parsing series_synonyms: %s', names)
                    names = [n.strip() for n in names.split(';')]
                    names = [n for n in names if n and n != item['series_title']]
                    if names:
                        entry['configure_series_alternate_name'] = names
                    log.debug('Added alternate names: %r', names)

                if entry.isvalid():
                    entries.append(entry)
                    log.debug('Appended entry: %s', entry.get('title'))
                else:
                    log.debug('Invalid entry? %s', entry)

        log.debug('Returning %s entries', len(entries))
        return entries
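The series_synonyms handling above splits on ';' and drops empty names and the main title. A sketch with a hypothetical item:

item = {'series_title': 'Example Title',
        'series_synonyms': 'Example Title; Alt One; ; Alt Two'}  # hypothetical item

names = item['series_synonyms']
if names and ';' in names:
    names = [n.strip() for n in names.split(';')]
    names = [n for n in names if n and n != item['series_title']]
print(names)  # -> ['Alt One', 'Alt Two']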
Example #28
    def list_entries(self):
        series_url, series_headers = self.request_builder(self.config.get('base_url'), 'series',
                                                          self.config.get('port'), self.config['api_key'])
        json = self.get_json(series_url, series_headers)

        # Retrieves Sonarr's profile list if include_data is set to true
        if self.config.get('include_data'):
            profile_url, profile_headers = self.request_builder(self.config.get('base_url'), 'profile',
                                                                self.config.get('port'),
                                                                self.config['api_key'])
            profiles_json = self.get_json(profile_url, profile_headers)

        entries = []
        for show in json:
            fg_qualities = ''  # Initializes the quality parameter
            fg_cutoff = ''
            path = None
            if not show['monitored'] and self.config.get(
                    'only_monitored'):  # Skip unmonitored shows when only_monitored is set
                continue
            if show['status'] == 'ended' and not self.config.get('include_ended'):  # Skip ended shows unless include_ended is set
                continue
            if self.config.get('include_data') and profiles_json:  # Look up quality profile and path when include_data is set
                path = show.get('path')
                for profile in profiles_json:
                    if profile['id'] == show['profileId']:  # Get show's profile data from all possible profiles
                        fg_qualities, fg_cutoff = self.quality_requirement_builder(profile)
            entry = Entry(title=show['title'],
                          url='',
                          series_name=show['title'],
                          tvdb_id=show.get('tvdbId'),
                          tvrage_id=show.get('tvRageId'),
                          tvmaze_id=show.get('tvMazeId'),
                          imdb_id=show.get('imdbid'),
                          slug=show.get('titleSlug'),
                          sonarr_id=show.get('id'),
                          configure_series_target=fg_cutoff)
            if self.config.get('include_data'):
                if len(fg_qualities) > 1:
                    entry['configure_series_qualities'] = fg_qualities
                elif len(fg_qualities) == 1:
                    entry['configure_series_quality'] = fg_qualities[0]
                else:
                    entry['configure_series_quality'] = fg_qualities
                if path:
                    entry['configure_series_path'] = path
            if entry.isvalid():
                log.debug('returning entry %s', entry)
                entries.append(entry)
            else:
                log.error('Invalid entry created? %s' % entry)
                continue

        return entries
Example #29
def lookup_movie(title, session, identifiers=None):
    entry = Entry(title=title)
    if identifiers:
        for identifier in identifiers:
            for key, value in identifier.items():
                entry[key] = value
    try:
        imdb_lookup(entry, session=session)
    except PluginError:
        tmdb_lookup(entry)
    if entry.get('movie_name'):
        return entry
Example #30
 def __call__(self, parser, namespace, values, option_string=None):
     kwargs = {'title': values.pop(0)}
     if values:
         kwargs['url'] = values.pop(0)
     else:
         kwargs['url'] = 'http://localhost/inject/%s' % ''.join(random.sample(string.letters + string.digits, 30))
     if 'force' in [v.lower() for v in values]:
         kwargs['immortal'] = True
     entry = Entry(**kwargs)
     if 'accept' in [v.lower() for v in values]:
         entry.accept(reason='accepted by --inject')
     setattr(namespace, self.dest, [entry])
Example #31
    def _handle_path(self,
                     entries,
                     ftp,
                     baseurl,
                     path='',
                     mlst_supported=False,
                     files_only=False,
                     recursive=False,
                     encoding=None):
        try:
            dirs = ftp.nlst(path)
        except ftplib.error_perm as e:
            raise plugin.PluginWarning(str(e))

        if not dirs:
            log.verbose('Directory %s is empty', path)

        for p in dirs:
            if encoding:
                p = p.decode(encoding)

            # Clean file list when subdirectories are used
            p = p.replace(path + '/', '')

            mlst = {}
            if mlst_supported:
                mlst_output = ftp.sendcmd('MLST ' + path + '/' + p)
                clean_mlst_output = [
                    line.strip().lower() for line in mlst_output.splitlines()
                ][1]
                mlst = self.parse_mlst(clean_mlst_output)
            else:
                element_is_directory = self.is_directory(ftp, path + '/' + p)
                if element_is_directory:
                    mlst['type'] = 'dir'
                    log.debug('%s is a directory', p)
                else:
                    mlst['type'] = 'file'
                    log.debug('%s is a file', p)

            if recursive and mlst.get('type') == 'dir':
                self._handle_path(entries, ftp, baseurl, path + '/' + p,
                                  mlst_supported, files_only, recursive,
                                  encoding)

            if not files_only or mlst.get('type') == 'file':
                url = baseurl + path + '/' + p
                url = url.replace(' ', '%20')
                title = os.path.basename(p)
                log.info('Accepting entry "%s" [%s]' % (
                    path + '/' + p,
                    mlst.get('type') or "unknown",
                ))
                entry = Entry(title, url)
                if 'size' not in mlst:
                    if mlst.get('type') == 'file':
                        entry['content_size'] = ftp.size(path + '/' +
                                                         p) / (1024 * 1024)
                        log.debug('(FILE) Size = %s', entry['content_size'])
                    elif mlst.get('type') == 'dir':
                        entry['content_size'] = self.get_folder_size(
                            ftp, path, p)
                        log.debug('(DIR) Size = %s', entry['content_size'])
                else:
                    entry['content_size'] = float(
                        mlst.get('size')) / (1024 * 1024)
                entries.append(entry)
Example #32
    def on_task_input(self, task, config):
        # Don't edit the config, or it won't pass validation on rerun
        url_params = config.copy()
        if 'movies' in config and 'series' in config:
            raise plugin.PluginError(
                'Cannot use both series list and movies list in the same task.'
            )
        if 'movies' in config:
            url_params['data_type'] = 'movies'
            url_params['list_type'] = config['movies']
            map = self.movie_map
        elif 'series' in config:
            url_params['data_type'] = 'shows'
            url_params['list_type'] = config['series']
            map = self.series_map
        elif 'custom' in config:
            url_params['data_type'] = 'custom'
            # Do some translation from visible list name to prepare for use in url
            list_name = config['custom'].lower()
            # These characters are just stripped in the url
            for char in '!@#$%^*()[]{}/=?+\\|-_':
                list_name = list_name.replace(char, '')
            # These characters get replaced
            list_name = list_name.replace('&', 'and')
            list_name = list_name.replace(' ', '-')
            url_params['list_type'] = list_name
            # Map type is per item in custom lists
        else:
            raise plugin.PluginError(
                'Must define movie or series lists to retrieve from trakt.')

        url = 'http://api.trakt.tv/user/'
        auth = None
        if url_params['data_type'] == 'custom':
            url += 'list.json/%(api_key)s/%(username)s/%(list_type)s'
        elif url_params['list_type'] == 'watchlist':
            url += 'watchlist/%(data_type)s.json/%(api_key)s/%(username)s'
        else:
            url += 'library/%(data_type)s/%(list_type)s.json/%(api_key)s/%(username)s'
        url = url % url_params

        if 'password' in config:
            auth = {
                'username': config['username'],
                'password': hashlib.sha1(config['password']).hexdigest()
            }

        entries = []
        log.verbose('Retrieving list %s %s...' %
                    (url_params['data_type'], url_params['list_type']))

        try:
            result = task.requests.post(url, data=json.dumps(auth))
        except RequestException as e:
            raise plugin.PluginError(
                'Could not retrieve list from trakt (%s)' % e.message)
        try:
            data = result.json()
        except ValueError:
            log.debug('Could not decode json from response: %s', result.text)
            raise plugin.PluginError('Error getting list from trakt.')

        def check_auth():
            if task.requests.post(
                    'http://api.trakt.tv/account/test/' + config['api_key'],
                    data=json.dumps(auth),
                    raise_status=False).status_code != 200:
                raise plugin.PluginError('Authentication to trakt failed.')

        if 'error' in data:
            check_auth()
            raise plugin.PluginError('Error getting trakt list: %s' %
                                     data['error'])
        if not data:
            check_auth()
            log.warning('No data returned from trakt.')
            return
        if url_params['data_type'] == 'custom':
            if not isinstance(data['items'], list):
                raise plugin.PluginError(
                    'Faulty custom items in response: %s' % data['items'])
            data = data['items']
        for item in data:
            if url_params['data_type'] == 'custom':
                if item['type'] == 'movie':
                    map = self.movie_map
                    item = item['movie']
                else:
                    map = self.series_map
                    item = item['show']
            entry = Entry()
            entry.update_using_map(map, item)
            if entry.isvalid():
                if config.get('strip_dates'):
                    # Remove year from end of name if present
                    entry['title'] = re.sub(r'\s+\(\d{4}\)$', '',
                                            entry['title'])
                entries.append(entry)

        return entries
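The custom list-name translation above strips punctuation and replaces '&' and spaces before the name is used in the URL; for example (hypothetical list name):

list_name = 'My #1 Movies & Shows!'.lower()  # hypothetical list name
for char in '!@#$%^*()[]{}/=?+\\|-_':
    list_name = list_name.replace(char, '')
list_name = list_name.replace('&', 'and')
list_name = list_name.replace(' ', '-')
print(list_name)  # -> "my-1-movies-and-shows"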
Example #33
 def on_task_input(self, task, config):
     return [Entry('test entry', 'fake url')]
Example #34
    def on_task_input(self, task, config):
        config = self.prepare_config(config)
        if not config['enabled']:
            return

        if not self.client:
            self.client = self.create_rpc_client(config)
        entries = []

        # Hack/Workaround for http://flexget.com/ticket/2002
        # TODO: Proper fix
        if 'username' in config and 'password' in config:
            self.client.http_handler.set_authentication(
                self.client.url, config['username'], config['password'])

        session = self.client.get_session()

        for torrent in self.client.get_torrents():
            seed_ratio_ok, idle_limit_ok = self.check_seed_limits(
                torrent, session)
            if config['only_complete'] and not (seed_ratio_ok and idle_limit_ok
                                                and torrent.progress == 100):
                continue
            entry = Entry(
                title=torrent.name,
                url='',
                torrent_info_hash=torrent.hashString,
                content_size=torrent.totalSize / (1024 * 1024),
            )
            # Location of torrent is only valid if transmission is on same machine as flexget
            if config['host'] in ('localhost', '127.0.0.1'):
                entry['location'] = torrent.torrentFile
                entry['url'] = 'file://' + torrent.torrentFile
            for attr in [
                    'id',
                    'comment',
                    'downloadDir',
                    'isFinished',
                    'isPrivate',
                    'ratio',
                    'status',
                    'date_active',
                    'date_added',
                    'date_done',
                    'date_started',
                    'priority',
                    'progress',
                    'secondsDownloading',
                    'secondsSeeding',
            ]:
                try:
                    entry['transmission_' + attr] = getattr(torrent, attr)
                except Exception:
                    log.debug(
                        'error when requesting transmissionrpc attribute %s',
                        attr,
                        exc_info=True)
            entry['transmission_trackers'] = [
                t['announce'] for t in torrent.trackers
            ]
            entry['transmission_seed_ratio_ok'] = seed_ratio_ok
            entry['transmission_idle_limit_ok'] = idle_limit_ok
            # Built-in done_date doesn't work when the user adds an already completed file to transmission
            if torrent.progress == 100:
                entry['transmission_date_done'] = datetime.fromtimestamp(
                    max(torrent.addedDate, torrent.doneDate))
                dummy, bff = self.torrent_info(torrent, config)
                if bff:
                    entry['bigfile_location'] = bff
            entries.append(entry)
        return entries
Ejemplo n.º 35
0
    def search(self, task, entry, config=None):
        """
            Search for entries on SceneAccess
        """

        session = task.requests

        if 'sceneaccess.eu' not in session.domain_limiters:
            session.add_domain_limiter(
                TimedLimiter('sceneaccess.eu', '7 seconds'))

        if not session.cookies:
            log.debug('Logging in to %s...' % URL)
            params = {
                'username': config['username'],
                'password': config['password'],
                'submit': 'come on in'
            }
            session.post(URL + 'login', data=params)

        if 'gravity_multiplier' in config:
            multip = config['gravity_multiplier']
        else:
            multip = 1

        # Prepare queries...
        base_urls = list()
        entries = set()
        for category in self.process_categories(config):
            base_urls.append(URL +
                             '%(url_path)s?method=2%(category_url_string)s' %
                             category)

        # Search...
        for search_string in entry.get('search_strings', [entry['title']]):
            search_string_normalized = normalize_unicode(
                clean_title(search_string))
            search_string_url_fragment = '&search=' + quote(
                search_string_normalized.encode('utf8'))

            for url in base_urls:
                url += search_string_url_fragment
                log.debug('Search URL for `%s`: %s' % (search_string, url))

                page = session.get(url).content
                soup = get_soup(page)

                for result in soup.findAll('tr', attrs={'class': 'tt_row'}):
                    entry = Entry()
                    entry['title'] = result.find(
                        'a', href=re.compile(r'details\?id=\d+'))['title']
                    entry['url'] = URL + result.find(
                        'a', href=re.compile(r'.torrent$'))['href']

                    entry['torrent_seeds'] = result.find('td',
                                                         attrs={
                                                             'class':
                                                             'ttr_seeders'
                                                         }).text
                    entry['torrent_leeches'] = result.find('td',
                                                           attrs={
                                                               'class':
                                                               'ttr_leechers'
                                                           }).text
                    entry['search_sort'] = torrent_availability(
                        entry['torrent_seeds'],
                        entry['torrent_leeches']) * multip

                    size = result.find('td', attrs={'class': 'ttr_size'}).text
                    size = re.search('(\d+(?:[.,]\d+)*)\s?([KMG]B)', size)

                    entry['content_size'] = parse_filesize(size.group(0))

                    entries.add(entry)

        return entries
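
The search_sort value above comes from flexget's torrent_availability helper, which is not shown in this snippet. A rough stand-in (an assumption, not the library's actual implementation) that favours well-seeded results could be:

def torrent_availability(seeds, leeches):
    # Illustrative stand-in only: weight seeders roughly twice as heavily as leechers.
    return int(seeds) * 2 + int(leeches)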
Ejemplo n.º 36
0
    def search(self, task, entry, config=None):
        config = self.prepare_config(config)

        if not session.cookies:
            log.debug('Logging in to %s...' % URL)
            params = {
                'username': config['username'],
                'password': config['password'],
                'keeplogged': '1',
                'login': '******'
            }
            session.post(URL + 'login.php', data=params)

        cat = ''.join([
            '&' + ('filter_cat[%s]' % id) + '=1' for id in config['category']
        ])
        rls = 'release_type=' + config['type']
        url_params = rls + cat
        multip = config['gravity_multiplier']

        entries = set()
        for search_string in entry.get('search_strings', [entry['title']]):
            srch = normalize_unicode(clean_title(search_string))
            srch = '&searchstr=' + quote(srch.encode('utf8'))

            url = URL + 'torrents.php?' + url_params + srch
            log.debug('Fetching URL for `%s`: %s' % (search_string, url))

            page = session.get(url).content
            soup = get_soup(page)

            for result in soup.findAll('tr', attrs={'class': 'torrent'}):
                entry = Entry()
                entry['title'] = result.find('span',
                                             attrs={
                                                 'class': 'torrent_name_link'
                                             }).text
                entry['url'] = URL + result.find(
                    'a', href=re.compile(
                        'torrents\.php\?action=download')).get('href')
                entry['torrent_seeds'], entry['torrent_leeches'] = [
                    r.text for r in result.findAll('td')[-2:]
                ]
                entry['search_sort'] = torrent_availability(
                    entry['torrent_seeds'], entry['torrent_leeches']) * multip

                size = result.findAll('td')[-4].text
                size = re.search('(\d+(?:[.,]\d+)*)\s?([KMG]B)', size)

                if size:
                    if size.group(2) == 'GB':
                        entry['content_size'] = int(
                            float(size.group(1).replace(',', '')) * 1000**3 /
                            1024**2)
                    elif size.group(2) == 'MB':
                        entry['content_size'] = int(
                            float(size.group(1).replace(',', '')) * 1000**2 /
                            1024**2)
                    elif size.group(2) == 'KB':
                        entry['content_size'] = int(
                            float(size.group(1).replace(',', '')) * 1000 /
                            1024**2)
                    else:
                        entry['content_size'] = int(
                            float(size.group(1).replace(',', '')) / 1024**2)

                entries.add(entry)
        return entries
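
The GB/MB/KB branches above convert decimal units into mebibytes by hand. Other examples in this collection call flexget's parse_filesize (from flexget.utils.tools) for the same job; assuming it accepts strings like '1.4 GB' and returns MiB, the chain could be reduced to roughly:

size_text = result.findAll('td')[-4].text
try:
    entry['content_size'] = parse_filesize(size_text)
except ValueError:
    # parse_filesize is assumed to raise ValueError on unparseable text
    log.debug('Could not parse size from %s', size_text)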
Ejemplo n.º 37
0
    def search(self, entry, config=None):
        """
        Search for name from torrentleech.
        """
        rss_key = config['rss_key']

        # build the form request:
        data = {
            'username': config['username'],
            'password': config['password'],
            'remember_me': 'on',
            'submit': 'submit'
        }
        # POST the login form:
        login = requests.post('http://torrentleech.org/', data=data)

        if not isinstance(config, dict):
            config = {}
        # sort = SORT.get(config.get('sort_by', 'seeds'))
        # if config.get('sort_reverse'):
        # sort += 1
        if isinstance(config.get('category'), int):
            category = config['category']
        else:
            category = CATEGORIES.get(config.get('category', 'all'))
        filter_url = '/categories/%d' % (category)

        query = normalize_unicode(entry['title'])
        # urllib.quote will crash if the unicode string has non ascii characters, so encode in utf-8 beforehand
        url = 'http://torrentleech.org/torrents/browse/index/query/' + urllib.quote(
            query.encode('utf-8')) + filter_url
        log.debug('Using %s as torrentleech search url' % url)

        page = requests.get(url, cookies=login.cookies).content
        soup = get_soup(page)

        entries = []
        for tr in soup.find_all("tr", ["even", "odd"]):
            # within each even or odd row, find the torrent names
            link = tr.find("a", attrs={'href': re.compile('/torrent/\d+')})
            log.debug('link phase: %s' % link.contents[0])
            entry = Entry()
            # extracts the contents of the <a>titlename/<a> tag
            entry['title'] = link.contents[0]

            # find download link
            torrent_url = tr.find("a",
                                  attrs={
                                      'href': re.compile('/download/\d+/.*')
                                  }).get('href')
            # parse link and split along /download/12345 and /name.torrent
            download_url = re.search('(/download/\d+)/(.+\.torrent)',
                                     torrent_url)
            # change link to rss and splice in rss_key
            torrent_url = 'http://torrentleech.org/rss' + download_url.group(
                1) + '/' + rss_key + '/' + download_url.group(2)
            log.debug('RSS-ified download link: %s' % torrent_url)
            entry['url'] = torrent_url

            # use tr object for seeders/leechers
            seeders, leechers = tr.find_all('td', ["seeders", "leechers"])
            entry['torrent_seeds'] = int(seeders.contents[0])
            entry['torrent_leeches'] = int(leechers.contents[0])
            entry['search_sort'] = torrent_availability(
                entry['torrent_seeds'], entry['torrent_leeches'])

            # use tr object for size
            size = tr.find("td",
                           text=re.compile('([\.\d]+) ([GMK])B')).contents[0]
            size = re.search('([\.\d]+) ([GMK])B', size)
            if size:
                if size.group(2) == 'G':
                    entry['content_size'] = int(
                        float(size.group(1)) * 1000**3 / 1024**2)
                elif size.group(2) == 'M':
                    entry['content_size'] = int(
                        float(size.group(1)) * 1000**2 / 1024**2)
                else:
                    entry['content_size'] = int(
                        float(size.group(1)) * 1000 / 1024**2)
            entries.append(entry)

        if not entries:
            dashindex = query.rfind('-')
            if dashindex != -1:
                return self.search(query[:dashindex])
            else:
                raise PluginWarning('No close matches for %s' % query,
                                    log,
                                    log_once=True)

        entries.sort(reverse=True, key=lambda x: x.get('search_sort'))

        return entries
Ejemplo n.º 38
0
    def search(self, task, entry, config):
        """
            Search for entries on RarBG
        """

        categories = config.get('category', 'all')
        # Ensure categories is a list
        if not isinstance(categories, list):
            categories = [categories]
        # Convert named category to its respective category id number
        categories = [c if isinstance(c, int) else CATEGORIES[c] for c in categories]
        category_url_fragment = ';'.join(str(c) for c in categories)

        entries = set()

        token = self.get_token()
        if not token:
            log.error('Could not retrieve token. Abandoning search.')
            return entries

        params = {'mode': 'search', 'token': token, 'ranked': int(config['ranked']),
                  'min_seeders': config['min_seeders'], 'min_leechers': config['min_leechers'],
                  'sort': config['sorted_by'], 'category': category_url_fragment, 'format': 'json_extended',
                  'app_id': 'flexget'}

        for search_string in entry.get('search_strings', [entry['title']]):
            params.pop('search_string', None)
            params.pop('search_imdb', None)
            params.pop('search_tvdb', None)

            if entry.get('movie_name'):
                params['search_imdb'] = entry.get('imdb_id')
            else:
                query = normalize_scene(search_string)
                query_url_fragment = query.encode('utf8')
                params['search_string'] = query_url_fragment
                if config['use_tvdb']:
                    plugin.get_plugin_by_name('thetvdb_lookup').instance.lazy_series_lookup(entry)
                    params['search_tvdb'] = entry.get('tvdb_id')
                    log.debug('Using tvdb id %s', entry.get('tvdb_id'))
            try:
                page = requests.get(self.base_url, params=params)
                log.debug('requesting: %s', page.url)
            except RequestException as e:
                log.error('RarBG request failed: %s' % e.args[0])
                continue
            r = page.json()
            # error code 20 just means no results were found
            if r.get('error_code') == 20:
                searched_string = params.get('search_string') or 'imdb={0}'.format(params.get('search_imdb')) or \
                    'tvdb={0}'.format(params.get('tvdb_id'))
                log.debug('No results found for %s', searched_string)
                continue
            elif r.get('error'):
                log.error('Error code %s: %s', r.get('error_code'), r.get('error'))
                continue
            else:
                for result in r.get('torrent_results'):
                    e = Entry()

                    e['title'] = result.get('title')
                    e['url'] = result.get('download')
                    e['torrent_seeds'] = int(result.get('seeders'))
                    e['torrent_leeches'] = int(result.get('leechers'))
                    e['content_size'] = int(result.get('size')) / 1024 / 1024
                    episode_info = result.get('episode_info')
                    if episode_info:
                        e['imdb_id'] = episode_info.get('imdb')
                        e['tvdb_id'] = episode_info.get('tvdb')
                        e['tvrage_id'] = episode_info.get('tvrage')

                    entries.add(e)

        return entries
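
get_token() is defined elsewhere in the plugin. A plausible sketch, assuming self.base_url points at RarBG's torrentapi endpoint and that the token response is a small JSON object, could be:

    def get_token(self):
        # Hypothetical sketch: fetch a short-lived API token before searching.
        try:
            response = requests.get(self.base_url,
                                    params={'get_token': 'get_token', 'app_id': 'flexget'})
            return response.json().get('token')
        except (RequestException, ValueError) as e:
            log.error('Could not retrieve RarBG token: %s', e)
            return None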
Ejemplo n.º 39
0
    def search(self, task, entry, config):
        """
            Search for entries on 1337x
        """

        if not isinstance(config, dict):
            config = {}

        order_by = ''
        sort_order = ''
        if isinstance(config.get('order_by'), str):
            if config['order_by'] != 'leechers':
                order_by = '/{0}/desc'.format(config['order_by'])
                sort_order = 'sort-'

        entries = set()

        for search_string in entry.get('search_strings', [entry['title']]):

            query = '{0}search/{1}{2}/1/'.format(
                sort_order, quote(search_string.encode('utf8')), order_by)
            logger.debug('Using search params: {}; ordering by: {}',
                         search_string, order_by or 'default')
            try:
                page = task.requests.get(self.base_url + query)
                logger.debug('requesting: {}', page.url)
            except RequestException as e:
                logger.error('1337x request failed: {}', e)
                continue

            soup = get_soup(page.content)
            if soup.find('div', attrs={'class':
                                       'table-list-wrap'}) is not None:
                for link in soup.find('div',
                                      attrs={
                                          'class': 'table-list-wrap'
                                      }).findAll(
                                          'a', href=re.compile('^/torrent/')):
                    li = link.parent.parent

                    title = str(link.text).replace('...', '')
                    info_url = self.base_url + str(link.get('href'))[1:]
                    seeds = int(li.find('td', class_='seeds').string)
                    leeches = int(li.find('td', class_='leeches').string)
                    size = str(li.find('td', class_='coll-4').contents[0])

                    size = parse_filesize(size)

                    e = Entry()

                    e['url'] = info_url
                    e['title'] = title
                    e['torrent_seeds'] = seeds
                    e['torrent_leeches'] = leeches
                    e['torrent_availability'] = torrent_availability(
                        e['torrent_seeds'], e['torrent_leeches'])
                    e['content_size'] = size

                    entries.add(e)

        return entries
Ejemplo n.º 40
0
    def on_task_input(self, task, config):

        # Let details plugin know that it is ok if this task doesn't produce any entries
        task.no_entries_ok = True

        filename = os.path.expanduser(config['file'])
        encoding = config.get('encoding', None)
        file = open(filename, 'r')

        last_pos = task.simple_persistence.setdefault(filename, 0)
        if os.path.getsize(filename) < last_pos:
            log.info(
                'File size is smaller than in previous execution, resetting to the beginning of the file'
            )
            last_pos = 0

        file.seek(last_pos)

        log.debug('continuing from last position %s' % last_pos)

        entry_config = config.get('entry')
        format_config = config.get('format', {})

        # keep track of which fields have been found
        used = {}
        entries = []
        entry = Entry()

        # now parse text

        while True:
            line = file.readline()
            if encoding:
                try:
                    line = line.decode(encoding)
                except UnicodeError:
                    raise PluginError(
                        'Failed to decode file using %s. Check encoding.' %
                        encoding)

            if not line:
                task.simple_persistence[filename] = file.tell()
                break

            for field, regexp in entry_config.iteritems():
                #log.debug('search field: %s regexp: %s' % (field, regexp))
                match = re.search(regexp, line)
                if match:
                    # check if this field was already found; if so, start a new entry
                    if field in used:
                        if entry.isvalid():
                            log.info(
                                'Found field %s again before entry was completed. \
                                      Adding current incomplete, but valid entry and moving to next.'
                                % field)
                            self.format_entry(entry, format_config)
                            entries.append(entry)
                        else:
                            log.info(
                                'Invalid data, entry field %s is already found once. Ignoring entry.'
                                % field)
                        # start new entry
                        entry = Entry()
                        used = {}

                    # add field to entry
                    entry[field] = match.group(1)
                    used[field] = True
                    log.debug('found field: %s value: %s' %
                              (field, entry[field]))

                # if all fields have been found
                if len(used) == len(entry_config):
                    # check that entry has at least title and url
                    if not entry.isvalid():
                        log.info(
                            'Invalid data, constructed entry is missing mandatory fields (title or url)'
                        )
                    else:
                        self.format_entry(entry, format_config)
                        entries.append(entry)
                        log.debug('Added entry %s' % entry)
                        # start new entry
                        entry = Entry()
                        used = {}
        return entries
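
The entry_config iterated above is a mapping of entry field names to regular expressions, each of which must contain one capture group; at least title and url are needed for entry.isvalid() to pass. An illustrative (hypothetical) shape:

entry_config = {
    'title': r'NEW:\s*(.+?)\s*$',
    'url': r'(https?://\S+)',
}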
Ejemplo n.º 41
0
    def on_task_input(self, task, config):
        config = self.prepare_config(config)
        urlconfig = {}
        urlappend = "?"
        entries = []
        if config['unwatched_only'] and config['section'] != 'recentlyViewedShows' and config['section'] != 'all':
            urlconfig['unwatched'] = '1'
        if config['token']:
            accesstoken = config['token']
            log.debug("Using accesstoken: %s", accesstoken)
            urlconfig['X-Plex-Token'] = accesstoken
        elif config['username'] and config['password']:
            accesstoken = self.plex_get_accesstoken(config)
            log.debug("Got accesstoken: %s", accesstoken)
            urlconfig['X-Plex-Token'] = accesstoken

        for key in urlconfig:
            urlappend += '%s=%s&' % (key, urlconfig[key])
        if not self.plex_section_is_int(config['section']):
            try:
                path = "/library/sections/"
                r = requests.get("http://%s:%d%s%s" % (config['plexserver'], config['port'], path, urlappend))
            except requests.RequestException as e:
                raise plugin.PluginError('Error retrieving source: %s' % e)
            dom = parseString(r.text.encode("utf-8"))
            for node in dom.getElementsByTagName('Directory'):
                if node.getAttribute('title') == config['section']:
                    config['section'] = int(node.getAttribute('key'))
        if not self.plex_section_is_int(config['section']):
            raise plugin.PluginError('Could not find section \'%s\'' % config['section'])

        log.debug("Fetching http://%s:%d/library/sections/%s/%s%s",
                  config['server'], config['port'], config['section'], config['selection'], urlappend)
        try:
            path = "/library/sections/%s/%s" % (config['section'], config['selection'])
            r = requests.get("http://%s:%d%s%s" % (config['plexserver'], config['port'], path, urlappend))
        except requests.RequestException as e:
            raise plugin.PluginError('There is no section with number %d. (%s)' % (config['section'], e))
        dom = parseString(r.text.encode("utf-8"))
        plexsectionname = dom.getElementsByTagName('MediaContainer')[0].getAttribute('title1')
        viewgroup = dom.getElementsByTagName('MediaContainer')[0].getAttribute('viewGroup')

        log.debug("Plex section \"%s\" is a \"%s\" section", plexsectionname, viewgroup)
        if viewgroup != "movie" and viewgroup != "show" and viewgroup != "episode":
            raise plugin.PluginError("Section is neither a movie nor tv show section!")
        domroot = "Directory"
        titletag = "title"
        if viewgroup == "episode":
            domroot = "Video"
            titletag = "grandparentTitle"
            thumbtag = "thumb"
            arttag = "art"
            seasoncovertag = "parentThumb"
            covertag = "grandparentThumb"
        elif viewgroup == "movie":
            domroot = "Video"
            titletag = "title"
            arttag = "art"
            seasoncovertag = "thumb"
            covertag = "thumb"
            if config['fetch'] == "thumb":
                raise plugin.PluginError("Movie sections does not have any thumbnails to download!")
        for node in dom.getElementsByTagName(domroot):
            e = Entry()
            e['plex_server'] = config['plexserver']
            e['plex_port'] = config['port']
            e['plex_section'] = config['section']
            e['plex_section_name'] = plexsectionname
            e['plex_episode_thumb'] = ''

            title = node.getAttribute(titletag)
            if config['strip_year']:
                title = re.sub(r'^(.*)\(\d{4}\)(.*)', r'\1\2', title)
            if config['strip_parens']:
                title = re.sub(r'\(.*?\)', r'', title)
                title = title.strip()
            if config['strip_non_alpha']:
                title = re.sub(r'[\(\)]', r'', title)
                title = re.sub(r'&', r'And', title)
                title = re.sub(r'[^A-Za-z0-9- \']', r'', title)
            if config['lowercase_title']:
                title = title.lower()
            if viewgroup == "show":
                e['title'] = title
                e['url'] = 'NULL'
                entries.append(e)
                # show ends here.
                continue
            e['plex_art'] = "http://%s:%d%s%s" % (config['server'], config['port'],
                                                  node.getAttribute(arttag), urlappend)
            e['plex_cover'] = "http://%s:%d%s%s" % (config['server'], config['port'],
                                                    node.getAttribute(covertag), urlappend)
            e['plex_season_cover'] = "http://%s:%d%s%s" % (config['server'], config['port'],
                                                           node.getAttribute(seasoncovertag), urlappend)
            if viewgroup == "episode":
                e['plex_thumb'] = "http://%s:%d%s%s" % (
                    config['server'], config['port'], node.getAttribute('thumb'), urlappend)
                e['series_name'] = title
                e['plex_ep_name'] = node.getAttribute('title')
                season = int(node.getAttribute('parentIndex'))
                if node.getAttribute('parentIndex') == node.getAttribute('year'):
                    season = node.getAttribute('originallyAvailableAt')
                    filenamemap = "%s_%s%s_%s_%s_%s.%s"
                    episode = ""
                    e['series_id_type'] = 'date'
                    e['series_date'] = season
                elif node.getAttribute('index'):
                    episode = int(node.getAttribute('index'))
                    filenamemap = "%s_%02dx%02d_%s_%s_%s.%s"
                    e['series_season'] = season
                    e['series_episode'] = episode
                    e['series_id_type'] = 'ep'
                    e['series_id'] = 'S%02dE%02d' % (season, episode)
                else:
                    log.debug("Could not get episode number for '%s' (Hint, ratingKey: %s)",
                              title, node.getAttribute('ratingKey'))
                    break
            elif viewgroup == "movie":
                filenamemap = "%s_%s_%s_%s.%s"

            e['plex_year'] = node.getAttribute('year')
            e['plex_added'] = datetime.fromtimestamp(int(node.getAttribute('addedAt')))
            e['plex_duration'] = node.getAttribute('duration')
            e['plex_summary'] = node.getAttribute('summary')
            e['plex_userrating'] = node.getAttribute('userrating')
            e['plex_key'] = node.getAttribute('ratingKey')
            count = node.getAttribute('viewCount')
            offset = node.getAttribute('viewOffset')
            if count:
                e['plex_status'] = "seen"
            elif offset:
                e['plex_status'] = "inprogress"
            else:
                e['plex_status'] = "unwatched"
            for media in node.getElementsByTagName('Media'):
                entry = Entry(e)
                vcodec = media.getAttribute('videoCodec')
                acodec = media.getAttribute('audioCodec')
                if media.hasAttribute('title'):
                    entry['plex_media_title'] = media.getAttribute('title')
                if media.hasAttribute('optimizedForStreaming'):
                    entry['plex_stream_optimized'] = media.getAttribute('optimizedForStreaming')
                if config['fetch'] == "file" or not config['fetch']:
                    container = media.getAttribute('container')
                else:
                    container = "jpg"
                resolution = media.getAttribute('videoResolution') + "p"
                for part in media.getElementsByTagName('Part'):
                    if config['fetch'] == "file" or not config['fetch']:
                        key = part.getAttribute('key')
                    elif config['fetch'] == "art":
                        key = node.getAttribute(arttag)
                    elif config['fetch'] == "cover":
                        key = node.getAttribute(covertag)
                    elif config['fetch'] == "season_cover":
                        key = node.getAttribute(seasoncovertag)
                    elif config['fetch'] == "thumb":
                        key = node.getAttribute(thumbtag)
                    # key = part.getAttribute('key')
                    duration = part.getAttribute('duration')
                    entry['plex_title'] = title
                    entry['title'] = title
                    if config['original_filename']:
                        filename, fileext = os.path.splitext(basename(part.getAttribute('file')))
                        if config['fetch'] != 'file':
                            filename += ".jpg"
                        else:
                            filename = "%s%s" % (filename, fileext)
                    else:
                        if viewgroup == "episode":
                            filename = filenamemap % (title.replace(" ", "."), season, episode, resolution, vcodec,
                                                      acodec, container)
                            entry['title'] = filename
                        elif viewgroup == "movie":
                            filename = filenamemap % (title.replace(" ", "."), resolution, vcodec,
                                                      acodec, container)
                            entry['title'] = filename
                    entry['plex_url'] = "http://%s:%d%s%s" % (config['server'], config['port'], key, urlappend)
                    entry['plex_path'] = key
                    entry['url'] = "http://%s:%d%s%s" % (config['server'], config['port'], key, urlappend)
                    entry['plex_duration'] = duration
                    entry['filename'] = filename
                    if key == "":
                        log.debug("Could not find anything in PMS to download. Next!")
                    else:
                        entries.append(entry)
        return entries
Ejemplo n.º 42
0
    def _parse_tiles(self, task, config, tiles, series_info):
        max_age = config.get('max_episode_age_days')
        download_premium = config.get('download_premium')
        entries = []

        if tiles is not None:
            for tile in tiles:
                # there is only one list_item per tile
                for list_item in get_soup(tile).findAll(
                        'div', class_='npo-asset-tile-container'):
                    episode_id = list_item['data-id']
                    premium = 'npo-premium-content' in list_item['class']
                    logger.debug('Parsing episode: {}', episode_id)

                    url = list_item.find('a')['href']
                    # Check if the URL found to the episode matches the expected pattern
                    if len(url.split('/')) != 6:
                        logger.verbose(
                            'Skipping {}, the URL has an unexpected pattern: {}',
                            episode_id, url)
                        continue  # something is wrong; skip this episode

                    episode_name = list_item.find('h2')
                    if episode_name:
                        title = '{} ({})'.format(
                            next(episode_name.stripped_strings), episode_id)
                    else:
                        title = '{}'.format(episode_id)

                    timer = '0'
                    timerdiv = list_item.find('div',
                                              class_='npo-asset-tile-timer')
                    if timerdiv:  # some entries are missing a running time
                        timer = next(timerdiv.stripped_strings)
                    remove_url = list_item.find('div',
                                                class_='npo-asset-tile-delete')

                    if premium and not download_premium:
                        logger.debug(
                            'Skipping {}, no longer available without premium',
                            title)
                        continue

                    entry_date = url.split('/')[-2]
                    entry_date = self._parse_date(entry_date)

                    if max_age >= 0 and date.today() - entry_date > timedelta(
                            days=max_age):
                        logger.debug('Skipping {}, aired on {}', title,
                                     entry_date)
                        continue

                    e = Entry(series_info)
                    e['url'] = url
                    e['title'] = title
                    e['series_name'] = series_info['npo_name']
                    e['series_name_plain'] = self._convert_plain(
                        series_info['npo_name'])
                    e['series_date'] = entry_date
                    e['series_id_type'] = 'date'
                    e['npo_id'] = episode_id
                    e['npo_premium'] = premium
                    e['npo_runtime'] = timer.strip('min').strip()
                    e['language'] = series_info['npo_language']

                    if remove_url and remove_url['data-link']:
                        e['remove_url'] = remove_url['data-link']
                        if config.get('remove_accepted'):
                            e.on_complete(self.entry_complete, task=task)
                    entries.append(e)

        return entries
Ejemplo n.º 43
0
    def on_task_input(self, task, config):
        # Turn simple config into full config
        if isinstance(config, str):
            config = {'quality': config}

        try:
            r = task.requests.get(self.rss_url)
        except RequestException as e:
            raise plugin.PluginError(
                'Retrieving Apple Trailers RSS feed failed: %s' % e)

        rss = feedparser.parse(r.content)

        if rss.get('bozo_exception', False):
            raise plugin.PluginError('Got bozo_exception (bad feed)')

        filmid_regex = re.compile(r'(FilmId\s*\=\s*\')(\d+)(?=\')')
        studio_regex = re.compile(r'(?:[0-9]*\s*)(.+)')
        # use the following dict to save json object in case multiple trailers have been released for the same movie
        # no need to do multiple requests for the same thing!
        trailers = {}
        entries = []
        for item in rss.entries:
            entry = Entry()
            movie_url = item['link']
            entry['title'] = item['title']
            entry['movie_name'], entry['apple_trailers_name'] = entry[
                'title'].split(' - ', 1)
            if not trailers.get(movie_url):
                try:
                    movie_page = task.requests.get(movie_url).text
                    match = filmid_regex.search(movie_page)
                    if match:
                        json_url = self.movie_data_url + match.group(
                            2) + '.json'
                        movie_data = task.requests.get(json_url).json()

                        trailers[movie_url] = {
                            'json_url': json_url,
                            'json': movie_data
                        }
                    else:
                        self.broken('FilmId not found for {0}'.format(
                            entry['movie_name']))

                except RequestException as e:
                    log.error('Failed to get trailer %s: %s', entry['title'],
                              e.args[0])
                    continue
            else:
                movie_data = trailers[movie_url]['json']
            genres = {
                genre.get('name')
                for genre in movie_data.get('details').get('genres')
            }
            config_genres = set(config.get('genres', []))
            if genres and config_genres and not set.intersection(
                    config_genres, genres):
                log.debug('Config genre(s) do not match movie genre(s)')
                continue

            desired_quality = config['quality']
            # find the trailer url
            for clip in movie_data.get('clips'):
                if clip.get('title') == entry['apple_trailers_name']:
                    try:
                        trailer_url = clip['versions']['enus']['sizes'][
                            self.qualities[desired_quality]]
                        src = trailer_url.get('src')
                        src_alt = trailer_url.get('srcAlt')
                        # .mov tends to be a streaming video file; the real video file lives at
                        # the same url with 'h' prepended to the quality
                        if src.split('.')[-1] == 'mov':
                            entry['url'] = src.replace(desired_quality,
                                                       'h' + desired_quality)
                        elif src_alt.split('.')[-1] == 'mov':
                            entry['url'] = src_alt.replace(
                                desired_quality, 'h' + desired_quality)
                        else:
                            continue  # just continue until we reach the else part of the for-else
                        break
                    except KeyError as e:
                        self.broken(e.args[0])
            else:
                log.error('Trailer "%s" not found',
                          entry['apple_trailers_name'])
                continue

            # set some entry fields if present
            # studio is usually also the copyright holder
            studio = studio_regex.match(
                movie_data.get('page').get('copyright'))
            if studio:
                entry['movie_studio'] = studio.group(1)

            release_date = movie_data.get('page').get('release_date')
            if release_date:
                entry['release_date'] = release_date

            if genres:
                entry['genres'] = ', '.join(list(genres))

            # set the correct header without modifying the task.requests obj
            entry['download_auth'] = AppleTrailersHeader()
            entries.append(entry)

        return entries
Ejemplo n.º 44
0
 def test_encoding(self):
     e = Entry('title', 'url')
     with pytest.raises(EntryUnicodeError):
         e['invalid'] = b'\x8e'
Ejemplo n.º 45
0
    def search(self, task, entry, config):
        """
            Search for entries on FileList.ro
        """
        entries = list()

        params = {
            'cat': CATEGORIES[config['category']],
            'incldead': int(config['include_dead']),
            'order_by': SORTING[config['order_by']],
            'searchin': SEARCH_IN[config['search_in']],
            'asc': int(config['order_ascending']),
        }

        for search_string in entry.get('search_strings', [entry['title']]):
            params['search'] = search_string
            logger.debug('Using search params: {}', params)
            try:
                page = self.get(
                    BASE_URL + 'browse.php', params, config['username'], config['password']
                )
                logger.debug('requesting: {}', page.url)
            except RequestException as e:
                logger.error('FileList.ro request failed: {}', e)
                continue

            soup = get_soup(page.content)
            for result in soup.findAll('div', attrs={'class': 'torrentrow'}):
                e = Entry()

                torrent_info = result.findAll('div', attrs={'class': 'torrenttable'})

                # genres
                genres = torrent_info[1].find('font')
                if genres:
                    genres = genres.text.lstrip('[').rstrip(']').replace(' ', '')
                    genres = genres.split('|')

                tags = torrent_info[1].findAll('img')
                freeleech = False
                internal = False
                for tag in tags:
                    if tag.get('alt', '').lower() == 'freeleech':
                        freeleech = True
                    if tag.get('alt', '').lower() == 'internal':
                        internal = True

                title = torrent_info[1].find('a').get('title')
                # this is a dirty fix to get the full title since their developer is a moron
                if re.match(r"\<img src=\'.*\'\>", title):
                    title = torrent_info[1].find('b').text
                    # if the title is shortened, then do a request to get the full one :(
                    if title.endswith('...'):
                        url = BASE_URL + torrent_info[1].find('a')['href']
                        try:
                            request = self.get(url, {}, config['username'], config['password'])
                        except RequestException as e:
                            logger.error('FileList.ro request failed: {}', e)
                            continue
                        title_soup = get_soup(request.content)
                        title = title_soup.find('div', attrs={'class': 'cblock-header'}).text

                e['title'] = title
                e['url'] = (
                    BASE_URL + torrent_info[3].find('a')['href'] + '&passkey=' + config['passkey']
                )
                e['content_size'] = parse_filesize(torrent_info[6].find('font').text)

                e['torrent_snatches'] = int(
                    torrent_info[7]
                    .find('font')
                    .text.replace(' ', '')
                    .replace('times', '')
                    .replace(',', '')
                )
                e['torrent_seeds'] = int(torrent_info[8].find('span').text)
                e['torrent_leeches'] = int(torrent_info[9].find('span').text)
                e['torrent_internal'] = internal
                e['torrent_freeleech'] = freeleech
                if genres:
                    e['torrent_genres'] = genres

                entries.append(e)

        return entries
Ejemplo n.º 46
0
    def _handle_path(self,
                     entries,
                     ftp,
                     baseurl,
                     path='',
                     mlst_supported=False,
                     files_only=False,
                     recursive=False,
                     get_size=True,
                     encoding=None):
        dirs = self.list_directory(ftp, path)

        for p in dirs:
            if encoding:
                p = p.decode(encoding)

            # Clean file list when subdirectories are used
            p = p.replace(path + '/', '')

            mlst = {}
            if mlst_supported:
                mlst_output = ftp.sendcmd('MLST ' + path + '/' + p)
                clean_mlst_output = [
                    line.strip().lower() for line in mlst_output.splitlines()
                ][1]
                mlst = self.parse_mlst(clean_mlst_output)
            else:
                element_is_directory = self.is_directory(ftp, path + '/' + p)
                if element_is_directory:
                    mlst['type'] = 'dir'
                    log.debug('%s is a directory', p)
                else:
                    mlst['type'] = 'file'
                    log.debug('%s is a file', p)

            if recursive and mlst.get('type') == 'dir':
                self._handle_path(entries, ftp, baseurl, path + '/' + p,
                                  mlst_supported, files_only, recursive,
                                  get_size, encoding)

            if not files_only or mlst.get('type') == 'file':
                url = baseurl + urllib.quote(path) + '/' + urllib.quote(p)
                log.debug("Encoded URL: " + url)
                title = os.path.basename(p)
                log.info('Accepting entry "%s" [%s]' % (
                    path + '/' + p,
                    mlst.get('type') or "unknown",
                ))
                entry = Entry(title, url)
                if get_size and 'size' not in mlst:
                    if mlst.get('type') == 'file':
                        entry['content_size'] = ftp.size(path + '/' +
                                                         p) / (1024 * 1024)
                        log.debug('(FILE) Size = %s', entry['content_size'])
                    elif mlst.get('type') == 'dir':
                        entry['content_size'] = self.get_folder_size(
                            ftp, path, p)
                        log.debug('(DIR) Size = %s', entry['content_size'])
                elif get_size:
                    entry['content_size'] = float(
                        mlst.get('size')) / (1024 * 1024)
                entries.append(entry)
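
parse_mlst() is not part of this snippet. MLST replies carry semicolon-separated fact=value pairs followed by the file name, so a minimal sketch under that assumption could be:

    def parse_mlst(self, line):
        # Minimal sketch: parse 'type=file;size=1024;modify=20200101000000; name.ext'
        facts = {}
        fact_string, _, _name = line.partition(' ')
        for fact in fact_string.split(';'):
            if '=' in fact:
                key, _, value = fact.partition('=')
                facts[key.strip()] = value.strip()
        return facts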
Ejemplo n.º 47
0
    def on_task_input(self, task, config):
        url = config['url']
        if '://' in url:
            lines = task.requests.get(url).iter_lines()
        else:
            lines = open(url, 'rb').readlines()

        entry_config = config.get('entry')
        format_config = config.get('format', {})

        entries = []
        # keep track of which fields have been found
        used = {}
        entry = Entry()

        # now parse text
        for line in lines:
            for field, regexp in entry_config.iteritems():
                #log.debug('search field: %s regexp: %s' % (field, regexp))
                match = re.search(regexp, line)
                if match:
                    # check if this field was already found; if so, start a new entry
                    if field in used:
                        if entry.isvalid():
                            log.info(
                                'Found field %s again before entry was completed. \
                                      Adding current incomplete, but valid entry and moving to next.'
                                % field)
                            self.format_entry(entry, format_config)
                            entries.append(entry)
                        else:
                            log.info(
                                'Invalid data, entry field %s is already found once. Ignoring entry.'
                                % field)
                        # start new entry
                        entry = Entry()
                        used = {}

                    # add field to entry
                    try:
                        entry[field] = match.group(1)
                    except IndexError:
                        log.error(
                            'regex for field `%s` must contain a capture group'
                            % field)
                        raise plugin.PluginError(
                            'Your text plugin config contains errors, please correct them.'
                        )
                    used[field] = True
                    log.debug('found field: %s value: %s' %
                              (field, entry[field]))

                # if all fields have been found
                if len(used) == len(entry_config):
                    # check that entry has at least title and url
                    if not entry.isvalid():
                        log.info(
                            'Invalid data, constructed entry is missing mandatory fields (title or url)'
                        )
                    else:
                        self.format_entry(entry, format_config)
                        entries.append(entry)
                        log.debug('Added entry %s' % entry)
                        # start new entry
                        entry = Entry()
                        used = {}
        return entries
Ejemplo n.º 48
0
 def test_encoding(self):
     e = Entry('title', 'url')
     with pytest.raises(EntryUnicodeError):
         e['invalid'] = b'\x8e'
Ejemplo n.º 49
0
    def on_task_input(self, task, config):
        # Load The Configs
        token = config['token']
        only_new = config['only_new']
        entry_config = config.get('entry')
        whitelist = config.get('whitelist', [])
        types = config.get('types', ['private', 'group'])

        # Get Last Checked ID
        persistence_name = f"{token}_update_id"
        update_id = task.simple_persistence.get(persistence_name)

        # Get only new messages
        params = {}
        if update_id and only_new:
            update_id += 1
            params['offset'] = update_id

        # The Target URL
        url = f"{_TELEGRAM_API_URL}bot{token}/getUpdates"

        # Get Telegram Updates
        try:
            response = task.requests.get(url,
                                         timeout=60,
                                         raise_status=True,
                                         params=params).json()
        except HTTPError as e:
            raise plugin.PluginError(f"Error getting telegram update: {e}")

        # We have an error
        if not response['ok']:
            raise plugin.PluginError(
                f"Telegram updater returned error {response['error_code']}: {response['description']}"
            )

        # Get All New Messages
        messages = response['result']

        entries = []
        for message in messages:
            # This is the ID
            update_id = message['update_id']

            # Update the last ID for the Bot
            logger.debug("Last Update set to {}", update_id)
            task.simple_persistence[persistence_name] = update_id

            # Skip updates that are not text messages in a chat
            if ('message' not in message or 'text' not in message['message']
                    or 'chat' not in message['message']
                    or 'type' not in message['message']['chat']):
                logger.debug("Invalid message discarted: {}", message)
                continue

            logger.debug("Income message: {}", message)

            # Check Types
            if types and message['message']['chat']['type'] not in types:
                logger.debug("Ignoring message because of invalid type {}",
                             message)
                continue

            # Create Base Entry
            text = message['message']['text']
            entry = Entry()
            entry['title'] = text

            # We need a url, so we add a dummy
            entry['url'] = f"http://localhost?update_id={str(update_id)}"

            # Store the message if we need to use it in other plugins
            entry['telegram_message'] = message['message']

            # Check From
            message_from = message['message']['from']
            message_chat = message['message']['chat']

            if whitelist:
                for check in whitelist:
                    if 'username' in check and check[
                            'username'] == message_from['username']:
                        logger.debug("WhiteListing: Username {}",
                                     message_from['username'])
                        break
                    elif ('fullname' in check and check['fullname']['first']
                          == message_from['first_name']
                          and check['fullname']['sur']
                          == message_from['last_name']):
                        logger.debug(
                            "WhiteListing: Full Name {} {}",
                            message_from['first_name'],
                            message_from['last_name'],
                        )
                        break
                    elif 'group' in check:
                        if (message_chat['type'] == 'group'
                                and message_chat['title'] == check['group']):
                            logger.debug("WhiteListing: Group {}",
                                         message_chat['title'])
                            break
                else:
                    logger.debug(
                        "Ignoring message because of no whitelist match {}",
                        message)
                    continue

            # Process the entry config
            accept = True
            if entry_config:
                for field, regexp in entry_config.items():
                    match = re.search(regexp, text)
                    if match:
                        try:
                            # Add field to entry
                            entry[field] = match.group(1)
                        except IndexError:
                            logger.error(
                                'Regex for field `{}` must contain a capture group',
                                field)
                            raise plugin.PluginError(
                                'Your from_telegram plugin config contains errors, please correct them.'
                            )
                    else:
                        logger.debug(
                            'Ignored entry, not match on field {}: {}', field,
                            entry)
                        accept = False
                        break

            # Append the entry
            if accept:
                entries.append(entry)
                logger.debug('Added entry {}', entry)

        return entries
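
The whitelist items checked above match on a username, a full name, or a group title. An illustrative shape (values are placeholders) would be:

whitelist = [
    {'username': 'some_user'},
    {'fullname': {'first': 'Jane', 'sur': 'Doe'}},
    {'group': 'My FlexGet Group'},
]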
Ejemplo n.º 50
0
    def list_entries(self):
        series_url, series_headers = self.request_builder(
            self.config.get('base_url'), 'series', self.config.get('port'),
            self.config['api_key'])
        json = self.get_json(series_url, series_headers)

        # Retrieves Sonarr's profile list if include_data is set to true
        if self.config.get('include_data'):
            profile_url, profile_headers = self.request_builder(
                self.config.get('base_url'), 'profile',
                self.config.get('port'), self.config['api_key'])
            profiles_json = self.get_json(profile_url, profile_headers)

        entries = []
        for show in json:
            fg_qualities = ''  # Initializes the quality parameter
            fg_cutoff = ''
            path = None
            if not show['monitored'] and self.config.get(
                    'only_monitored'
            ):  # Checks if to retrieve just monitored shows
                continue
            if show['status'] == 'ended' and not self.config.get(
                    'include_ended'):  # Checks if to retrieve ended shows
                continue
            if self.config.get(
                    'include_data'
            ) and profiles_json:  # Check if to retrieve quality & path
                path = show.get('path')
                for profile in profiles_json:
                    if profile['id'] == show[
                            'profileId']:  # Get show's profile data from all possible profiles
                        fg_qualities, fg_cutoff = self.quality_requirement_builder(
                            profile)
            entry = Entry(title=show['title'],
                          url='',
                          series_name=show['title'],
                          tvdb_id=show.get('tvdbId'),
                          tvrage_id=show.get('tvRageId'),
                          tvmaze_id=show.get('tvMazeId'),
                          imdb_id=show.get('imdbid'),
                          slug=show.get('titleSlug'),
                          sonarr_id=show.get('id'),
                          configure_series_target=fg_cutoff)
            if self.config.get('include_data'):
                if len(fg_qualities) > 1:
                    entry['configure_series_qualities'] = fg_qualities
                elif len(fg_qualities) == 1:
                    entry['configure_series_quality'] = fg_qualities[0]
                else:
                    entry['configure_series_quality'] = fg_qualities
                if path:
                    entry['configure_series_path'] = path
            if entry.isvalid():
                log.debug('returning entry %s', entry)
                entries.append(entry)
            else:
                log.error('Invalid entry created? %s' % entry)
                continue

        return entries
Example No. 51
    def on_task_input(self, task, config):
        """
        This plugin returns ALL of the shows monitored by Sickbeard.
        This includes both ongoing and ended.
        Syntax:

        sickbeard:
          base_url: <value>
          port: <value>
          api_key: <value>

        Options base_url and api_key are required.

        Use with input plugin like discover and/or configure_series.
        Example:

        download-tv-task:
          configure_series:
            settings:
              quality:
                - 720p
            from:
              sickbeard:
                base_url: http://localhost
                port: 8531
                api_key: MYAPIKEY1123
          discover:
            what:
              - next_series_episodes: yes
            from:
              torrentz: any
          download:
            /download/tv

        Note that when using the configure_series plugin with Sickbeard
        you are basically synced to it, so removing a show in Sickbeard will
        remove it in flexget as well, which could be positive or negative,
        depending on your usage.
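
        The code below also reads a few optional flags; a hedged example,
        assuming the schema accepts them alongside the required options:

        sickbeard:
          base_url: http://localhost
          port: 8531
          api_key: MYAPIKEY1123
          only_monitored: yes
          include_ended: no
          include_data: yes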
        """
        parsedurl = urlparse(config.get('base_url'))
        url = '%s://%s:%s%s/api/%s/?cmd=shows' % (parsedurl.scheme, parsedurl.netloc,
                                                  config.get('port'), parsedurl.path, config.get('api_key'))
        try:
            json = task.requests.get(url).json()
        except RequestException as e:
            raise plugin.PluginError('Unable to connect to Sickbeard at %s://%s:%s%s. Error: %s'
                                     % (parsedurl.scheme, parsedurl.netloc, config.get('port'), parsedurl.path, e))
        entries = []
        for _, show in list(json['data'].items()):
            fg_qualities = ''  # Initializes the quality parameter
            if show['paused'] and config.get('only_monitored'):
                continue
            if show['status'] == 'Ended' and not config.get('include_ended'):
                continue
            if config.get('include_data'):
                show_url = '%s:%s/api/%s/?cmd=show&tvdbid=%s' % (config['base_url'], config['port'],
                                                                 config['api_key'], show['tvdbid'])
                show_json = task.requests.get(show_url).json()
                fg_qualities = self.quality_requirement_builder(show_json['data']['quality_details']['initial'])
            entry = Entry(title=show['show_name'],
                          url='',
                          series_name=show['show_name'],
                          tvdb_id=show.get('tvdbid'),
                          tvrage_id=show.get('tvrage_id'))
            if len(fg_qualities) > 1:
                entry['configure_series_qualities'] = fg_qualities
            elif len(fg_qualities) == 1:
                entry['configure_series_quality'] = fg_qualities[0]
            else:
                entry['configure_series_quality'] = fg_qualities
            if entry.isvalid():
                entries.append(entry)
            else:
                log.error('Invalid entry created? %s' % entry)
                continue
            # Test mode logging
            if task.options.test:
                log.info("Test mode. Entry includes:")
                for key, value in list(entry.items()):
                    log.info('     {}: {}'.format(key.capitalize(), value))

        return entries
Example No. 52
 def process(self):
     imdb_lookup = plugin.get_plugin_by_name('imdb_lookup').instance
     self.changes.sort()
     udata = load_uoccin_data(self.folder)
     for line in self.changes:
         tmp = line.split('|')
         typ = tmp[1]
         tid = tmp[2]
         fld = tmp[3]
         val = tmp[4]
         self.log.verbose(
             'processing: type=%s, target=%s, field=%s, value=%s' %
             (typ, tid, fld, val))
         if typ == 'movie':
             # default
             mov = udata['movies'].setdefault(
                 tid, {
                     'name': 'N/A',
                     'watchlist': False,
                     'collected': False,
                     'watched': False
                 })
             # movie title is unknown at this time
             fake = Entry()
             fake['url'] = 'http://www.imdb.com/title/' + tid
             fake['imdb_id'] = tid
             try:
                 imdb_lookup.lookup(fake)
                 mov['name'] = fake.get('imdb_name')
             except plugin.PluginError:
                 self.log.warning(
                     'Unable to lookup movie %s from imdb, using raw name.'
                     % tid)
             # setting
             if fld == 'watchlist':
                 mov['watchlist'] = val == 'true'
             elif fld == 'collected':
                 mov['collected'] = val == 'true'
             elif fld == 'watched':
                 mov['watched'] = val == 'true'
             elif fld == 'tags':
                  mov['tags'] = re.split(r',\s*', val)
              elif fld == 'subtitles':
                  mov['subtitles'] = re.split(r',\s*', val)
             elif fld == 'rating':
                 mov['rating'] = int(val)
             # cleaning
             if not (mov['watchlist'] or mov['collected']
                     or mov['watched']):
                  self.log.verbose(r'deleting unused section: movies\%s' % tid)
                 udata['movies'].pop(tid)
         elif typ == 'series':
             tmp = tid.split('.')
             sid = tmp[0]
             sno = tmp[1] if len(tmp) > 2 else None
             eno = tmp[2] if len(tmp) > 2 else None
             # default
             ser = udata['series'].setdefault(
                 sid, {
                     'name': 'N/A',
                     'watchlist': False,
                     'collected': {},
                     'watched': {}
                 })
             # series name is unknown at this time
             try:
                 series = lookup_series(tvdb_id=sid)
                 ser['name'] = series.name
             except LookupError:
                 self.log.warning(
                     'Unable to lookup series %s from tvdb, using raw name.'
                     % sid)
             # setting
             if fld == 'watchlist':
                 ser['watchlist'] = val == 'true'
             elif fld == 'tags':
                  ser['tags'] = re.split(r',\s*', val)
             elif fld == 'rating':
                 ser['rating'] = int(val)
             elif sno is None or eno is None:
                 self.log.warning(
                     'invalid line "%s": season and episode numbers are required'
                     % line)
             elif fld == 'collected':
                 season = ser['collected'].setdefault(sno, {})
                 if val == 'true':
                     season.setdefault(eno, [])
                 else:
                     if eno in season:
                         season.pop(eno)
                     if not season:
                         self.log.verbose(
                              r'deleting unused section: series\%s\collected\%s'
                             % (sid, sno))
                         ser['collected'].pop(sno)
             elif fld == 'subtitles':
                 ser['collected'].setdefault(sno, {})[eno] = re.split(
                      r',\s*', val)
             elif fld == 'watched':
                 season = ser['watched'].setdefault(sno, [])
                 if val == 'true':
                     season = ser['watched'][sno] = list(
                         set(season) | set([int(eno)]))
                 elif int(eno) in season:
                     season.remove(int(eno))
                 season.sort()
                 if not season:
                     self.log.debug(
                          r'deleting unused section: series\%s\watched\%s' %
                         (sid, sno))
                     ser['watched'].pop(sno)
             # cleaning
             if not (ser['watchlist'] or ser['collected']
                     or ser['watched']):
                  self.log.debug(r'deleting unused section: series\%s' % sid)
                 udata['series'].pop(sid)
         else:
             self.log.warning('invalid element type "%s"' % typ)
     # save the updated uoccin.json
     ufile = os.path.join(self.folder, 'uoccin.json')
     try:
         text = json.dumps(udata,
                           sort_keys=True,
                           indent=4,
                           separators=(',', ': '))
         with open(ufile, 'w') as f:
             f.write(text)
     except Exception as err:
         self.log.debug('error writing %s: %s' % (ufile, err))
         raise plugin.PluginError('error writing %s: %s' % (ufile, err))
Example No. 53
    def search(self, task, entry, config):
        """
            Search for entries on PassThePopcorn
        """
        params = {}

        if 'tags' in config:
            tags = config['tags'] if isinstance(config['tags'],
                                                list) else [config['tags']]
            params['taglist'] = ',+'.join(tags)

        release_type = config.get('release_type')
        if release_type:
            params['scene'] = RELEASE_TYPES[release_type]

        if config.get('freeleech'):
            params['freetorrent'] = int(config['freeleech'])

        ordering = 'desc' if config['order_desc'] else 'asc'

        entries = set()

        params.update({
            'order_by': ORDERING[config['order_by']],
            'order_way': ordering,
            'action': 'advanced',
            'json': 'noredirect'
        })

        search_strings = entry.get('search_strings', [entry['title']])

        # searching with imdb id is much more precise
        if entry.get('imdb_id'):
            search_strings = [entry['imdb_id']]

        for search_string in search_strings:
            params['searchstr'] = search_string
            log.debug('Using search params: %s', params)
            try:
                result = self.get(self.base_url + 'torrents.php', params,
                                  config['username'], config['password'],
                                  config['passkey']).json()
            except RequestException as e:
                log.error('PassThePopcorn request failed: %s', e)
                continue

            total_results = result['TotalResults']
            log.debug('Total results: %s', total_results)

            authkey = result['AuthKey']
            passkey = result['PassKey']

            for movie in result['Movies']:
                # skip movies that are irrelevant
                if entry.get('movie_year') and int(movie['Year']) != int(
                        entry['movie_year']):
                    log.debug('Movie year %s does not match %s', movie['Year'],
                              entry['movie_year'])
                    continue
                # imdb id in the json result is without 'tt'
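                # e.g. entry['imdb_id'] might be 'tt0137523' while movie['ImdbId'] is '0137523' (illustrative values)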
                if entry.get(
                        'imdb_id') and movie['ImdbId'] not in entry['imdb_id']:
                    log.debug('imdb id %s does not match %s', movie['ImdbId'],
                              entry['imdb_id'])
                    continue

                for torrent in movie['Torrents']:
                    e = Entry()

                    e['title'] = torrent['ReleaseName']

                    e['torrent_tags'] = movie['Tags']
                    e['content_size'] = parse_filesize(torrent['Size'] + ' b')
                    e['torrent_snatches'] = int(torrent['Snatched'])
                    e['torrent_seeds'] = int(torrent['Seeders'])
                    e['torrent_leeches'] = int(torrent['Leechers'])
                    e['torrent_id'] = int(torrent['Id'])
                    e['golden_popcorn'] = torrent['GoldenPopcorn']
                    e['checked'] = torrent['Checked']
                    e['uploaded_at'] = dateutil_parse(torrent['UploadTime'])

                    e['url'] = self.base_url + 'torrents.php?action=download&id={}&authkey={}&torrent_pass={}'.format(
                        e['torrent_id'], authkey, passkey)

                    entries.add(e)

        return entries
Example No. 54
    def post(self, session=None):
        """ Execute task and stream results """
        data = request.json
        for task in data.get('tasks'):
            if task.lower() not in [
                    t.lower()
                    for t in self.manager.user_config.get('tasks', {}).keys()
            ]:
                return {'error': 'task %s does not exist' % task}, 404

        queue = ExecuteLog()
        output = queue if data.get('loglevel') else None
        stream = any(
            arg[0] in ['progress', 'summary', 'loglevel', 'entry_dump']
            for arg in data.items() if arg[1])
        loglevel = data.pop('loglevel', None)

        # This emulates the CLI command of using `--now`
        options = {'interval_ignore': data.pop('now', None)}

        for option, value in data.items():
            options[option] = value

        if data.get('inject'):
            entries = []
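            # A hypothetical inject payload, based on the item keys read below:
            #   {"inject": [{"url": "http://example.com/file.torrent",
            #                "title": "Example Entry", "accept": true,
            #                "fields": {"quality": "720p"}}]}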
            for item in data.get('inject'):
                entry = Entry()
                entry['url'] = item['url']
                if not item.get('title'):
                    try:
                        value, params = cgi.parse_header(
                            requests.head(
                                item['url']).headers['Content-Disposition'])
                        entry['title'] = params['filename']
                    except KeyError:
                        return {
                            'status': 'error',
                            'message': 'No title given, and couldn\'t get one from the URL\'s HTTP response'
                        }, 500
                else:
                    entry['title'] = item.get('title')
                if item.get('force'):
                    entry['immortal'] = True
                if item.get('accept'):
                    entry.accept(reason='accepted by API inject')
                if item.get('fields'):
                    for key, value in item.get('fields').items():
                        entry[key] = value
                entries.append(entry)
            options['inject'] = entries

        executed_tasks = self.manager.execute(options=options,
                                              output=output,
                                              loglevel=loglevel)

        tasks_queued = []

        for task_id, task_name, task_event in executed_tasks:
            tasks_queued.append({
                'id': task_id,
                'name': task_name,
                'event': task_event
            })
            _streams[task_id] = {
                'queue': queue,
                'last_update': datetime.now(),
                'args': data
            }

        if not stream:
            return jsonify({
                'tasks': [{
                    'id': task['id'],
                    'name': task['name']
                } for task in tasks_queued]
            })

        def stream_response():
            # First return the tasks to execute
            yield '{"stream": ['
            yield json.dumps({
                'tasks': [{
                    'id': task['id'],
                    'name': task['name']
                } for task in tasks_queued]
            }) + ',\n'

            while True:
                try:
                    yield queue.get(timeout=1) + ',\n'
                    continue
                except Empty:
                    pass

                if queue.empty() and all(
                    [task['event'].is_set() for task in tasks_queued]):
                    for task in tasks_queued:
                        del _streams[task['id']]
                    break
            yield '{}]}'

        return Response(stream_response(), mimetype='text/event-stream')
Example No. 55
    def search(self, task, entry, config):
        """CPASBIEN search plugin

        Config example:

        tv_search_cpasbien:
          discover:
            what:
              - trakt_list:
                  username: xxxxxxx
                  api_key: xxxxxxx
                  series: watchlist
            from:
              - cpasbien:
                  category: "series-vostfr"
            interval: 1 day
            ignore_estimations: yes

        Category is ONE of:
            all
            films
            series
            musique
            films-french
            1080p
            720p
            series-francaise
            films-dvdrip
            films-vostfr
            series-vostfr
            ebook
        """

        base_url = 'http://www.cpasbien.io'
        entries = set()
        for search_string in entry.get('search_strings', [entry['title']]):
            search_string = search_string.replace(' ', '-').lower()
            search_string = search_string.replace('(', '')
            search_string = search_string.replace(')', '')
            query = normalize_unicode(search_string)
            query_url_fragment = quote_plus(query.encode('utf-8'))
            # http://www.cpasbien.pe/recherche/ncis.html
            if config['category'] == 'all':
                str_url = (base_url, 'recherche', query_url_fragment)
                url = '/'.join(str_url)
            else:
                category_url_fragment = '%s' % config['category']
                str_url = (base_url, 'recherche', category_url_fragment,
                           query_url_fragment)
                url = '/'.join(str_url)
            logger.debug('search url: {}', url + '.html')
            # GET URL
            f = task.requests.get(url + '.html').content
            soup = get_soup(f)
            if soup.findAll(text=re.compile(' 0 torrents')):
                logger.debug('search returned no results')
            else:
                nextpage = 0
                while nextpage >= 0:
                    if nextpage > 0:
                        newurl = url + '/page-' + str(nextpage)
                        logger.debug('-----> NEXT PAGE : {}', newurl)
                        f1 = task.requests.get(newurl).content
                        soup = get_soup(f1)
                    for result in soup.findAll(
                            'div', attrs={'class': re.compile('ligne')}):
                        entry = Entry()
                        link = result.find(
                            'a', attrs={'href': re.compile('dl-torrent')})
                        entry['title'] = link.contents[0]
                        # REWRITE URL
                        page_link = link.get('href')
                        link_rewrite = page_link.split('/')
                        # get last value in array remove .html and replace by .torrent
                        endlink = link_rewrite[-1]
                        str_url = (base_url, '/telechargement/', endlink[:-5],
                                   '.torrent')
                        entry['url'] = ''.join(str_url)

                        logger.debug('Title: {} | DL LINK: {}', entry['title'],
                                     entry['url'])

                        entry['torrent_seeds'] = int(
                            result.find('span',
                                        attrs={
                                            'class': re.compile('seed')
                                        }).text)
                        entry['torrent_leeches'] = int(
                            result.find('div',
                                        attrs={
                                            'class': re.compile('down')
                                        }).text)
                        size = result.find('div',
                                           attrs={
                                               'class': re.compile('poid')
                                           }).text

                        entry['content_size'] = parse_filesize(size, si=False)

                        if entry['torrent_seeds'] > 0:
                            entries.add(entry)
                        else:
                            logger.debug('0 SEED, not adding entry')
                    if soup.find(text=re.compile('Suiv')):
                        nextpage += 1
                    else:
                        nextpage = -1
        return entries
Example No. 56
    def search(self, task, entry, config):
        """
            Search for entries on FileList
        """
        entries = []

        # mandatory params
        params = {
            'username': config['username'],
            'passkey': config['passkey'],
            'action': 'search-torrents',
        }

        # category
        if config.get('category'):
            params['category'] = (
                ','.join(self.valid_categories[cat] for cat in config['category'])
                if isinstance(config.get('category'), list)
                else self.valid_categories[config.get('category')]
            )

        # extras: internal release, moderated torrent, freeleech
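        # e.g. a config with `freeleech: yes` would add {'freeleech': 1} to params,
        # assuming 'freeleech' is among the keys in self.valid_extras (defined elsewhere)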
        params.update({extra: 1 for extra in self.valid_extras if config.get(extra)})

        # set season/episode if series
        if entry.get('series_episode'):
            params['episode'] = entry.get('series_episode')
        if entry.get('series_season'):
            params['season'] = entry.get('series_season')

        for search_title in entry.get('search_strings', [entry.get('title')]):

            if entry.get('imdb_id'):
                params['type'] = 'imdb'
                params['query'] = entry.get('imdb_id')
                params['name'] = search_title
            else:
                params['type'] = 'name'
                params['query'] = search_title

            # get them results
            try:
                response = self.get(self.api_url, params)
            except RequestException as e:
                raise plugin.PluginError(f'FileList request failed badly! {e}')

            results = response.json()
            if not results:
                logger.verbose('No torrent found on Filelist for `{}`', search_title)
            else:
                logger.verbose(
                    '{} torrent(s) were found on Filelist for `{}`', len(results), search_title
                )

            for result in results:
                entry = Entry()

                entry['title'] = result['name']
                entry['url'] = result['download_link']
                entry['imdb'] = result['imdb']
                # size is returned in bytes but expected in MiB
                entry['content_size'] = result['size'] / 2 ** 20
                entry['torrent_snatches'] = result['times_completed']
                entry['torrent_seeds'] = result['seeders']
                entry['torrent_leeches'] = result['leechers']
                entry['torrent_internal'] = bool(result['internal'])
                entry['torrent_moderated'] = bool(result['moderated'])
                entry['torrent_freeleech'] = bool(result['freeleech'])
                entry['torrent_genres'] = [
                    genres.strip() for genres in result['small_description'].split(',')
                ]

                entries.append(entry)

        return entries
Example No. 57
    def create_entries(self, page_url, soup, config):

        queue = []
        duplicates = {}
        duplicate_limit = 4

        def title_exists(title):
            """Helper method. Return True if title is already added to entries"""
            for entry in queue:
                if entry['title'] == title:
                    return True

        for link in soup.find_all('a'):
            # not a valid link
            if not link.has_attr('href'):
                continue
            # no content in the link
            if not link.contents:
                continue

            url = link['href']
            log_link = url
            log_link = log_link.replace('\n', '')
            log_link = log_link.replace('\r', '')

            # fix broken urls
            if url.startswith('//'):
                url = 'http:' + url
            elif not url.startswith(('http://', 'https://')):
                url = urlparse.urljoin(page_url, url)

            # get only links matching regexp
            regexps = config.get('links_re', None)
            if regexps:
                accept = False
                for regexp in regexps:
                    if re.search(regexp, url):
                        accept = True
                if not accept:
                    continue

            title_from = config.get('title_from', 'auto')
            if title_from == 'url':
                title = self._title_from_url(url)
                log.debug('title from url: %s' % title)
            elif title_from == 'title':
                if not link.has_attr('title'):
                    log.warning(
                        'Link `%s` doesn\'t have title attribute, ignored.' %
                        log_link)
                    continue
                title = link['title']
                log.debug('title from title: %s' % title)
            elif title_from == 'auto':
                title = self._title_from_link(link, log_link)
                if title is None:
                    continue
                # automatic mode, check if title is unique
                # if there are too many duplicate titles, switch to title_from: url
                if title_exists(title):
                    # ignore index links as a counter
                    if 'index' in title and len(title) < 10:
                        log.debug('ignored index title %s' % title)
                        continue
                    duplicates.setdefault(title, 0)
                    duplicates[title] += 1
                    if duplicates[title] > duplicate_limit:
                        # if from url seems to be bad choice use title
                        from_url = self._title_from_url(url)
                        switch_to = 'url'
                        for ext in ('.html', '.php'):
                            if from_url.endswith(ext):
                                switch_to = 'title'
                        log.info(
                            'Link names seem to be useless, auto-configuring \'title_from: %s\'. '
                            'This may not work well, you might need to configure it yourself.'
                            % switch_to)
                        config['title_from'] = switch_to
                        # start from the beginning  ...
                        return self.create_entries(page_url, soup, config)
            elif title_from == 'link' or title_from == 'contents':
                # link from link name
                title = self._title_from_link(link, log_link)
                if title is None:
                    continue
                log.debug('title from link: %s' % title)
            else:
                raise plugin.PluginError('Unknown title_from value %s' %
                                         title_from)

            if not title:
                log.warning('title could not be determined for link %s' %
                            log_link)
                continue

            # strip unicode white spaces
            title = title.replace(u'\u200B', u'').strip()

            # in case the title contains xxxxxxx.torrent - foooo.torrent clean it a bit (get up to first .torrent)
            # TODO: hack
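            # e.g. a hypothetical 'some.release.torrent - mirror.torrent' is trimmed to 'some.release'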
            if title.lower().find('.torrent') > 0:
                title = title[:title.lower().find('.torrent')]

            if title_exists(title):
                # title link should be unique, add CRC32 to end if it's not
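                # e.g. a duplicate 'Some Title' would become 'Some Title [1A2B3C4D]' (illustrative CRC32 value)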
                hash = zlib.crc32(url.encode("utf-8"))
                crc32 = '%08X' % (hash & 0xFFFFFFFF)
                title = '%s [%s]' % (title, crc32)
                # truly duplicate, title + url crc already exists in queue
                if title_exists(title):
                    continue
                log.debug('uniqued title to %s' % title)

            entry = Entry()
            entry['url'] = url
            entry['title'] = title

            queue.append(entry)

        # add from queue to task
        return queue
Example No. 58
    def on_task_input(self, task, config):
        config = self.build_config(config)

        log.debug('Requesting task `%s` url `%s`' % (task.name, config['url']))

        # Used to identify which etag/modified to use
        url_hash = str(hash(config['url']))

        # set etag and last modified headers if config has not changed since
        # last run and if caching wasn't disabled with --no-cache argument.
        all_entries = (config['all_entries'] or task.config_modified or
                       task.manager.options.nocache or task.manager.options.retry)
        headers = {}
        if not all_entries:
            etag = task.simple_persistence.get('%s_etag' % url_hash, None)
            if etag:
                log.debug('Sending etag %s for task %s' % (etag, task.name))
                headers['If-None-Match'] = etag
            modified = task.simple_persistence.get('%s_modified' % url_hash, None)
            if modified:
                if not isinstance(modified, basestring):
                    log.debug('Invalid date was stored for last modified time.')
                else:
                    headers['If-Modified-Since'] = modified
                    log.debug('Sending last-modified %s for task %s' % (headers['If-Modified-Since'], task.name))

        # Get the feed content
        if config['url'].startswith(('http', 'https', 'ftp', 'file')):
            # Get feed using requests library
            auth = None
            if 'username' in config and 'password' in config:
                auth = (config['username'], config['password'])
            try:
                # Use the raw response so feedparser can read the headers and status values
                response = task.requests.get(config['url'], timeout=60, headers=headers, raise_status=False, auth=auth)
                content = response.content
            except RequestException as e:
                raise PluginError('Unable to download the RSS for task %s (%s): %s' %
                                  (task.name, config['url'], e))

            # status checks
            status = response.status_code
            if status == 304:
                log.verbose('%s hasn\'t changed since last run. Not creating entries.' % config['url'])
                # Let details plugin know that it is ok if this feed doesn't produce any entries
                task.no_entries_ok = True
                return []
            elif status == 401:
                raise PluginError('Authentication needed for task %s (%s): %s' %
                                  (task.name, config['url'], response.headers['www-authenticate']), log)
            elif status == 404:
                raise PluginError('RSS Feed %s (%s) not found' % (task.name, config['url']), log)
            elif status == 500:
                raise PluginError('Internal server exception on task %s (%s)' % (task.name, config['url']), log)
            elif status != 200:
                raise PluginError('HTTP error %s received from %s' % (status, config['url']), log)

            # update etag and last modified
            if not config['all_entries']:
                etag = response.headers.get('etag')
                if etag:
                    task.simple_persistence['%s_etag' % url_hash] = etag
                    log.debug('etag %s saved for task %s' % (etag, task.name))
                if response.headers.get('last-modified'):
                    modified = response.headers['last-modified']
                    task.simple_persistence['%s_modified' % url_hash] = modified
                    log.debug('last modified %s saved for task %s' % (modified, task.name))
        else:
            # This is a file, open it
            with open(config['url'], 'rb') as f:
                content = f.read()

        if not content:
            log.error('No data received for RSS feed.')
            return
        try:
            rss = feedparser.parse(content)
        except LookupError as e:
            raise PluginError('Unable to parse the RSS (from %s): %s' % (config['url'], e))

        # check for bozo
        ex = rss.get('bozo_exception', False)
        if ex or rss.get('bozo'):
            if rss.entries:
                msg = 'Bozo error %s while parsing feed, but entries were produced, ignoring the error.' % type(ex)
                if config.get('silent', False):
                    log.debug(msg)
                else:
                    log.verbose(msg)
            else:
                if isinstance(ex, feedparser.NonXMLContentType):
                    # see: http://www.feedparser.org/docs/character-encoding.html#advanced.encoding.nonxml
                    log.debug('ignoring feedparser.NonXMLContentType')
                elif isinstance(ex, feedparser.CharacterEncodingOverride):
                    # see: ticket 88
                    log.debug('ignoring feedparser.CharacterEncodingOverride')
                elif isinstance(ex, UnicodeEncodeError):
                    raise PluginError('Feed has UnicodeEncodeError while parsing...')
                elif isinstance(ex, (xml.sax._exceptions.SAXParseException, xml.sax._exceptions.SAXException)):
                    # save invalid data for review; this is a bit ugly, but users seem to be really
                    # confused when html pages (login pages) are received
                    self.process_invalid_content(task, content)
                    if task.manager.options.debug:
                        log.exception(ex)
                    raise PluginError('Received invalid RSS content from task %s (%s)' % (task.name, config['url']))
                elif isinstance(ex, httplib.BadStatusLine) or isinstance(ex, IOError):
                    raise ex  # let the @internet decorator handle
                else:
                    # all other bozo errors
                    self.process_invalid_content(task, content)
                    raise PluginError('Unhandled bozo_exception. Type: %s (task: %s)' %
                                      (ex.__class__.__name__, task.name), log)

        log.debug('encoding %s' % rss.encoding)

        last_entry_id = ''
        if not all_entries:
            # Test to make sure entries are in descending order
            if rss.entries and rss.entries[0].get('published_parsed'):
                if rss.entries[0]['published_parsed'] < rss.entries[-1]['published_parsed']:
                    # Sort them if they are not
                    rss.entries.sort(key=lambda x: x['published_parsed'], reverse=True)
            last_entry_id = task.simple_persistence.get('%s_last_entry' % url_hash)

        # new entries to be created
        entries = []

        # The field name used for the url can be configured with `link`.
        # The default value is auto, but e.g. guid is used in some feeds.
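        # e.g. a hypothetical config:  rss: { url: 'http://example.com/feed.xml', link: guid }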
        ignored = 0
        for entry in rss.entries:

            # Check if title field is overridden in config
            title_field = config.get('title', 'title')
            # ignore entries without title
            if not entry.get(title_field):
                log.debug('skipping entry without title')
                ignored += 1
                continue

            # Set the title from the source field
            entry.title = entry[title_field]

            # Check we haven't already processed this entry in a previous run
            if last_entry_id == entry.title + entry.get('guid', ''):
                log.verbose('Not processing entries from last run.')
                # Let details plugin know that it is ok if this task doesn't produce any entries
                task.no_entries_ok = True
                break

            # convert title to ascii (cleanup)
            if config.get('ascii', False):
                entry.title = entry.title.encode('ascii', 'ignore')

            # remove annoying zero width spaces
            entry.title = entry.title.replace(u'\u200B', u'')

            # Dict with fields to grab mapping from rss field name to FlexGet field name
            fields = {'guid': 'guid',
                      'author': 'author',
                      'description': 'description',
                      'infohash': 'torrent_info_hash'}
            # extend the dict of fields to grab with other_fields list in config
            for field_map in config.get('other_fields', []):
                fields.update(field_map)
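            # e.g. other_fields: [{'media_thumbnail': 'thumbnail'}] (hypothetical mapping) would copy
            # the feed's media_thumbnail value into the entry field 'thumbnail' in add_entry() below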

            # helper
            # TODO: confusing? refactor into class member ...

            def add_entry(ea):
                ea['title'] = entry.title

                for rss_field, flexget_field in fields.iteritems():
                    if rss_field in entry:
                        if not isinstance(getattr(entry, rss_field), basestring):
                            # Error if this field is not a string
                            log.error('Cannot grab non text field `%s` from rss.' % rss_field)
                            # Remove field from list of fields to avoid repeated error
                            config['other_fields'].remove(rss_field)
                            continue
                        if not getattr(entry, rss_field):
                            log.debug('Not grabbing blank field %s from rss for %s.' % (rss_field, ea['title']))
                            continue
                        try:
                            ea[flexget_field] = decode_html(entry[rss_field])
                            if rss_field in config.get('other_fields', []):
                                # Print a debug message for custom added fields
                                log.debug('Field `%s` set to `%s` for `%s`' % (rss_field, ea[rss_field], ea['title']))
                        except UnicodeDecodeError:
                            log.warning('Failed to decode entry `%s` field `%s`' % (ea['title'], rss_field))
                # Also grab pubdate if available
                if hasattr(entry, 'published_parsed') and entry.published_parsed:
                    ea['rss_pubdate'] = datetime(*entry.published_parsed[:6])
                # store basic auth info
                if 'username' in config and 'password' in config:
                    ea['basic_auth_username'] = config['username']
                    ea['basic_auth_password'] = config['password']
                entries.append(ea)

            # create from enclosures if present
            enclosures = entry.get('enclosures', [])

            if len(enclosures) > 1 and not config.get('group_links'):
                # There is more than 1 enclosure, create an Entry for each of them
                log.debug('adding %i entries from enclosures' % len(enclosures))
                for enclosure in enclosures:
                    if 'href' not in enclosure:
                        log.debug('RSS-entry `%s` enclosure does not have URL' % entry.title)
                        continue
                    # There is a valid url for this enclosure, create an Entry for it
                    ee = Entry()
                    self.add_enclosure_info(ee, enclosure, config.get('filename', True), True)
                    add_entry(ee)
                # If we created entries for enclosures, we should not create an Entry for the main rss item
                continue

            # create flexget entry
            e = Entry()

            if not isinstance(config.get('link'), list):
                # If the link field is not a list, search for first valid url
                if config['link'] == 'auto':
                    # Auto mode, check for a single enclosure url first
                    if len(entry.get('enclosures', [])) == 1 and entry['enclosures'][0].get('href'):
                        self.add_enclosure_info(e, entry['enclosures'][0], config.get('filename', True))
                    else:
                        # If there is no enclosure url, check link, then guid field for urls
                        for field in ['link', 'guid']:
                            if entry.get(field):
                                e['url'] = entry[field]
                                break
                else:
                    if entry.get(config['link']):
                        e['url'] = entry[config['link']]
            else:
                # If link was passed as a list, we create a list of urls
                for field in config['link']:
                    if entry.get(field):
                        e.setdefault('url', entry[field])
                        if entry[field] not in e.setdefault('urls', []):
                            e['urls'].append(entry[field])

            if config.get('group_links'):
                # Append a list of urls from enclosures to the urls field if group_links is enabled
                e.setdefault('urls', [e['url']]).extend(
                    [enc.href for enc in entry.get('enclosures', []) if enc.get('href') not in e['urls']])

            if not e.get('url'):
                log.debug('%s does not have link (%s) or enclosure' % (entry.title, config['link']))
                ignored += 1
                continue

            add_entry(e)

        # Save last spot in rss
        if rss.entries:
            log.debug('Saving location in rss feed.')
            task.simple_persistence['%s_last_entry' % url_hash] = rss.entries[0].title + rss.entries[0].get('guid', '')

        if ignored:
            if not config.get('silent'):
                log.warning('Skipped %s RSS-entries without required information (title, link or enclosures)' % ignored)

        return entries
Example No. 59
    def items(self):
        if self._items is None:
            log.debug('fetching items from IMDB')
            try:
                r = self.session.get(
                    'https://www.imdb.com/list/export?list_id=%s&author_id=%s'
                    % (self.list_id, self.user_id),
                    cookies=self.cookies)
                lines = list(r.iter_lines(decode_unicode=True))
            except RequestException as e:
                raise PluginError(e.args[0])
            # Normalize headers to lowercase
            lines[0] = lines[0].lower()
            self._items = []
            for row in csv_dictreader(lines):
                log.debug('parsing line from csv: %s', row)

                try:
                    item_type = row['title type'].lower()
                    name = row['title']
                    year = int(row['year']) if row['year'] != '????' else None
                    created = datetime.strptime(
                        row['created'],
                        '%Y-%m-%d') if row.get('created') else None
                    modified = datetime.strptime(
                        row['modified'],
                        '%Y-%m-%d') if row.get('modified') else None
                    entry = Entry({
                        'title': '%s (%s)' % (name, year) if year is not None else name,
                        'url': row['url'],
                        'imdb_id': row['const'],
                        'imdb_url': row['url'],
                        'imdb_list_position': int(row['position']) if 'position' in row else None,
                        'imdb_list_created': created,
                        'imdb_list_modified': modified,
                        'imdb_list_description': row.get('description'),
                        'imdb_name': name,
                        'imdb_year': year,
                        'imdb_user_score': float(row['imdb rating']) if row['imdb rating'] else None,
                        'imdb_votes': int(row['num votes']) if row['num votes'] else None,
                        'imdb_genres': [genre.strip() for genre in row['genres'].split(',')],
                    })

                except ValueError as e:
                    log.debug(
                        'no movie row detected, skipping. %s. Exception: %s',
                        row, e)
                    continue

                if item_type in MOVIE_TYPES:
                    entry['movie_name'] = name
                    entry['movie_year'] = year
                elif item_type in SERIES_TYPES:
                    entry['series_name'] = name
                    entry['series_year'] = year
                elif item_type in OTHER_TYPES:
                    entry['title'] = name
                else:
                    log.verbose(
                        'Unknown IMDB type entry received: %s. Skipping',
                        item_type)
                    continue
                self._items.append(entry)
        return self._items
Example No. 60
    def on_task_input(self, task, config):
        config = self.prepare_config(config)
        if not config['enabled']:
            return

        if not self.client:
            self.client = self.create_rpc_client(config)
        entries = []

        session = self.client.get_session()

        for torrent in self.client.get_torrents():
            seed_ratio_ok, idle_limit_ok = self.check_seed_limits(torrent, session)
            if config['only_complete'] and not (
                seed_ratio_ok and idle_limit_ok and torrent.progress == 100
            ):
                continue
            entry = Entry(
                title=torrent.name,
                url='',
                torrent_info_hash=torrent.hashString,
                content_size=torrent.totalSize / (1024 * 1024),
            )
            # Location of torrent is only valid if transmission is on same machine as flexget
            if config['host'] in ('localhost', '127.0.0.1'):
                entry['location'] = torrent.torrentFile
                entry['url'] = 'file://' + torrent.torrentFile
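                # e.g. a torrent file at /var/lib/transmission/example.torrent (hypothetical path)
                # yields entry['url'] == 'file:///var/lib/transmission/example.torrent'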
            for attr in [
                'id',
                'comment',
                'desiredAvailable',
                'downloadDir',
                'isFinished',
                'isPrivate',
                'leftUntilDone',
                'ratio',
                'status',
                'date_active',
                'date_added',
                'date_done',
                'date_started',
                'errorString',
                'priority',
                'progress',
                'secondsDownloading',
                'secondsSeeding',
                'torrentFile',
            ]:
                try:
                    entry['transmission_' + attr] = getattr(torrent, attr)
                except Exception:
                    logger.opt(exception=True).debug(
                        'error when requesting transmissionrpc attribute {}', attr
                    )
            # Availability as a fraction of the data left to download (not a percentage)
            entry['transmission_availability'] = (
                (torrent.desiredAvailable / torrent.leftUntilDone) if torrent.leftUntilDone else 0
            )

            entry['transmission_trackers'] = [t['announce'] for t in torrent.trackers]
            entry['transmission_seed_ratio_ok'] = seed_ratio_ok
            entry['transmission_idle_limit_ok'] = idle_limit_ok
            st_error_to_desc = {
                0: 'OK',
                1: 'tracker_warning',
                2: 'tracker_error',
                3: 'local_error',
            }
            entry['transmission_error_state'] = st_error_to_desc[torrent.error]
            # Built in done_date doesn't work when user adds an already completed file to transmission
            if torrent.progress == 100:
                entry['transmission_date_done'] = datetime.fromtimestamp(
                    max(torrent.addedDate, torrent.doneDate)
                )
            entries.append(entry)
        return entries