Example #1
0
 def download_retry(self, count=3):
     """Same as download(), but retries count times upon failure.

     Arguments:
     count - Maximum number of download attempts. Default: 3

     Returns whatever download() returns on the first success.
     Re-raises the final TorrentDownloadError once all attempts are
     exhausted. At least one attempt is always made, even for count <= 1.
     """
     attempt = 1
     while True:
         try:
             return self.download()
         # "except X as e" replaces the Python-2-only "except X, e" form;
         # it is valid from Python 2.6 onwards.
         except TorrentDownloadError as e:
             if attempt >= count:
                 # Out of retries: let the caller see the last error.
                 raise
             logging.debug("Download attempt %s of %s failed: %s. "
                           "Retrying..." % (attempt, count, e))
             attempt += 1
Example #2
0
 def save(self, directory=None, filename=None, retry=3):
     """Save torrent to path, or output-directory or output-directory2
     in the configuration if called with no arguments. It is downloaded 
     if it hasn't been already. Returns path that torrent was saved to.
     
     Arguments:
     Directory - Full directory to save torrent under.
                 Default: output-directory from config
     filename - Filename to save torrent as. Default: Automatically
                generated from episode details.
     retry - Number of times to attempt download. Default: 3
     """
     # Resolve the target directory: an explicit argument must already
     # exist; otherwise fall back to output-directory, then
     # output-directory2 from the configuration.
     if directory:
         if not os.path.exists(directory):
             raise TorrentWriteError, "Output directory doesn't exist."
     else:
         if os.path.exists(config['output-directory']):
             directory = config['output-directory']
         elif config['output-directory2'] \
                 and os.path.exists(config['output-directory2']):
             directory = config['output-directory2']
         else:
             raise TorrentWriteError, "Output directory doesn't exist."
     # Fetch the torrent if it is not already held in self.file.
     if not self.file:
         if retry > 1:
             self.download_retry(retry)
         else:
             self.download()
     # Choose a filename unless the caller supplied one: prefer the
     # server-provided name (when friendly-filenames is off), then the
     # ".torrent" basename from the URL, then a generated fallback.
     if not filename:
         if not config['friendly-filenames']:
             if self._server_filename:
                 filename = self._server_filename
             else:
                 parsed_url = urlparse.urlparse(self.url)
                 # parsed_url[2] is the URL path component.
                 if parsed_url[2][-8:] == ".torrent":
                     try:
                         filename = urllib2.unquote(
                                             parsed_url[2].split("/")[-1])
                     except IndexError:
                         pass
         # friendly-filenames and a fallback
         if not filename:
             filename = "%s.torrent" % str(self.episode)
     filename = self._get_valid_filename(filename)
     path = os.path.join(directory, filename)
     logging.debug("Saving torrent to %s..." % path)
     try:
         f = open(path, "w")
     except IOError, e:
         raise TorrentSaveError, "Can't open torrent file for writing: %s"\
                                     % e
     # NOTE(review): this excerpt is truncated here — the function
     # continues beyond these lines (writing the torrent data and
     # returning the path).
Example #3
0
 def get_details(self):
     """If details are missing, fetches the human_name and show_type
     from the RSS feed. Returns dictionary with keys human_name and
     show_type.

     Raises ShowFeedNoEpisodesError when the feed has no entries and
     ShowDetailsError when no show name can be extracted.
     """
     if not self.rss:
         self._get_rss_feed()
     logging.debug("Getting details for %s..." % self)
     if not self.rss['entries']:
         raise ShowFeedNoEpisodesError
     # Determine human title. We are assuming here that the first episode
     # in the feed has a useful description. This may cause problems
     r = re.compile(r'Show Name\s*: (.*?);')
     name_match = r.search(self.rss['entries'][0].description)
     if not name_match:
         raise ShowDetailsError("Could not determine show name for %s."
                                % self)
     human_name = name_match.group(1)
     # Determine show type: count, per episode description, which kind
     # of numbering information is present and let the majority win.
     title_re = re.compile(r'Show\s*Title\s*:\s*(.*?);')
     season_re = re.compile(r'Season\s*:\s*([0-9]*?);')
     episode_re = re.compile(r'Episode\s*:\s*([0-9]*?)$')
     date_re = re.compile(r'Episode\s*Date:\s*([0-9\-]+)$')
     d = {
         'seasonepisode': 0,
         'date': 0,
         'title': 0
     }
     for episode in self.rss['entries']:
         title_match = title_re.search(episode.description)
         season_match = season_re.search(episode.description)
         episode_match = episode_re.search(episode.description)
         date_match = date_re.search(episode.description)
         if season_match and episode_match:
             d['seasonepisode'] += 1
         elif date_match:
             d['date'] += 1
         elif title_match and title_match.group(1) != 'n/a':
             d['title'] += 1
     # Nothing could be found, fall back to "time" type
     if not any(d.values()):
         show_type = "time"
     else:
         # Highest vote wins. sorted() is stable, so this is equivalent
         # to the old cmp-based sort-and-take-last (cmp= is Python-2-only).
         show_type = sorted(d, key=d.get)[-1]
     self.human_name = human_name
     self.show_type = show_type
     return {'show_type': show_type, 'human_name': human_name}
Example #4
0
 def _save_episode(self, episode):
     """Save episode at the quality set in the configuration.

     Arguments:
     episode - Episode object providing save().

     Returns True if the episode was saved, False when the download was
     deliberately delayed or no working torrent was found.
     """
     try:
         filename = episode.save(config["quality"])
         logging.info("%s saved to %s" % (episode, filename))
         return True
     except EpisodeQualityDelayError:
         # Not an error: we are waiting for a better release.
         logging.debug("Delaying download of this episode to wait for "
                       "a higher quality to be released.")
     except EpisodeNoWorkingTorrentsError:
         # TODO: only warn about this once (store it in the state file)
         #       so repeated cron runs don't get annoying, and record
         #       failed torrents there for retrying next run.
         logging.warning("No working torrents found for %s." % episode)
     return False
Example #5
0
 def download(self):
     """Download this torrent and store the bdecoded dictionary and
     the torrent file in the dict and file properties respectively.
     The first successful tracker response is stored in the 
     tracker_response property.
     
     Returns the torrent as a bdecoded dictionary.
     """
     # TODO: there is no need downloading & checking each time tracker 
     #       fails split this up into downloading torrent and checking 
     #       tracker
     logging.debug("Downloading %s..." % self.url)
     # Fetch with an explicit User-Agent header rather than urllib2's
     # default one.
     request = urllib2.Request(self.url)
     request.add_header('User-Agent', USER_AGENT)
     try:
         f = urllib2.urlopen(request)
     except urllib2.URLError, e:
         # Wrap every URLError flavour in TorrentDownloadError so
         # callers (e.g. download_retry) only need to catch one type.
         # "reason" means the server was never reached; "code" is an
         # HTTP-level failure.
         if hasattr(e, "reason"):
             raise TorrentDownloadError, "Could not reach server: %s" \
                                         % e.reason
         elif hasattr(e, "code"):
             raise TorrentDownloadError, e
         else:
             raise TorrentDownloadError, "Unknown URLError: %s" % e
     # NOTE(review): this excerpt is truncated here — the function
     # continues (reading the response, bdecoding, tracker checks)
     # beyond the lines shown.
Example #6
0
 def _get_rss_feed(self, url=None):
     """Returns the feedparser object and stores it in the rss property.

     Arguments:
     url - Feed URL to download. Default: "feed" in config.

     Raises ShowFeedNotModifiedError on HTTP 304 and ShowFeedError for
     any other condition that prevents the feed from being parsed.
     Also updates feed_etag / feed_last_modified for conditional
     requests on the next call.
     """
     if not url:
         url = config['feed'] % self.exact_name
     logging.debug("Downloading and processing %s..." % url)
     last_modified = None
     if self.feed_last_modified:
         last_modified = self.feed_last_modified.timetuple()
     r = feedparser.parse(
         url,
         etag = self.feed_etag,
         modified = last_modified,)
         #agent = USER_AGENT,) # FIXME: only one entry is downloaded with 
                               # this for some reason
     http_status = r.get('status', 200)
     http_headers = r.get('headers', {
       'content-type': 'application/rss+xml', 
       'content-length':'1'})
     exc_type = r.get("bozo_exception", Exception()).__class__
     if not r.entries and not r.get('version', ''):
         # 304 must be tested before the generic status check; the old
         # order made this branch unreachable (304 is not in [200, 302])
         # and reported "not modified" as an HTTP error.
         if http_status == 304:
             raise ShowFeedNotModifiedError
         elif http_status not in [200, 302]:
             raise ShowFeedError("HTTP error %s: %s" % (http_status, url))
         elif 'html' in http_headers.get('content-type', 'rss'):
             raise ShowFeedError("Looks like HTML: %s" % url)
         elif http_headers.get('content-length', '1') == '0':
             raise ShowFeedError("Empty page: %s" % url)
         elif hasattr(socket, 'timeout') and exc_type == socket.timeout:
             raise ShowFeedError("Connection timed out: %s" % url)
         elif exc_type == IOError:
             raise ShowFeedError("%s: %s" % (r.bozo_exception, url))
         elif hasattr(feedparser, 'zlib') \
                 and exc_type == feedparser.zlib.error:
             # Was "f.url" — an undefined name; report the feed url.
             raise ShowFeedError("Broken compression: %s" % url)
         elif exc_type in socket_errors:
             # Was "(r.bozo_exception.args[1] + f.url)": one concatenated
             # argument for a two-%s format, and "f" is undefined.
             raise ShowFeedError("%s: %s"
                                 % (r.bozo_exception.args[1], url))
         elif exc_type == urllib2.URLError:
             if r.bozo_exception.reason.__class__ in socket_errors:
                 exc_reason = r.bozo_exception.reason.args[1]
             else:
                 exc_reason = r.bozo_exception.reason
             raise ShowFeedError("%s: %s" % (exc_reason, url))
         elif exc_type == KeyboardInterrupt:
             # Let Ctrl-C propagate unchanged.
             raise r.bozo_exception
         else:
             # Was "f.url" — an undefined name; report the feed url.
             raise ShowFeedError("%s: %s"
                 % (r.get("bozo_exception", "can't process"), url))
     self.rss = r
     # Remember the validators the server sent so the next fetch can be
     # conditional.
     try:
         self.feed_etag = r.etag
     except AttributeError:
         pass
     if hasattr(r, "modified"):
         self.feed_last_modified = datetime.datetime(* r.modified[:6])
     else:
         self.feed_last_modified = None
     return r
Example #7
0
     # Step 3: If these all fail to find a working tracker, use first 
     # tracker without scraping support that can be connected to.
     if not chosen_tracker:
         logging.debug("Falling back to a tracker without scrape support.")
         for url in no_scrape_trackers:
             # NOTE(review): req_url is constant across this loop even
             # though we iterate over no_scrape_trackers and record
             # "url" as the chosen tracker — it looks like the request
             # should be built from url. Confirm against where req_url
             # is assigned earlier in this function before changing.
             request = urllib2.Request(req_url)
             request.add_header('User-Agent', USER_AGENT)
             try:
                 f = urllib2.urlopen(request)
             except urllib2.URLError, e:
                 # Unreachable tracker: try the next one.
                 continue
             chosen_tracker = url
             break
     if not chosen_tracker:
         raise TorrentDownloadError, "No working tracker found"
     logging.debug("Working tracker found (%s)" % chosen_tracker)
     # Cache the results on the instance for save() and later reuse.
     self.dict = torrent_dict
     self.file = torrent_file
     self.tracker_response = tracker_response
     return torrent_dict
 
 def save(self, directory=None, filename=None, retry=3):
     """Save torrent to path, or output-directory or output-directory2
     in the configuration if called with no arguments. It is downloaded 
     if it hasn't been already. Returns path that torrent was saved to.
     
     Arguments:
     Directory - Full directory to save torrent under.
                 Default: output-directory from config
     filename - Filename to save torrent as. Default: Automatically
                generated from episode details.