Example #1
    def cache_image(self, image_url):
        path = ek.ek(os.path.abspath, ek.ek(os.path.join, sickbeard.CACHE_DIR, 'images', 'imdb_popular'))

        if not os.path.exists(path):
            os.makedirs(path)

        full_path = os.path.join(path, os.path.basename(image_url))

        if not os.path.isfile(full_path):
            helpers.download_file(image_url, full_path, session=self.session)
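Every example on this page funnels through the same helpers.download_file(url, filename, session=..., ...) routine. Its implementation is not shown here, so the following is only a minimal sketch of what such a streaming download helper might look like; the chunk size, timeout, and exact error handling are assumptions, and the project's real helper also accepts headers, hooks, and a return_filename flag, as seen in later examples.

import requests

def download_file(url, filename, session=None, headers=None):
    """Stream `url` into `filename`; return True on success, False on failure."""
    session = session or requests.Session()
    try:
        with session.get(url, headers=headers, stream=True, timeout=30) as response:
            response.raise_for_status()
            with open(filename, 'wb') as fp:
                for chunk in response.iter_content(chunk_size=64 * 1024):
                    if chunk:  # filter out keep-alive chunks
                        fp.write(chunk)
        return True
    except (requests.RequestException, EnvironmentError):
        return False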
Example #2
    def cache_image(self, image_url):
        """
        Store cache of image in cache dir
        :param image_url: Source URL
        """
        path = ek(os.path.abspath, ek(os.path.join, sickbeard.CACHE_DIR, 'images', 'imdb_popular'))

        if not ek(os.path.exists, path):
            ek(os.makedirs, path)

        full_path = ek(os.path.join, path, ek(os.path.basename, image_url))

        if not ek(os.path.isfile, full_path):
            helpers.download_file(image_url, full_path, session=self.session)
Example #3
    def downloadResult(self, result):
        """
        Save the result to disk.
        """

        # check for auth
        if not self._doLogin():
            return False

        urls, filename = self._makeURL(result)

        if self.proxy.isEnabled():
            self.headers.update({"Referer": self.proxy.getProxyURL()})
        elif "Referer" in self.headers:
            self.headers.pop("Referer")

        for url in urls:
            if "NO_DOWNLOAD_NAME" in url:
                continue

            logger.log(u"Downloading a result from " + self.name + " at " + url)
            if helpers.download_file(self.proxy._buildURL(url), filename, session=self.session, headers=self.headers):
                if self._verify_download(filename):
                    logger.log(u"Saved result to " + filename, logger.INFO)
                    return True
                else:
                    logger.log(u"Could not download %s" % url, logger.WARNING)
                    helpers._remove_file_failed(filename)

        if len(urls):
            logger.log(u"Failed to download any results", logger.WARNING)

        return False
Example #4
    def downloadResult(self, result):
        """
        Save the result to disk.
        """

        # check for auth
        if not self._doLogin():
            return False

        urls, filename = self._makeURL(result)

        for url in urls:
            logger.log(u"Downloading a result from " + self.name + " at " +
                       url)
            if helpers.download_file(url, filename, session=self.session):
                if self._verify_download(filename):
                    logger.log(u"Saved result to " + filename, logger.INFO)
                    return True
                else:
                    logger.log(u"Could not download %s" % url, logger.WARNING)
                    helpers._remove_file_failed(filename)

        if len(urls):
            logger.log(u"Failed to download any results", logger.WARNING)

        return False
Example #5
    def download_result(self, result):
        if not self.login():
            return False

        urls, filename = self._make_url(result)

        for url in urls:
            if 'NO_DOWNLOAD_NAME' in url:
                continue

            if url.startswith('http'):
                self.headers.update({
                    'Referer': '/'.join(url.split('/')[:3]) + '/'
                })

            logger.log('Downloading a result from {0} at {1}'.format(self.name, url))

            downloaded_filename = download_file(url, filename, session=self.session, headers=self.headers,
                                                hooks={'response': self.get_url_hook}, return_filename=True)
            if downloaded_filename:
                if self._verify_download(downloaded_filename):
                    logger.log('Saved result to {0}'.format(downloaded_filename), logger.INFO)
                    return True

                logger.log('Could not download {0}'.format(url), logger.WARNING)
                remove_file_failed(downloaded_filename)

        if urls:
            logger.log('Failed to download any results', logger.WARNING)

        return False
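A recurring idiom above (Examples 5, 10, 12 and several that follow) is deriving a Referer header from the download URL itself. The expression keeps only the scheme and host; a tiny illustration with a made-up URL:

url = 'https://example.org/download/12345.torrent'
url.split('/')[:3]                            # ['https:', '', 'example.org']
referer = '/'.join(url.split('/')[:3]) + '/'  # 'https://example.org/'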
Example #6
    def downloadResult(self, result):
        """
        Save the result to disk.
        """

        # check for auth
        if not self._doLogin():
            return False

        urls, filename = self._makeURL(result)

        for url in urls:
            # Search results don't return torrent files directly; they return show sheets, so we must parse the show sheet to reach the torrent URL.
            data = self.getURL(url)
            url_torrent = re.search(r'http://tumejorserie.com/descargar/.+\.torrent', data, re.DOTALL).group()

            if url_torrent.startswith('http'):
                self.headers.update({'Referer': '/'.join(url_torrent.split('/')[:3]) + '/'})

            logger.log(u"Downloading a result from " + self.name + " at " + url)

            if helpers.download_file(url_torrent, filename, session=self.session, headers=self.headers):
                if self._verify_download(filename):
                    logger.log(u"Saved result to " + filename, logger.INFO)
                    return True
                else:
                    logger.log(u"Could not download %s" % url, logger.WARNING)
                    helpers.remove_file_failed(filename)

        if len(urls):
            logger.log(u"Failed to download any results", logger.WARNING)

        return False
Example #7
    def downloadResult(self, result):
        """
        Save the result to disk.
        """

        # check for auth
        if not self._doLogin():
            return False

        urls, filename = self._makeURL(result)

        if self.proxy.isEnabled():
            self.headers.update({'Referer': self.proxy.getProxyURL()})
        elif 'Referer' in self.headers:
            self.headers.pop('Referer')

        for url in urls:
            if 'NO_DOWNLOAD_NAME' in url:
                continue

            logger.log(u"Downloading a result from " + self.name + " at " + url)
            if helpers.download_file(self.proxy._buildURL(url), filename, session=self.session, headers=self.headers):
                if self._verify_download(filename):
                    logger.log(u"Saved result to " + filename, logger.INFO)
                    return True
                else:
                    logger.log(u"Could not download %s" % url, logger.WARNING)
                    helpers._remove_file_failed(filename)

        if len(urls):
            logger.log(u"Failed to download any results", logger.WARNING)

        return False
Example #8
    def downloadResult(self, result):
        """
        Save the result to disk.
        """

        # check for auth
        if not self._doLogin():
            return False

        urls, filename = self._makeURL(result)

        for url in urls:
            logger.log(u"Downloading a result from " + self.name + " at " + url)
            if helpers.download_file(url, filename, session=self.session):
                if self._verify_download(filename):
                    logger.log(u"Saved result to " + filename, logger.INFO)
                    return True
                else:
                    logger.log(u"Could not download %s" % url, logger.WARNING)
                    helpers._remove_file_failed(filename)

        if len(urls):
            logger.log(u"Failed to download any results", logger.WARNING)

        return False
Example #9
    def download_result(self, result):
        if not self.login():
            return False

        urls, filename = self._make_url(result)

        for url in urls:
            if 'NO_DOWNLOAD_NAME' in url:
                continue

            if url.startswith('http'):
                self.headers.update({
                    'Referer': '/'.join(url.split('/')[:3]) + '/'
                })

            logger.log(u'Downloading a result from %s at %s' % (self.name, url))

            if url.endswith(GenericProvider.TORRENT) and filename.endswith(GenericProvider.NZB):
                filename = replace_extension(filename, GenericProvider.TORRENT)

            if download_file(url, filename, session=self.session, headers=self.headers):
                if self._verify_download(filename):
                    logger.log(u'Saved result to %s' % filename, logger.INFO)
                    return True

                logger.log(u'Could not download %s' % url, logger.WARNING)
                remove_file_failed(filename)

        if len(urls):
            logger.log(u'Failed to download any results', logger.WARNING)

        return False
Example #10
    def download_result(self, result):
        if not self.login():
            return False

        urls, filename = self._make_url(result)

        for url in urls:
            if 'NO_DOWNLOAD_NAME' in url:
                continue

            if url.startswith('http'):
                self.headers.update({
                    'Referer': '/'.join(url.split('/')[:3]) + '/'
                })

            logger.log(u'Downloading a result from {0} at {1}'.format(self.name, url))

            if url.endswith(GenericProvider.TORRENT) and filename.endswith(GenericProvider.NZB):
                filename = replace_extension(filename, GenericProvider.TORRENT)

            if download_file(url, filename, session=self.session, headers=self.headers, hooks={'response': self.get_url_hook}):
                if self._verify_download(filename):
                    logger.log(u'Saved result to {0}'.format(filename), logger.INFO)
                    return True

                logger.log(u'Could not download {0}'.format(url), logger.WARNING)
                remove_file_failed(filename)

        if urls:
            logger.log(u'Failed to download any results', logger.WARNING)

        return False
Example #11
    def download_result(self, result):
        """
        Save the result to disk.
        """

        # check for auth
        if not self.login():
            return False

        urls, filename = self._make_url(result)

        for url in urls:
            # Search results don't return torrent files directly; they return show sheets, so we must parse the show sheet to reach the torrent URL.
            data = self.get_url(url, returns='text')
            url_torrent = re.search(r'http://tumejorserie.com/descargar/.+\.torrent', data, re.DOTALL).group()

            if url_torrent.startswith('http'):
                self.headers.update({'Referer': '/'.join(url_torrent.split('/')[:3]) + '/'})

            logger.log('Downloading a result from {}'.format(url))

            if helpers.download_file(url_torrent, filename, session=self.session, headers=self.headers):
                if self._verify_download(filename):
                    logger.log('Saved result to {}'.format(filename), logger.INFO)
                    return True
                else:
                    logger.log('Could not download {}'.format(url), logger.WARNING)
                    helpers.remove_file_failed(filename)

        if len(urls):
            logger.log('Failed to download any results', logger.WARNING)

        return False
Example #12
    def download_result(self, result):
        if not self.login():
            return False

        urls, filename = self._make_url(result)

        for url in urls:
            if "NO_DOWNLOAD_NAME" in url:
                continue

            if url.startswith("http"):
                self.headers.update({"Referer": "/".join(url.split("/")[:3]) + "/"})

            logger.log(u"Downloading a result from {0} at {1}".format(self.name, url))

            if url.endswith(GenericProvider.TORRENT) and filename.endswith(GenericProvider.NZB):
                filename = replace_extension(filename, GenericProvider.TORRENT)

            if download_file(
                url, filename, session=self.session, headers=self.headers, hooks={"response": self.get_url_hook}
            ):
                if self._verify_download(filename):
                    logger.log(u"Saved result to {0}".format(filename), logger.INFO)
                    return True

                logger.log(u"Could not download {0}".format(url), logger.WARNING)
                remove_file_failed(filename)

        if urls:
            logger.log(u"Failed to download any results", logger.WARNING)

        return False
Example #13
    def cache_image(self, indexerid):
        """
        Store cache of image in cache dir
        :param indexerid: Source indexer id
        """
        path = ek(os.path.abspath,
                  ek(os.path.join, sickbeard.CACHE_DIR, 'images', 'favorites'))

        if not ek(os.path.exists, path):
            ek(os.makedirs, path)

        full_path = ek(os.path.join, path, str(indexerid))

        if not ek(os.path.isfile, full_path):
            helpers.download_file(
                sickchill.indexer.series_poster_url_by_id(indexerid),
                full_path,
                session=self.session)
Example #14
def _update_zoneinfo():

    # now check if the zoneinfo needs update
    url_zv = 'http://github.com/Prinz23/sb_network_timezones/raw/master/zoneinfo.txt'

    url_data = helpers.getURL(url_zv)

    if url_data is None:
        # When url_data is None, there was trouble connecting to GitHub
        logger.log(u"Loading zoneinfo.txt failed. Unable to get URL: " + url_zv, logger.DEBUG)
        return

    if lib.dateutil.zoneinfo.ZONEINFOFILE is not None:
        cur_zoneinfo = ek.ek(basename, lib.dateutil.zoneinfo.ZONEINFOFILE)
    else:
        cur_zoneinfo = None
    (new_zoneinfo, zoneinfo_md5) = url_data.decode('utf-8').strip().rsplit(u' ')

    if cur_zoneinfo is not None and new_zoneinfo == cur_zoneinfo:
        return

    # now load the new zoneinfo
    url_tar = u'http://github.com/Prinz23/sb_network_timezones/raw/master/' + new_zoneinfo
    zonefile = ek.ek(realpath, u'lib/dateutil/zoneinfo/' + new_zoneinfo)
    zonefile_tmp = re.sub(r"\.tar\.gz$", '.tmp', zonefile)

    if os.path.exists(zonefile_tmp):
        try:
            os.remove(zonefile_tmp)
        except OSError:
            logger.log(u"Unable to delete: " + zonefile_tmp, logger.ERROR)
            return

    if not helpers.download_file(url_tar, zonefile_tmp):
        return

    new_hash = str(helpers.md5_for_file(zonefile_tmp))

    if zoneinfo_md5.upper() == new_hash.upper():
        logger.log(u"Updating timezone info with new one: " + new_zoneinfo, logger.MESSAGE)
        try:
            # remove the old zoneinfo file
            if cur_zoneinfo is not None:
                old_file = ek.ek(realpath, u'lib/dateutil/zoneinfo/' + cur_zoneinfo)
                if os.path.exists(old_file):
                    os.remove(old_file)
            # rename downloaded file
            os.rename(zonefile_tmp, zonefile)
            # load the new zoneinfo
            reload(lib.dateutil.zoneinfo)
        except Exception:
            _remove_zoneinfo_failed(zonefile_tmp)
            return
    else:
        _remove_zoneinfo_failed(zonefile_tmp)
        logger.log(u"MD5 hash doesn't match: " + zoneinfo_md5.upper() + ' File: ' + new_hash.upper(), logger.ERROR)
        return
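The zoneinfo updaters in this collection (this example and the two later variants) all follow the same safe-replace pattern: download to a temporary file, verify its MD5 against the published hash, and only then move it over the old file. A self-contained sketch of that pattern; md5_for_file mirrors the helper the examples call, while swap_in_if_valid is an illustrative name, not the project's:

import hashlib
import os

def md5_for_file(path, block_size=64 * 1024):
    """Return the MD5 hex digest of a file, reading it in blocks."""
    digest = hashlib.md5()
    with open(path, 'rb') as fp:
        for block in iter(lambda: fp.read(block_size), b''):
            digest.update(block)
    return digest.hexdigest()

def swap_in_if_valid(tmp_path, final_path, expected_md5):
    """Rename tmp_path over final_path only if its hash matches; otherwise discard it."""
    if md5_for_file(tmp_path).upper() == expected_md5.upper():
        os.rename(tmp_path, final_path)
        return True
    os.remove(tmp_path)
    return False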
Example #15
    def downloadResult(self, result):
        """
        Save the result to disk.
        """

        # check for auth
        if not self._doLogin():
            return False

        if self.providerType == GenericProvider.TORRENT:
            try:
                torrent_hash = re.findall('urn:btih:([\w]{32,40})',
                                          result.url)[0].upper()
                if not torrent_hash:
                    logger.log(
                        "Unable to extract torrent hash from link: " +
                        ex(result.url), logger.ERROR)
                    return False

                urls = [
                    'http://torcache.net/torrent/' + torrent_hash + '.torrent',
                    'http://torrage.com/torrent/' + torrent_hash + '.torrent',
                    'http://zoink.it/torrent/' + torrent_hash + '.torrent',
                ]
            except:
                urls = [result.url]

            filename = ek.ek(
                os.path.join, sickbeard.TORRENT_DIR,
                helpers.sanitizeFileName(result.name) + '.' +
                self.providerType)
        elif self.providerType == GenericProvider.NZB:
            urls = [result.url]

            filename = ek.ek(
                os.path.join, sickbeard.NZB_DIR,
                helpers.sanitizeFileName(result.name) + '.' +
                self.providerType)
        else:
            return

        for url in urls:
            if helpers.download_file(url, filename, session=self.session):
                logger.log(u"Downloading a result from " + self.name + " at " +
                           url)

                if self.providerType == GenericProvider.TORRENT:
                    logger.log(u"Saved magnet link to " + filename,
                               logger.MESSAGE)
                else:
                    logger.log(u"Saved result to " + filename, logger.MESSAGE)

                if self._verify_download(filename):
                    return True

        logger.log(u"Failed to download result", logger.ERROR)
        return False
Example #16
    def downloadResult(self, result):
        """
        Save the result to disk.
        """

        # check for auth
        if not self._doLogin():
            return False

        if self.providerType == GenericProvider.TORRENT:
            try:
                torrent_hash = re.findall('urn:btih:([\w]{32,40})', result.url)[0].upper()
                torrent_name = re.findall('dn=([^&]+)', result.url)[0]

                if len(torrent_hash) == 32:
                    torrent_hash = b16encode(b32decode(torrent_hash)).upper()

                if not torrent_hash:
                    logger.log("Unable to extract torrent hash from link: " + ex(result.url), logger.ERROR)
                    return False

                urls = [
                    'http://torcache.net/torrent/' + torrent_hash + '.torrent',
                    'http://zoink.ch/torrent/' + torrent_name + '.torrent',
                    'http://torrage.com/torrent/' + torrent_hash + '.torrent',
                ]
            except:
                urls = [result.url]

            filename = ek.ek(os.path.join, sickbeard.TORRENT_DIR,
                             helpers.sanitizeFileName(result.name) + '.' + self.providerType)
        elif self.providerType == GenericProvider.NZB:
            urls = [result.url]

            filename = ek.ek(os.path.join, sickbeard.NZB_DIR,
                             helpers.sanitizeFileName(result.name) + '.' + self.providerType)
        else:
            return

        for url in urls:
            logger.log(u"Downloading a result from " + self.name + " at " + url)
            if helpers.download_file(url, filename, session=self.session):
                if self._verify_download(filename):
                    if self.providerType == GenericProvider.TORRENT:
                        logger.log(u"Saved magnet link to " + filename, logger.INFO)
                    else:
                        logger.log(u"Saved result to " + filename, logger.INFO)
                    return True
                else:
                    logger.log(u"Could not download %s" % url, logger.WARNING)
                    helpers._remove_file_failed(filename)

        if len(urls):
            logger.log(u"Failed to download any results", logger.WARNING)

        return False
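Examples 16, 19 and 20 normalize the urn:btih info-hash extracted from a magnet link: a 32-character hash is base32-encoded and must be converted to the 40-character hex form that the torrent cache URLs expect. A compact, self-contained version of just that conversion (btih_to_hex is an illustrative name):

import re
from base64 import b16encode, b32decode

def btih_to_hex(magnet_url):
    """Return the info-hash from a magnet link as 40 hex characters, or None."""
    match = re.search(r'urn:btih:(\w{32,40})', magnet_url)
    if not match:
        return None
    torrent_hash = match.group(1).upper()
    if len(torrent_hash) == 32:  # base32 form: decode, then re-encode as hex
        torrent_hash = b16encode(b32decode(torrent_hash)).decode('ascii')
    return torrent_hash.lower()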
Example #17
    def cache_image(self, image_url):
        """
        Store cache of image in cache dir
        :param image_url: Source URL
        """
        if not self.cache_subfolder:
            return
        
        self.image_src = ek(posixpath.join, 'images', self.cache_subfolder, ek(os.path.basename, image_url))
        
        path = ek(os.path.abspath, ek(os.path.join, sickbeard.CACHE_DIR, 'images', self.cache_subfolder))

        if not ek(os.path.exists, path):
            ek(os.makedirs, path)

        full_path = ek(posixpath.join, path, ek(os.path.basename, image_url))

        if not ek(os.path.isfile, full_path):
            helpers.download_file(image_url, full_path, session=self.session)
Example #18
    def cache_image(self, image_url):
        """
        Store cache of image in cache dir

        :param image_url: Source URL
        """
        if not self.cache_subfolder:
            return
        
        self.image_src = ek(posixpath.join, u'images', self.cache_subfolder, ek(os.path.basename, image_url))
        
        path = ek(os.path.abspath, ek(os.path.join, sickbeard.CACHE_DIR, u'images', self.cache_subfolder))

        if not ek(os.path.exists, path):
            ek(os.makedirs, path)

        full_path = ek(posixpath.join, path, ek(os.path.basename, image_url))

        if not ek(os.path.isfile, full_path):
            helpers.download_file(image_url, full_path, session=self.session)
Example #19
    def downloadResult(self, result):
        """
        Save the result to disk.
        """

        # check for auth
        if not self._doLogin():
            return False

        if self.providerType == GenericProvider.TORRENT:
            try:
                torrent_hash = re.findall('urn:btih:([\w]{32,40})', result.url)[0].upper()

                if len(torrent_hash) == 32:
                    torrent_hash = b16encode(b32decode(torrent_hash)).lower()

                if not torrent_hash:
                    logger.log("Unable to extract torrent hash from link: " + ex(result.url), logger.ERROR)
                    return False

                urls = [
                    'http://torcache.net/torrent/' + torrent_hash + '.torrent',
                    'http://torrage.com/torrent/' + torrent_hash + '.torrent',
                    'http://zoink.it/torrent/' + torrent_hash + '.torrent',
                ]
            except:
                urls = [result.url]

            filename = ek.ek(os.path.join, sickbeard.TORRENT_DIR,
                             helpers.sanitizeFileName(result.name) + '.' + self.providerType)
        elif self.providerType == GenericProvider.NZB:
            urls = [result.url]

            filename = ek.ek(os.path.join, sickbeard.NZB_DIR,
                             helpers.sanitizeFileName(result.name) + '.' + self.providerType)
        else:
            return

        for url in urls:
            if helpers.download_file(url, filename, session=self.session):
                logger.log(u"Downloading a result from " + self.name + " at " + url)

                if self.providerType == GenericProvider.TORRENT:
                    logger.log(u"Saved magnet link to " + filename, logger.MESSAGE)
                else:
                    logger.log(u"Saved result to " + filename, logger.MESSAGE)

                #TODO This is not working on Android for some reason
                #if self._verify_download(filename):
                return True

        logger.log(u"Failed to download result", logger.ERROR)
        return False
Example #20
    def download_result(self, result):
        """
        Save the result to disk.
        """

        # check for auth
        if not self._do_login():
            return False

        if GenericProvider.TORRENT == self.providerType:
            try:
                torrent_hash = re.findall('urn:btih:([0-9a-f]{32,40})', result.url)[0].upper()

                if 32 == len(torrent_hash):
                    torrent_hash = b16encode(b32decode(torrent_hash)).lower()

                if not torrent_hash:
                    logger.log('Unable to extract torrent hash from link: ' + ex(result.url), logger.ERROR)
                    return False

                urls = ['https://%s/%s.torrent' % (u, torrent_hash)
                        for u in ('torcache.net/torrent', 'torrage.com/torrent', 'getstrike.net/torrents/api/download')]
            except:
                urls = [result.url]

            filename = ek.ek(os.path.join, sickbeard.TORRENT_DIR,
                             helpers.sanitizeFileName(result.name) + '.' + self.providerType)
        elif GenericProvider.NZB == self.providerType:
            urls = [result.url]

            filename = ek.ek(os.path.join, sickbeard.NZB_DIR,
                             helpers.sanitizeFileName(result.name) + '.' + self.providerType)
        else:
            return

        for url in urls:
            if helpers.download_file(url, filename, session=self.session):
                logger.log(u'Downloading a result from ' + self.name + ' at ' + url)

                if GenericProvider.TORRENT == self.providerType:
                    logger.log(u'Saved magnet link to ' + filename, logger.MESSAGE)
                else:
                    logger.log(u'Saved result to ' + filename, logger.MESSAGE)

                if self._verify_download(filename):
                    return True
                elif ek.ek(os.path.isfile, filename):
                    ek.ek(os.remove, filename)

        logger.log(u'Failed to download result', logger.ERROR)
        return False
Example #21
    def downloadResult(self, result):
        """
        Save the result to disk.
        """

        # check for auth
        if not self._doLogin():
            return False

        urls, filename = self._makeURL(result)

        if self.proxy.isEnabled():
            self.headers.update({'Referer': self.proxy.getProxyURL()})
        elif 'Referer' in self.headers:
            self.headers.pop('Referer')

        for url in urls:
            if 'NO_DOWNLOAD_NAME' in url:
                continue

            if not self.proxy.isEnabled() and url.startswith('http'):
                # Let's just set a referer for every .torrent/.nzb, should work as a cover-all without side-effects
                self.headers.update(
                    {'Referer': '/'.join(url.split('/')[:3]) + '/'})

            logger.log(u"Downloading a result from " + self.name + " at " +
                       url)

            # Support for Jackett/TorzNab
            if url.endswith(GenericProvider.TORRENT) and filename.endswith(
                    GenericProvider.NZB):
                filename = filename.rsplit(
                    '.', 1)[0] + '.' + GenericProvider.TORRENT

            if helpers.download_file(self.proxy._buildURL(url),
                                     filename,
                                     session=self.session,
                                     headers=self.headers):
                if self._verify_download(filename):
                    logger.log(u"Saved result to " + filename, logger.INFO)
                    return True
                else:
                    logger.log(u"Could not download %s" % url, logger.WARNING)
                    helpers._remove_file_failed(filename)

        if len(urls):
            logger.log(u"Failed to download any results", logger.WARNING)

        return False
Example #22
    def download_result(self, result):
        if not self.login():
            return False

        urls, filename = self._make_url(result)

        if urls:
            if result.url.startswith('magnet'):
                # opening in browser
                try:
                    import webbrowser
                    logger.log(u'Opening magnet link in browser: {0}'.format(result.url), logger.DEBUG)
                    try:
                        return webbrowser.open(result.url, 2, 1)
                    except Exception:
                        try:
                            return webbrowser.open(result.url, 1, 1)
                        except Exception:
                            logger.log(u"Unable to launch a browser", logger.ERROR)
                except ImportError:
                    logger.log(u"Unable to load the webbrowser module, cannot launch the browser.", logger.WARNING)
            

        for url in urls:
            if 'NO_DOWNLOAD_NAME' in url:
                continue

            if url.startswith('http'):
                self.headers.update({
                    'Referer': '/'.join(url.split('/')[:3]) + '/'
                })

            logger.log('Downloading a result from {0} at {1}'.format(self.name, url))

            downloaded_filename = download_file(url, filename, session=self.session, headers=self.headers,
                                                hooks={'response': self.get_url_hook}, return_filename=True)
            if downloaded_filename:
                if self._verify_download(downloaded_filename):
                    logger.log('Saved result to {0}'.format(downloaded_filename), logger.INFO)
                    return True

                logger.log('Could not download {0}'.format(url), logger.WARNING)
                remove_file_failed(downloaded_filename)

        if urls:
            logger.log(u'Failed to download any results', logger.WARNING)

        return False
Example #23
    def downloadResult(self, result):
        """
        Save the result to disk.
        """

        # check for auth
        if not self._doLogin():
            return False

        urls, filename = self._makeURL(result)

        for url in urls:
            if 'NO_DOWNLOAD_NAME' in url:
                continue

            if url.startswith('http'):
                self.headers.update(
                    {'Referer': '/'.join(url.split('/')[:3]) + '/'})

            logging.info("Downloading a result from " + self.name + " at " +
                         url)

            # Support for Jackett/TorzNab
            if url.endswith(GenericProvider.TORRENT) and filename.endswith(
                    GenericProvider.NZB):
                filename = filename.rsplit(
                    '.', 1)[0] + '.' + GenericProvider.TORRENT

            if helpers.download_file(url,
                                     filename,
                                     session=self.session,
                                     headers=self.headers):
                if self._verify_download(filename):
                    logging.info("Saved result to " + filename)
                    return True
                else:
                    logging.warning("Could not download %s" % url)
                    helpers.remove_file_failed(filename)

        if len(urls):
            logging.warning("Failed to download any results")

        return False
Example #24
    def downloadResult(self, result):
        """
        Save the result to disk.
        """

        # check for auth
        if not self._doLogin():
            return False

        urls, filename = self._makeURL(result)

        if self.proxy.isEnabled():
            self.headers.update({'Referer': self.proxy.getProxyURL()})
        elif 'Referer' in self.headers:
            self.headers.pop('Referer')

        for url in urls:
            if 'NO_DOWNLOAD_NAME' in url:
                continue

            if not self.proxy.isEnabled() and url.startswith('http'):
                # Let's just set a referer for every .torrent/.nzb, should work as a cover-all without side-effects
                self.headers.update({'Referer': '/'.join(url.split('/')[:3]) + '/'})

            logger.log(u"Downloading a result from " + self.name + " at " + url)

            # Support for Jackett/TorzNab
            if url.endswith(GenericProvider.TORRENT) and filename.endswith(GenericProvider.NZB):
                filename = filename.rsplit('.', 1)[0] + '.' + GenericProvider.TORRENT

            if helpers.download_file(self.proxy._buildURL(url), filename, session=self.session, headers=self.headers):
                if self._verify_download(filename):
                    logger.log(u"Saved result to " + filename, logger.INFO)
                    return True
                else:
                    logger.log(u"Could not download %s" % url, logger.WARNING)
                    helpers._remove_file_failed(filename)

        if len(urls):
            logger.log(u"Failed to download any results", logger.WARNING)

        return False
Example #25
    def downloadResult(self, result):
        """
        Save the result to disk.
        """

        # check for auth
        if not self._doLogin():
            return False

        urls, filename = self._makeURL(result)

        for url in urls:
            if 'NO_DOWNLOAD_NAME' in url:
                continue

            if url.startswith('http'):
                self.headers.update({'Referer': '/'.join(url.split('/')[:3]) + '/'})

            logger.log(u"Downloading a result from " + self.name + " at " + url)

            # Support for Jackett/TorzNab
            if url.endswith(GenericProvider.TORRENT) and filename.endswith(GenericProvider.NZB):
                filename = filename.rsplit('.', 1)[0] + '.' + GenericProvider.TORRENT

            if helpers.download_file(url, filename, session=self.session, headers=self.headers):
                if self._verify_download(filename):
                    logger.log(u"Saved result to " + filename, logger.INFO)
                    return True
                else:
                    logger.log(u"Could not download %s" % url, logger.WARNING)
                    helpers.remove_file_failed(filename)

        if len(urls):
            logger.log(u"Failed to download any results", logger.WARNING)

        return False
Example #26
def change_unrar_tool(unrar_tool, alt_unrar_tool):

    # Check for failed unrar attempt, and remove it
    # Must be done before unrar is ever called or the self-extractor opens and locks startup
    bad_unrar = os.path.join(sickbeard.DATA_DIR, 'unrar.exe')
    if os.path.exists(bad_unrar) and os.path.getsize(bad_unrar) == 447440:
        try:
            os.remove(bad_unrar)
        except OSError as e:
            logger.log("Unable to delete bad unrar.exe file {0}: {1}. You should delete it manually".format(bad_unrar, e.strerror), logger.WARNING)

    try:
        rarfile.custom_check(unrar_tool)
    except (rarfile.RarCannotExec, rarfile.RarExecError, OSError, IOError):
        # Let's just return right now if the defaults work
        try:
            # noinspection PyProtectedMember
            test = rarfile._check_unrar_tool()
            if test:
                # These must always be set to something before returning
                sickbeard.UNRAR_TOOL = rarfile.UNRAR_TOOL
                sickbeard.ALT_UNRAR_TOOL = rarfile.ALT_TOOL
                return True
        except (rarfile.RarCannotExec, rarfile.RarExecError, OSError, IOError):
            pass

        if platform.system() == 'Windows':
            # Look for WinRAR installations
            found = False
            winrar_path = 'WinRAR\\UnRAR.exe'
            # Make a set of unique paths to check from existing environment variables
            check_locations = {
                os.path.join(location, winrar_path) for location in (
                    os.environ.get("ProgramW6432"), os.environ.get("ProgramFiles(x86)"),
                    os.environ.get("ProgramFiles"), re.sub(r'\s?\(x86\)', '', os.environ["ProgramFiles"])
                ) if location
            }
            check_locations.add(os.path.join(sickbeard.PROG_DIR, 'unrar\\unrar.exe'))

            for check in check_locations:
                if ek(os.path.isfile, check):
                    # Can use it?
                    try:
                        rarfile.custom_check(check)
                        unrar_tool = check
                        found = True
                        break
                    except (rarfile.RarCannotExec, rarfile.RarExecError, OSError, IOError):
                        found = False

            # Download
            if not found:
                logger.log('Trying to download unrar.exe and set the path')
                unrar_store = ek(os.path.join, sickbeard.PROG_DIR, 'unrar')  # ./unrar (folder)
                unrar_zip = ek(os.path.join, sickbeard.PROG_DIR, 'unrar_win.zip')  # file download

                if (helpers.download_file(
                    "http://sickrage.github.io/unrar/unrar_win.zip", filename=unrar_zip, session=helpers.make_session()
                ) and helpers.extractZip(archive=unrar_zip, targetDir=unrar_store)):
                    try:
                        ek(os.remove, unrar_zip)
                    except OSError as e:
                        logger.log("Unable to delete downloaded file {0}: {1}. You may delete it manually".format(unrar_zip, e.strerror))

                    check = os.path.join(unrar_store, "unrar.exe")
                    try:
                        rarfile.custom_check(check)
                        unrar_tool = check
                        logger.log('Successfully downloaded unrar.exe and set as unrar tool')
                    except (rarfile.RarCannotExec, rarfile.RarExecError, OSError, IOError):
                        logger.log('Sorry, unrar was not set up correctly. Try installing WinRAR and make sure it is on the system PATH')
                else:
                    logger.log('Unable to download unrar.exe')

    # These must always be set to something before returning
    sickbeard.UNRAR_TOOL = rarfile.UNRAR_TOOL = rarfile.ORIG_UNRAR_TOOL = unrar_tool
    sickbeard.ALT_UNRAR_TOOL = rarfile.ALT_TOOL = alt_unrar_tool

    try:
        # noinspection PyProtectedMember
        test = rarfile._check_unrar_tool()
    except (rarfile.RarCannotExec, rarfile.RarExecError, OSError, IOError):
        if sickbeard.UNPACK == 1:
            logger.log('Disabling UNPACK setting because no unrar is installed.')
            sickbeard.UNPACK = 0
        test = False

    return test
Example #27
    def update(self):  # pylint: disable=too-many-statements
        """
        Downloads the latest source tarball from github and installs it over the existing version.
        """

        tar_download_url = 'http://github.com/' + sickbeard.GIT_ORG + '/' + sickbeard.GIT_REPO + '/tarball/' + self.branch

        try:
            # prepare the update dir
            sr_update_dir = ek(os.path.join, sickbeard.PROG_DIR, u'sr-update')

            if ek(os.path.isdir, sr_update_dir):
                logger.log(u"Clearing out update folder " + sr_update_dir + " before extracting")
                shutil.rmtree(sr_update_dir)

            logger.log(u"Creating update folder " + sr_update_dir + " before extracting")
            ek(os.makedirs, sr_update_dir)

            # retrieve file
            logger.log(u"Downloading update from {url}".format(url=tar_download_url))
            tar_download_path = ek(os.path.join, sr_update_dir, u'sr-update.tar')
            helpers.download_file(tar_download_url, tar_download_path, session=self.session)

            if not ek(os.path.isfile, tar_download_path):
                logger.log(u"Unable to retrieve new version from " + tar_download_url + ", can't update", logger.WARNING)
                return False

            if not ek(tarfile.is_tarfile, tar_download_path):
                logger.log(u"Retrieved version from " + tar_download_url + " is corrupt, can't update", logger.ERROR)
                return False

            # extract to sr-update dir
            logger.log(u"Extracting file " + tar_download_path)
            tar = tarfile.open(tar_download_path)
            tar.extractall(sr_update_dir)
            tar.close()

            # delete .tar.gz
            logger.log(u"Deleting file " + tar_download_path)
            ek(os.remove, tar_download_path)

            # find update dir name
            update_dir_contents = [x for x in ek(os.listdir, sr_update_dir) if
                                   ek(os.path.isdir, ek(os.path.join, sr_update_dir, x))]

            if len(update_dir_contents) != 1:
                logger.log(u"Invalid update data, update failed: " + str(update_dir_contents), logger.ERROR)
                return False

            # walk temp folder and move files to main folder
            content_dir = ek(os.path.join, sr_update_dir, update_dir_contents[0])
            logger.log(u"Moving files from " + content_dir + " to " + sickbeard.PROG_DIR)
            for dirname, stderr_, filenames in ek(os.walk, content_dir):  # @UnusedVariable
                dirname = dirname[len(content_dir) + 1:]
                for curfile in filenames:
                    old_path = ek(os.path.join, content_dir, dirname, curfile)
                    new_path = ek(os.path.join, sickbeard.PROG_DIR, dirname, curfile)

                    # Avoid DLL access problem on WIN32/64
                    # These files need to be updated manually,
                    # or we need a way to release the in-memory lock on them
                    if curfile in ('unrar.dll', 'unrar64.dll'):
                        try:
                            ek(os.chmod, new_path, stat.S_IWRITE)
                            ek(os.remove, new_path)
                            ek(os.renames, old_path, new_path)
                        except Exception as e:
                            logger.log(u"Unable to update " + new_path + ': ' + ex(e), logger.DEBUG)
                            ek(os.remove, old_path)  # Trash the updated file without moving in new path
                        continue

                    if ek(os.path.isfile, new_path):
                        ek(os.remove, new_path)
                    ek(os.renames, old_path, new_path)

            sickbeard.CUR_COMMIT_HASH = self._newest_commit_hash
            sickbeard.CUR_COMMIT_BRANCH = self.branch

        except Exception as e:
            logger.log(u"Error while trying to update: " + ex(e), logger.ERROR)
            logger.log(u"Traceback: " + traceback.format_exc(), logger.DEBUG)
            return False

        # Notify update successful
        try:
            notifiers.notify_git_update(sickbeard.CUR_COMMIT_HASH or "")
        except Exception:
            logger.log(u"Unable to send update notification. Continuing the update process", logger.DEBUG)
        return True
Example #28
def _update_zoneinfo():
    """
    Request new zoneinfo directly from repository
    """
    global sb_timezone
    sb_timezone = tz.tzlocal()
    url_zv = 'http://sickragetv.github.io/network_timezones/zoneinfo.txt'
    try:
        url_data = helpers.getURL(url_zv, session=requests.Session())
        if not url_data:
            raise Exception(u'Loading zoneinfo.txt returned no data')

        # Filename of existing zoneinfo
        if zoneinfo.ZONEINFOFILE is not None:
            cur_zoneinfo = ek(basename, zoneinfo.ZONEINFOFILE)
        else:
            cur_zoneinfo = None

        # Filename and hash of new zoneinfo
        (new_zoneinfo, zoneinfo_md5) = url_data.strip().rsplit(u' ')
    except Exception as e:
        logger.log(u'Loading zoneinfo.txt failed, this can happen from time to time. Unable to get URL: %s' %
                url_zv, logger.WARNING)
        return

    if (cur_zoneinfo is not None) and (new_zoneinfo == cur_zoneinfo):
        return

    # now load the new zoneinfo
    url_tar = u'http://sickragetv.github.io/network_timezones/%s' % new_zoneinfo

    zonefile = helpers.real_path(ek(join, ek(os.path.dirname, zoneinfo.__file__), new_zoneinfo))
    zonefile_tmp = re.sub(r'\.tar\.gz$', '.tmp', zonefile)

    if ek(os.path.exists, zonefile_tmp):
        try:
            ek(os.remove, zonefile_tmp)
        except:
            logger.log(u'Unable to delete: %s' % zonefile_tmp, logger.WARNING)
            return

    if not helpers.download_file(url_tar, zonefile_tmp, session=requests.Session()):
        return

    if not ek(os.path.exists, zonefile_tmp):
        logger.log(u'Download of %s failed.' % zonefile_tmp, logger.WARNING)
        return

    new_hash = str(helpers.md5_for_file(zonefile_tmp))

    if zoneinfo_md5.upper() == new_hash.upper():
        logger.log(u'Updating timezone info with new one: %s' % new_zoneinfo, logger.INFO)
        try:
            # remove the old zoneinfo file
            if cur_zoneinfo is not None:
                old_file = helpers.real_path(
                    ek(join, ek(os.path.dirname, zoneinfo.__file__), cur_zoneinfo))
                if ek(os.path.exists, old_file):
                    ek(os.remove, old_file)
            # rename downloaded file
            ek(os.rename, zonefile_tmp, zonefile)
            # load the new zoneinfo
            reload(zoneinfo)
            sb_timezone = tz.tzlocal()
        except:
            _remove_zoneinfo_failed(zonefile_tmp)
            return
    else:
        _remove_zoneinfo_failed(zonefile_tmp)
        logger.log(u'MD5 hash does not match: %s File: %s' % (zoneinfo_md5.upper(), new_hash.upper()), logger.WARNING)
        return
Example #29
def _update_zoneinfo():
    global sb_timezone
    sb_timezone = tz.tzlocal()

    # now check if the zoneinfo needs update
    url_zv = 'https://raw.githubusercontent.com/Prinz23/sb_network_timezones/master/zoneinfo.txt'

    url_data = helpers.getURL(url_zv)
    if url_data is None:
        # When urlData is None, trouble connecting to github
        logger.log(
            u'Loading zoneinfo.txt failed, this can happen from time to time. Unable to get URL: %s'
            % url_zv, logger.WARNING)
        return

    zonefilename = zoneinfo._ZONEFILENAME
    cur_zoneinfo = zonefilename
    if None is not cur_zoneinfo:
        cur_zoneinfo = ek.ek(basename, zonefilename)
    zonefile = helpers.real_path(
        ek.ek(join, ek.ek(os.path.dirname, zoneinfo.__file__), cur_zoneinfo))
    zonemetadata = zoneinfo.gettz_db_metadata() if ek.ek(
        os.path.isfile, zonefile) else None
    (new_zoneinfo,
     zoneinfo_md5) = url_data.decode('utf-8').strip().rsplit(u' ')
    newtz_regex = re.search(r'(\d{4}[^.]+)', new_zoneinfo)
    if not newtz_regex or len(newtz_regex.groups()) != 1:
        return
    newtzversion = newtz_regex.group(1)

    if cur_zoneinfo is not None and zonemetadata is not None and 'tzversion' in zonemetadata and zonemetadata[
            'tzversion'] == newtzversion:
        return

    # now load the new zoneinfo
    url_tar = u'https://raw.githubusercontent.com/Prinz23/sb_network_timezones/master/%s' % new_zoneinfo

    zonefile_tmp = re.sub(r'\.tar\.gz$', '.tmp', zonefile)

    if ek.ek(os.path.exists, zonefile_tmp):
        try:
            ek.ek(os.remove, zonefile_tmp)
        except:
            logger.log(u'Unable to delete: %s' % zonefile_tmp, logger.ERROR)
            return

    if not helpers.download_file(url_tar, zonefile_tmp):
        return

    if not ek.ek(os.path.exists, zonefile_tmp):
        logger.log(u'Download of %s failed.' % zonefile_tmp, logger.ERROR)
        return

    new_hash = str(helpers.md5_for_file(zonefile_tmp))

    if zoneinfo_md5.upper() == new_hash.upper():
        logger.log(u'Updating timezone info with new one: %s' % new_zoneinfo,
                   logger.MESSAGE)
        try:
            # remove the old zoneinfo file
            if cur_zoneinfo is not None:
                old_file = helpers.real_path(
                    ek.ek(join, ek.ek(os.path.dirname, zoneinfo.__file__),
                          cur_zoneinfo))
                if ek.ek(os.path.exists, old_file):
                    ek.ek(os.remove, old_file)
            # rename downloaded file
            ek.ek(os.rename, zonefile_tmp, zonefile)
            from dateutil.zoneinfo import gettz
            if '_CLASS_ZONE_INSTANCE' in gettz.func_globals:
                gettz.func_globals.__setitem__('_CLASS_ZONE_INSTANCE', list())

            sb_timezone = tz.tzlocal()
        except:
            _remove_zoneinfo_failed(zonefile_tmp)
            return
    else:
        _remove_zoneinfo_failed(zonefile_tmp)
        logger.log(
            u'MD5 hash does not match: %s File: %s' %
            (zoneinfo_md5.upper(), new_hash.upper()), logger.ERROR)
        return
Example #30
    def update(self):
        """
        Downloads the latest source tarball from github and installs it over the existing version.
        """

        tar_download_url = 'http://github.com/' + self.github_org + '/' + self.github_repo + '/tarball/' + self.branch

        try:
            # prepare the update dir
            sr_update_dir = ek(os.path.join, sickbeard.PROG_DIR, u'sr-update')

            if os.path.isdir(sr_update_dir):
                logger.log(u"Clearing out update folder " + sr_update_dir + " before extracting")
                shutil.rmtree(sr_update_dir)

            logger.log(u"Creating update folder " + sr_update_dir + " before extracting")
            os.makedirs(sr_update_dir)

            # retrieve file
            logger.log(u"Downloading update from " + repr(tar_download_url))
            tar_download_path = os.path.join(sr_update_dir, u'sr-update.tar')
            helpers.download_file(tar_download_url, tar_download_path, session=self.session)

            if not ek(os.path.isfile, tar_download_path):
                logger.log(u"Unable to retrieve new version from " + tar_download_url + ", can't update", logger.WARNING)
                return False

            if not ek(tarfile.is_tarfile, tar_download_path):
                logger.log(u"Retrieved version from " + tar_download_url + " is corrupt, can't update", logger.ERROR)
                return False

            # extract to sr-update dir
            logger.log(u"Extracting file " + tar_download_path)
            tar = tarfile.open(tar_download_path)
            tar.extractall(sr_update_dir)
            tar.close()

            # delete .tar.gz
            logger.log(u"Deleting file " + tar_download_path)
            os.remove(tar_download_path)

            # find update dir name
            update_dir_contents = [x for x in os.listdir(sr_update_dir) if
                                   os.path.isdir(os.path.join(sr_update_dir, x))]
            if len(update_dir_contents) != 1:
                logger.log(u"Invalid update data, update failed: " + str(update_dir_contents), logger.ERROR)
                return False
            content_dir = os.path.join(sr_update_dir, update_dir_contents[0])

            # walk temp folder and move files to main folder
            logger.log(u"Moving files from " + content_dir + " to " + sickbeard.PROG_DIR)
            for dirname, _, filenames in os.walk(content_dir):  # @UnusedVariable
                dirname = dirname[len(content_dir) + 1:]
                for curfile in filenames:
                    old_path = os.path.join(content_dir, dirname, curfile)
                    new_path = os.path.join(sickbeard.PROG_DIR, dirname, curfile)

                    # Avoid DLL access problem on WIN32/64
                    # These files need to be updated manually,
                    # or we need a way to release the in-memory lock on them
                    if curfile in ('unrar.dll', 'unrar64.dll'):
                        try:
                            os.chmod(new_path, stat.S_IWRITE)
                            os.remove(new_path)
                            os.renames(old_path, new_path)
                        except Exception as e:
                            logger.log(u"Unable to update " + new_path + ': ' + ex(e), logger.DEBUG)
                            os.remove(old_path)  # Trash the updated file without moving in new path
                        continue

                    if os.path.isfile(new_path):
                        os.remove(new_path)
                    os.renames(old_path, new_path)

            sickbeard.CUR_COMMIT_HASH = self._newest_commit_hash
            sickbeard.CUR_COMMIT_BRANCH = self.branch
Example #31
def search_providers(show, episodes, manual_search=False, torrent_only=False, try_other_searches=False, old_status=None, scheduled=False):
    found_results = {}
    final_results = []

    search_done = False

    orig_thread_name = threading.currentThread().name

    use_quality_list = None
    if any([episodes]):
        old_status = old_status or failed_history.find_old_status(episodes[0]) or episodes[0].status
        if old_status:
            status, quality = Quality.splitCompositeStatus(old_status)
            use_quality_list = (status not in (
                common.WANTED, common.FAILED, common.UNAIRED, common.SKIPPED, common.IGNORED, common.UNKNOWN))

    provider_list = [x for x in sickbeard.providers.sortedProviderList() if x.is_active() and x.enable_backlog and
                     (not torrent_only or x.providerType == GenericProvider.TORRENT) and
                     (not scheduled or x.enable_scheduled_backlog)]
    for cur_provider in provider_list:
        if cur_provider.anime_only and not show.is_anime:
            logger.log(u'%s is not an anime, skipping' % show.name, logger.DEBUG)
            continue

        threading.currentThread().name = '%s :: [%s]' % (orig_thread_name, cur_provider.name)
        provider_id = cur_provider.get_id()

        found_results[provider_id] = {}

        search_count = 0
        search_mode = getattr(cur_provider, 'search_mode', 'eponly')

        while True:
            search_count += 1

            if 'eponly' == search_mode:
                logger.log(u'Performing episode search for %s' % show.name)
            else:
                logger.log(u'Performing season pack search for %s' % show.name)

            try:
                cur_provider.cache._clearCache()
                search_results = cur_provider.find_search_results(show, episodes, search_mode, manual_search,
                                                                  try_other_searches=try_other_searches)
                if any(search_results):
                    logger.log(', '.join(['%s %s candidate%s' % (
                        len(v), (('multiep', 'season')[SEASON_RESULT == k], 'episode')['ep' in search_mode],
                        helpers.maybe_plural(len(v))) for (k, v) in search_results.iteritems()]))
            except exceptions.AuthException as e:
                logger.log(u'Authentication error: %s' % ex(e), logger.ERROR)
                break
            except Exception as e:
                logger.log(u'Error while searching %s, skipping: %s' % (cur_provider.name, ex(e)), logger.ERROR)
                logger.log(traceback.format_exc(), logger.ERROR)
                break
            finally:
                threading.currentThread().name = orig_thread_name

            search_done = True

            if len(search_results):
                # make a list of all the results for this provider
                for cur_ep in search_results:
                    # skip non-tv crap
                    search_results[cur_ep] = filter(
                        lambda ep_item: show_name_helpers.pass_wordlist_checks(
                            ep_item.name, parse=False, indexer_lookup=False) and
                                        ep_item.show == show, search_results[cur_ep])

                    if cur_ep in found_results:
                        found_results[provider_id][cur_ep] += search_results[cur_ep]
                    else:
                        found_results[provider_id][cur_ep] = search_results[cur_ep]

                break
            elif not getattr(cur_provider, 'search_fallback', False) or 2 == search_count:
                break

            search_mode = '%sonly' % ('ep', 'sp')['ep' in search_mode]
            logger.log(u'Falling back to %s search ...' % ('season pack', 'episode')['ep' in search_mode])

        # skip to next provider if we have no results to process
        if not len(found_results[provider_id]):
            continue

        any_qualities, best_qualities = Quality.splitQuality(show.quality)

        # pick the best season NZB
        best_season_result = None
        if SEASON_RESULT in found_results[provider_id]:
            best_season_result = pick_best_result(found_results[provider_id][SEASON_RESULT], show,
                                                  any_qualities + best_qualities)

        highest_quality_overall = 0
        for cur_episode in found_results[provider_id]:
            for cur_result in found_results[provider_id][cur_episode]:
                if Quality.UNKNOWN != cur_result.quality and highest_quality_overall < cur_result.quality:
                    highest_quality_overall = cur_result.quality
        logger.log(u'%s is the highest quality of any match' % Quality.qualityStrings[highest_quality_overall],
                   logger.DEBUG)

        # see if every episode is wanted
        if best_season_result:
            # get the quality of the season nzb
            season_qual = best_season_result.quality
            logger.log(u'%s is the quality of the season %s' % (Quality.qualityStrings[season_qual],
                                                                best_season_result.provider.providerType), logger.DEBUG)

            my_db = db.DBConnection()
            sql = 'SELECT season, episode FROM tv_episodes WHERE showid = %s AND (season IN (%s))' %\
                  (show.indexerid, ','.join([str(x.season) for x in episodes]))
            ep_nums = [(int(x['season']), int(x['episode'])) for x in my_db.select(sql)]

            logger.log(u'Executed query: [%s]' % sql)
            logger.log(u'Episode list: %s' % ep_nums, logger.DEBUG)

            all_wanted = True
            any_wanted = False
            for ep_num in ep_nums:
                if not show.wantEpisode(ep_num[0], ep_num[1], season_qual):
                    all_wanted = False
                else:
                    any_wanted = True

            # if we need every ep in the season and there's nothing better then just download this and
            # be done with it (unless single episodes are preferred)
            if all_wanted and highest_quality_overall == best_season_result.quality:
                logger.log(u'Every episode in this season is needed, downloading the whole %s %s' %
                           (best_season_result.provider.providerType, best_season_result.name))
                ep_objs = []
                for ep_num in ep_nums:
                    ep_objs.append(show.getEpisode(ep_num[0], ep_num[1]))
                best_season_result.episodes = ep_objs

                return [best_season_result]

            elif not any_wanted:
                logger.log(u'No episodes from this season are wanted at this quality, ignoring the result of ' +
                           best_season_result.name, logger.DEBUG)
            else:
                if GenericProvider.NZB == best_season_result.provider.providerType:
                    logger.log(u'Breaking apart the NZB and adding the individual ones to our results', logger.DEBUG)

                    # if not, break it apart and add them as the lowest priority results
                    individual_results = nzbSplitter.splitResult(best_season_result)

                    individual_results = filter(
                        lambda r: show_name_helpers.pass_wordlist_checks(
                            r.name, parse=False, indexer_lookup=False) and r.show == show, individual_results)

                    for cur_result in individual_results:
                        if 1 == len(cur_result.episodes):
                            ep_num = cur_result.episodes[0].episode
                        elif 1 < len(cur_result.episodes):
                            ep_num = MULTI_EP_RESULT

                        if ep_num in found_results[provider_id]:
                            found_results[provider_id][ep_num].append(cur_result)
                        else:
                            found_results[provider_id][ep_num] = [cur_result]

                # If this is a torrent all we can do is leech the entire torrent;
                # the user will have to select which eps not to download in their torrent client
                else:

                    # Season result from Torrent Provider must be a full-season torrent, creating multi-ep result for it
                    logger.log(u'Adding multi episode result for full season torrent. In your torrent client, mark ' +
                               u'the episodes that you do not want as "don\'t download"')
                    ep_objs = []
                    for ep_num in ep_nums:
                        ep_objs.append(show.getEpisode(ep_num[0], ep_num[1]))
                    best_season_result.episodes = ep_objs

                    ep_num = MULTI_EP_RESULT
                    if ep_num in found_results[provider_id]:
                        found_results[provider_id][ep_num].append(best_season_result)
                    else:
                        found_results[provider_id][ep_num] = [best_season_result]

        # go through multi-ep results and see if we really want them or not, get rid of the rest
        multi_results = {}
        if MULTI_EP_RESULT in found_results[provider_id]:
            for multi_result in found_results[provider_id][MULTI_EP_RESULT]:

                logger.log(u'Checking usefulness of multi episode result [%s]' % multi_result.name, logger.DEBUG)

                if sickbeard.USE_FAILED_DOWNLOADS and failed_history.has_failed(multi_result.name, multi_result.size,
                                                                                multi_result.provider.name):
                    logger.log(u'Rejecting previously failed multi episode result [%s]' % multi_result.name)
                    continue

                # see how many of the eps that this result covers aren't covered by single results
                needed_eps = []
                not_needed_eps = []
                for ep_obj in multi_result.episodes:
                    ep_num = ep_obj.episode
                    # if we have results for the episode
                    if ep_num in found_results[provider_id] and 0 < len(found_results[provider_id][ep_num]):
                        needed_eps.append(ep_num)
                    else:
                        not_needed_eps.append(ep_num)

                logger.log(u'Single episode check result is... needed episodes: %s, not needed episodes: %s' %
                           (needed_eps, not_needed_eps), logger.DEBUG)

                if not not_needed_eps:
                    logger.log(u'All of these episodes were covered by single episode results, ' +
                               'ignoring this multi episode result', logger.DEBUG)
                    continue

                # check if these eps are already covered by another multi-result
                multi_needed_eps = []
                multi_not_needed_eps = []
                for ep_obj in multi_result.episodes:
                    ep_num = ep_obj.episode
                    if ep_num in multi_results:
                        multi_not_needed_eps.append(ep_num)
                    else:
                        multi_needed_eps.append(ep_num)

                logger.log(u'Multi episode check result is... multi needed episodes: ' +
                           '%s, multi not needed episodes: %s' % (multi_needed_eps, multi_not_needed_eps), logger.DEBUG)

                if not multi_needed_eps:
                    logger.log(u'All of these episodes were covered by another multi episode nzb, ' +
                               'ignoring this multi episode result',
                               logger.DEBUG)
                    continue

                # if we're keeping this multi-result then remember it
                for ep_obj in multi_result.episodes:
                    multi_results[ep_obj.episode] = multi_result

                # don't bother with the single result if we're going to get it with a multi result
                for ep_obj in multi_result.episodes:
                    ep_num = ep_obj.episode
                    if ep_num in found_results[provider_id]:
                        logger.log(u'A needed multi episode result overlaps with a single episode result for episode ' +
                                   '#%s, removing the single episode results from the list' % ep_num, logger.DEBUG)
                        del found_results[provider_id][ep_num]

        # of all the single ep results narrow it down to the best one for each episode
        final_results += set(multi_results.values())
        quality_list = use_quality_list and (None, best_qualities)[any(best_qualities)] or None
        for cur_ep in found_results[provider_id]:
            if cur_ep in (MULTI_EP_RESULT, SEASON_RESULT):
                continue

            if 0 == len(found_results[provider_id][cur_ep]):
                continue

            best_result = pick_best_result(found_results[provider_id][cur_ep], show, quality_list,
                                           filter_rls=orig_thread_name)

            # if all results were rejected move on to the next episode
            if not best_result:
                continue

            # filter out possible bad torrents from providers
            if 'torrent' == best_result.resultType:
                if not best_result.url.startswith('magnet') and None is not best_result.get_data_func:
                    best_result.url = best_result.get_data_func(best_result.url)
                    best_result.get_data_func = None  # consume only once
                    if not best_result.url:
                        continue
                if best_result.url.startswith('magnet'):
                    if 'blackhole' != sickbeard.TORRENT_METHOD:
                        best_result.content = None
                else:
                    cache_file = ek.ek(os.path.join, sickbeard.CACHE_DIR or helpers._getTempDir(),
                                       '%s.torrent' % (helpers.sanitizeFileName(best_result.name)))
                    if not helpers.download_file(best_result.url, cache_file, session=best_result.provider.session):
                        continue

                    try:
                        with open(cache_file, 'rb') as fh:
                            td = fh.read()
                        setattr(best_result, 'cache_file', cache_file)
                    except (StandardError, Exception):
                        continue

                    if getattr(best_result.provider, 'chk_td', None):
                        name = None
                        try:
                            hdr = re.findall(r'(\w+(\d+):)', td[0:6])[0]
                            x, v = len(hdr[0]), int(hdr[1])
                            while x < len(td):
                                y = x + v
                                name = 'name' == td[x: y]
                                w = re.findall(r'((?:i-?\d+e|e+|d|l+)*(\d+):)', td[y: y + 32])[0]
                                x, v = y + len(w[0]), int(w[1])
                                if name:
                                    name = td[x: x + v]
                                    break
                        except (StandardError, Exception):
                            continue
                        if name:
                            if not pass_show_wordlist_checks(name, show):
                                continue
                            if not show_name_helpers.pass_wordlist_checks(name, indexer_lookup=False):
                                logger.log('Ignored: %s (debug log has detail)' % name)
                                continue
                            best_result.name = name

                    if 'blackhole' != sickbeard.TORRENT_METHOD:
                        best_result.content = td

            # add result if it's not a duplicate
            found = False
            for i, result in enumerate(final_results):
                for best_result_ep in best_result.episodes:
                    if best_result_ep in result.episodes:
                        if best_result.quality > result.quality:
                            final_results.pop(i)
                        else:
                            found = True
            if not found:
                final_results += [best_result]

        # check that we got all the episodes we wanted first before doing a match and snatch
        wanted_ep_count = 0
        for wanted_ep in episodes:
            for result in final_results:
                if wanted_ep in result.episodes and is_final_result(result):
                    wanted_ep_count += 1

        # make sure we search every provider for results unless we found everything we wanted
        if len(episodes) == wanted_ep_count:
            break

    if not len(provider_list):
        logger.log('No NZB/Torrent providers in Media Providers/Options are allowed for active searching', logger.WARNING)
    elif not search_done:
        logger.log('Failed active search of %s enabled provider%s. More info in debug log.' % (
            len(provider_list), helpers.maybe_plural(len(provider_list))), logger.ERROR)
    elif not any(final_results):
        logger.log('No suitable candidates')

    return final_results
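
The inline bencode scan in the chk_td block above is dense. Extracted into a helper, the same top-level dictionary walk that recovers a torrent's display name could read as follows; read_torrent_name is a sketch for illustration, not a function from the source:

import re

def read_torrent_name(td):
    # td is the raw bencoded content of a .torrent file.
    # Walk the keys of the top-level dictionary, skipping values, until
    # the 'name' key is found, then return its string value.
    try:
        hdr = re.findall(r'(\w+(\d+):)', td[0:6])[0]
        x, v = len(hdr[0]), int(hdr[1])
        while x < len(td):
            y = x + v
            is_name = 'name' == td[x:y]
            # eat integer/list/dict/end markers before the next length prefix
            w = re.findall(r'((?:i-?\d+e|e+|d|l+)*(\d+):)', td[y:y + 32])[0]
            x, v = y + len(w[0]), int(w[1])
            if is_name:
                return td[x:x + v]
    except (IndexError, ValueError):
        pass
    return None
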
Exemplo n.º 32
0
def _update_zoneinfo():
    global sb_timezone
    sb_timezone = tz.tzlocal()

    # now check if the zoneinfo needs update
    url_zv = 'https://raw.githubusercontent.com/Prinz23/sb_network_timezones/master/zoneinfo.txt'

    try:
        url_data = helpers.getURL(url_zv)
        if not url_data:
            raise

        if lib.dateutil.zoneinfo.ZONEINFOFILE is not None:
            cur_zoneinfo = ek.ek(basename, lib.dateutil.zoneinfo.ZONEINFOFILE)
        else:
            cur_zoneinfo = None

        (new_zoneinfo, zoneinfo_md5) = url_data.decode('utf-8').strip().rsplit(u' ')
    except:
        # When url_data is None, there was trouble connecting to github
        logger.log(u'Loading zoneinfo.txt failed, this can happen from time to time. Unable to get URL: %s' % url_zv,
                   logger.WARNING)
        return

    if (cur_zoneinfo is not None) and (new_zoneinfo == cur_zoneinfo):
        return

    # now load the new zoneinfo
    url_tar = u'https://raw.githubusercontent.com/Prinz23/sb_network_timezones/master/%s' % new_zoneinfo

    zonefile = helpers.real_path(ek.ek(join, ek.ek(os.path.dirname, lib.dateutil.zoneinfo.__file__), new_zoneinfo))
    zonefile_tmp = re.sub(r'\.tar\.gz$', '.tmp', zonefile)

    if ek.ek(os.path.exists, zonefile_tmp):
        try:
            ek.ek(os.remove, zonefile_tmp)
        except:
            logger.log(u'Unable to delete: %s' % zonefile_tmp, logger.ERROR)
            return

    if not helpers.download_file(url_tar, zonefile_tmp):
        return

    if not ek.ek(os.path.exists, zonefile_tmp):
        logger.log(u'Download of %s failed.' % zonefile_tmp, logger.ERROR)
        return

    new_hash = str(helpers.md5_for_file(zonefile_tmp))

    if zoneinfo_md5.upper() == new_hash.upper():
        logger.log(u'Updating timezone info with new one: %s' % new_zoneinfo, logger.INFO)
        try:
            # remove the old zoneinfo file
            if cur_zoneinfo is not None:
                old_file = helpers.real_path(
                    ek.ek(join, ek.ek(os.path.dirname, lib.dateutil.zoneinfo.__file__), cur_zoneinfo))
                if ek.ek(os.path.exists, old_file):
                    ek.ek(os.remove, old_file)
            # rename downloaded file
            ek.ek(os.rename, zonefile_tmp, zonefile)
            # load the new zoneinfo
            reload(lib.dateutil.zoneinfo)
            sb_timezone = tz.tzlocal()
        except:
            _remove_zoneinfo_failed(zonefile_tmp)
            return
    else:
        _remove_zoneinfo_failed(zonefile_tmp)
        logger.log(u'MD5 hash does not match: %s File: %s' % (zoneinfo_md5.upper(), new_hash.upper()), logger.ERROR)
        return
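
_update_zoneinfo follows a download-verify-swap pattern: fetch to a temp path, check the published md5, then replace the live file. Below is a minimal generic sketch of that pattern; download_and_swap and its download_file argument are illustrative, not from the source:

import hashlib
import os

def download_and_swap(url, dest, expected_md5, download_file):
    # fetch to a temp path next to the destination
    tmp = dest + '.tmp'
    if not download_file(url, tmp):
        return False
    # verify the md5 against the published hash before touching dest
    md5 = hashlib.md5()
    with open(tmp, 'rb') as fh:
        for chunk in iter(lambda: fh.read(8192), b''):
            md5.update(chunk)
    if md5.hexdigest().upper() != expected_md5.upper():
        os.remove(tmp)
        return False
    # on Windows, rename fails if dest exists, hence the remove first
    if os.path.exists(dest):
        os.remove(dest)
    os.rename(tmp, dest)
    return True
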
Exemplo n.º 33
0
def search_providers(show,
                     episodes,
                     manual_search=False,
                     torrent_only=False,
                     try_other_searches=False,
                     old_status=None,
                     scheduled=False):
    found_results = {}
    final_results = []

    search_done = False

    orig_thread_name = threading.currentThread().name

    use_quality_list = None
    if any([episodes]):
        old_status = old_status or failed_history.find_old_status(
            episodes[0]) or episodes[0].status
        if old_status:
            status, quality = Quality.splitCompositeStatus(old_status)
            use_quality_list = (status not in (common.WANTED, common.FAILED,
                                               common.UNAIRED, common.SKIPPED,
                                               common.IGNORED, common.UNKNOWN))

    provider_list = [
        x for x in sickbeard.providers.sortedProviderList()
        if x.is_active() and x.enable_backlog and
        (not torrent_only or x.providerType == GenericProvider.TORRENT) and (
            not scheduled or x.enable_scheduled_backlog)
    ]
    for cur_provider in provider_list:
        if cur_provider.anime_only and not show.is_anime:
            logger.log(u'%s is not an anime, skipping' % show.name,
                       logger.DEBUG)
            continue

        threading.currentThread().name = '%s :: [%s]' % (orig_thread_name,
                                                         cur_provider.name)
        provider_id = cur_provider.get_id()

        found_results[provider_id] = {}

        search_count = 0
        search_mode = cur_provider.search_mode

        while True:
            search_count += 1

            if 'eponly' == search_mode:
                logger.log(u'Performing episode search for %s' % show.name)
            else:
                logger.log(u'Performing season pack search for %s' % show.name)

            try:
                cur_provider.cache._clearCache()
                search_results = cur_provider.find_search_results(
                    show,
                    episodes,
                    search_mode,
                    manual_search,
                    try_other_searches=try_other_searches)
                if any(search_results):
                    logger.log(', '.join([
                        '%s %s candidate%s' %
                        (len(v), (('multiep', 'season')[SEASON_RESULT == k],
                                  'episode')['ep' in search_mode],
                         helpers.maybe_plural(len(v)))
                        for (k, v) in search_results.iteritems()
                    ]))
            except exceptions.AuthException as e:
                logger.log(u'Authentication error: %s' % ex(e), logger.ERROR)
                break
            except Exception as e:
                logger.log(
                    u'Error while searching %s, skipping: %s' %
                    (cur_provider.name, ex(e)), logger.ERROR)
                logger.log(traceback.format_exc(), logger.ERROR)
                break
            finally:
                threading.currentThread().name = orig_thread_name

            search_done = True

            if len(search_results):
                # make a list of all the results for this provider
                for cur_ep in search_results:
                    # skip non-tv crap
                    search_results[cur_ep] = filter(
                        lambda ep_item: show_name_helpers.pass_wordlist_checks(
                            ep_item.name, parse=False, indexer_lookup=False)
                        and ep_item.show == show, search_results[cur_ep])

                    if cur_ep in found_results[provider_id]:
                        found_results[provider_id][cur_ep] += search_results[
                            cur_ep]
                    else:
                        found_results[provider_id][cur_ep] = search_results[
                            cur_ep]

                break
            elif not cur_provider.search_fallback or search_count == 2:
                break

            search_mode = '%sonly' % ('ep', 'sp')['ep' in search_mode]
            logger.log(u'Falling back to %s search ...' %
                       ('season pack', 'episode')['ep' in search_mode])

        # skip to next provider if we have no results to process
        if not len(found_results[provider_id]):
            continue

        any_qualities, best_qualities = Quality.splitQuality(show.quality)

        # pick the best season NZB
        best_season_result = None
        if SEASON_RESULT in found_results[provider_id]:
            best_season_result = pick_best_result(
                found_results[provider_id][SEASON_RESULT], show,
                any_qualities + best_qualities)

        highest_quality_overall = 0
        for cur_episode in found_results[provider_id]:
            for cur_result in found_results[provider_id][cur_episode]:
                if Quality.UNKNOWN != cur_result.quality and highest_quality_overall < cur_result.quality:
                    highest_quality_overall = cur_result.quality
        logger.log(
            u'%s is the highest quality of any match' %
            Quality.qualityStrings[highest_quality_overall], logger.DEBUG)

        # see if every episode is wanted
        if best_season_result:
            # get the quality of the season nzb
            season_qual = best_season_result.quality
            logger.log(
                u'%s is the quality of the season %s' %
                (Quality.qualityStrings[season_qual],
                 best_season_result.provider.providerType), logger.DEBUG)

            my_db = db.DBConnection()
            sql = 'SELECT episode FROM tv_episodes WHERE showid = %s AND (season IN (%s))' %\
                  (show.indexerid, ','.join([str(x.season) for x in episodes]))
            ep_nums = [int(x['episode']) for x in my_db.select(sql)]

            logger.log(u'Executed query: [%s]' % sql)
            logger.log(u'Episode list: %s' % ep_nums, logger.DEBUG)

            all_wanted = True
            any_wanted = False
            for ep_num in ep_nums:
                for season in set([x.season for x in episodes]):
                    if not show.wantEpisode(season, ep_num, season_qual):
                        all_wanted = False
                    else:
                        any_wanted = True

            # if we need every ep in the season and there's nothing better then just download this and
            # be done with it (unless single episodes are preferred)
            if all_wanted and highest_quality_overall == best_season_result.quality:
                logger.log(
                    u'Every episode in this season is needed, downloading the whole %s %s'
                    % (best_season_result.provider.providerType,
                       best_season_result.name))
                ep_objs = []
                for ep_num in ep_nums:
                    for season in set([x.season for x in episodes]):
                        ep_objs.append(show.getEpisode(season, ep_num))
                best_season_result.episodes = ep_objs

                return [best_season_result]

            elif not any_wanted:
                logger.log(
                    u'No episodes from this season are wanted at this quality, ignoring the result of '
                    + best_season_result.name, logger.DEBUG)
            else:
                if GenericProvider.NZB == best_season_result.provider.providerType:
                    logger.log(
                        u'Breaking apart the NZB and adding the individual ones to our results',
                        logger.DEBUG)

                    # if not, break it apart and add them as the lowest priority results
                    individual_results = nzbSplitter.splitResult(
                        best_season_result)

                    individual_results = filter(
                        lambda r: show_name_helpers.pass_wordlist_checks(
                            r.name, parse=False, indexer_lookup=False) and r.
                        show == show, individual_results)

                    for cur_result in individual_results:
                        if 1 == len(cur_result.episodes):
                            ep_num = cur_result.episodes[0].episode
                        elif 1 < len(cur_result.episodes):
                            ep_num = MULTI_EP_RESULT

                        if ep_num in found_results[provider_id]:
                            found_results[provider_id][ep_num].append(
                                cur_result)
                        else:
                            found_results[provider_id][ep_num] = [cur_result]

                # If this is a torrent all we can do is leech the entire torrent;
                # the user will have to select which eps not to download in their torrent client
                else:

                    # Season result from Torrent Provider must be a full-season torrent, creating multi-ep result for it
                    logger.log(
                        u'Adding multi episode result for full season torrent. In your torrent client, mark '
                        + u'the episodes that you do not want as "don\'t download"'
                    )
                    ep_objs = []
                    for ep_num in ep_nums:
                        for season in set([x.season for x in episodes]):
                            ep_objs.append(show.getEpisode(season, ep_num))
                    best_season_result.episodes = ep_objs

                    ep_num = MULTI_EP_RESULT
                    if ep_num in found_results[provider_id]:
                        found_results[provider_id][ep_num].append(
                            best_season_result)
                    else:
                        found_results[provider_id][ep_num] = [
                            best_season_result
                        ]

        # go through multi-ep results and see if we really want them or not, get rid of the rest
        multi_results = {}
        if MULTI_EP_RESULT in found_results[provider_id]:
            for multi_result in found_results[provider_id][MULTI_EP_RESULT]:

                logger.log(
                    u'Checking usefulness of multi episode result [%s]' %
                    multi_result.name, logger.DEBUG)

                if sickbeard.USE_FAILED_DOWNLOADS and failed_history.has_failed(
                        multi_result.name, multi_result.size,
                        multi_result.provider.name):
                    logger.log(
                        u'Rejecting previously failed multi episode result [%s]'
                        % multi_result.name)
                    continue

                # see how many of the eps that this result covers aren't covered by single results
                needed_eps = []
                not_needed_eps = []
                for ep_obj in multi_result.episodes:
                    ep_num = ep_obj.episode
                    # if we have results for the episode
                    if ep_num in found_results[provider_id] and 0 < len(
                            found_results[provider_id][ep_num]):
                        needed_eps.append(ep_num)
                    else:
                        not_needed_eps.append(ep_num)

                logger.log(
                    u'Single episode check result is... needed episodes: %s, not needed episodes: %s'
                    % (needed_eps, not_needed_eps), logger.DEBUG)

                if not not_needed_eps:
                    logger.log(
                        u'All of these episodes were covered by single episode results, '
                        + 'ignoring this multi episode result', logger.DEBUG)
                    continue

                # check if these eps are already covered by another multi-result
                multi_needed_eps = []
                multi_not_needed_eps = []
                for ep_obj in multi_result.episodes:
                    ep_num = ep_obj.episode
                    if ep_num in multi_results:
                        multi_not_needed_eps.append(ep_num)
                    else:
                        multi_needed_eps.append(ep_num)

                logger.log(
                    u'Multi episode check result is... multi needed episodes: '
                    + '%s, multi not needed episodes: %s' %
                    (multi_needed_eps, multi_not_needed_eps), logger.DEBUG)

                if not multi_needed_eps:
                    logger.log(
                        u'All of these episodes were covered by another multi episode nzb, '
                        + 'ignoring this multi episode result', logger.DEBUG)
                    continue

                # if we're keeping this multi-result then remember it
                for ep_obj in multi_result.episodes:
                    multi_results[ep_obj.episode] = multi_result

                # don't bother with the single result if we're going to get it with a multi result
                for ep_obj in multi_result.episodes:
                    ep_num = ep_obj.episode
                    if ep_num in found_results[provider_id]:
                        logger.log(
                            u'A needed multi episode result overlaps with a single episode result for episode '
                            +
                            '#%s, removing the single episode results from the list'
                            % ep_num, logger.DEBUG)
                        del found_results[provider_id][ep_num]

        # of all the single ep results narrow it down to the best one for each episode
        final_results += set(multi_results.values())
        quality_list = use_quality_list and (
            None, best_qualities)[any(best_qualities)] or None
        for cur_ep in found_results[provider_id]:
            if cur_ep in (MULTI_EP_RESULT, SEASON_RESULT):
                continue

            if 0 == len(found_results[provider_id][cur_ep]):
                continue

            best_result = pick_best_result(found_results[provider_id][cur_ep],
                                           show, quality_list)

            # if all results were rejected move on to the next episode
            if not best_result:
                continue

            # filter out possible bad torrents from providers
            if 'torrent' == best_result.resultType:
                if not best_result.url.startswith(
                        'magnet') and None is not best_result.get_data_func:
                    best_result.url = best_result.get_data_func(
                        best_result.url)
                    best_result.get_data_func = None  # consume only once
                    if not best_result.url:
                        continue
                if best_result.url.startswith('magnet'):
                    if 'blackhole' != sickbeard.TORRENT_METHOD:
                        best_result.content = None
                else:
                    cache_file = ek.ek(
                        os.path.join, sickbeard.CACHE_DIR
                        or helpers._getTempDir(), '%s.torrent' %
                        (helpers.sanitizeFileName(best_result.name)))
                    if not helpers.download_file(
                            best_result.url,
                            cache_file,
                            session=best_result.provider.session):
                        continue

                    try:
                        with open(cache_file, 'rb') as fh:
                            td = fh.read()
                        setattr(best_result, 'cache_file', cache_file)
                    except (StandardError, Exception):
                        continue

                    if getattr(best_result.provider, 'chk_td', None):
                        name = None
                        try:
                            hdr = re.findall(r'(\w+(\d+):)', td[0:6])[0]
                            x, v = len(hdr[0]), int(hdr[1])
                            while x < len(td):
                                y = x + v
                                name = 'name' == td[x:y]
                                w = re.findall(r'((?:i-?\d+e|e+|d|l+)*(\d+):)',
                                               td[y:y + 32])[0]
                                x, v = y + len(w[0]), int(w[1])
                                if name:
                                    name = td[x:x + v]
                                    break
                        except (StandardError, Exception):
                            continue
                        if name:
                            if not pass_show_wordlist_checks(name, show):
                                continue
                            if not show_name_helpers.pass_wordlist_checks(
                                    name, indexer_lookup=False):
                                logger.log(
                                    'Ignored: %s (debug log has detail)' %
                                    name)
                                continue
                            best_result.name = name

                    if 'blackhole' != sickbeard.TORRENT_METHOD:
                        best_result.content = td

            # add result if it's not a duplicate
            found = False
            for i, result in enumerate(final_results):
                for best_result_ep in best_result.episodes:
                    if best_result_ep in result.episodes:
                        if best_result.quality > result.quality:
                            final_results.pop(i)
                        else:
                            found = True
            if not found:
                final_results += [best_result]

        # check that we got all the episodes we wanted first before doing a match and snatch
        wanted_ep_count = 0
        for wanted_ep in episodes:
            for result in final_results:
                if wanted_ep in result.episodes and is_final_result(result):
                    wanted_ep_count += 1

        # make sure we search every provider for results unless we found everything we wanted
        if len(episodes) == wanted_ep_count:
            break

    if not len(provider_list):
        logger.log(
            'No NZB/Torrent providers in Media Providers/Options are allowed for active searching',
            logger.WARNING)
    elif not search_done:
        logger.log(
            'Failed active search of %s enabled provider%s. More info in debug log.'
            % (len(provider_list), helpers.maybe_plural(len(provider_list))),
            logger.ERROR)
    elif not any(final_results):
        logger.log('No suitable candidates')

    return final_results
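
One expression shared by both versions of search_providers deserves unpacking: the py2-era and/or ternary that builds quality_list. Unrolled, it is equivalent to:

if use_quality_list and any(best_qualities):
    quality_list = best_qualities
else:
    quality_list = None
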
Exemplo n.º 34
0
    def update(self):
        """
        Downloads the latest source tarball from github and installs it over the existing version.
        """

        tar_download_url = 'http://github.com/' + self.github_org + '/' + self.github_repo + '/tarball/' + self.branch

        try:
            # prepare the update dir
            sr_update_dir = ek(os.path.join, sickbeard.PROG_DIR, u'sr-update')

            if ek(os.path.isdir, sr_update_dir):
                logger.log(u"Clearing out update folder " + sr_update_dir + " before extracting")
                shutil.rmtree(sr_update_dir)

            logger.log(u"Creating update folder " + sr_update_dir + " before extracting")
            ek(os.makedirs, sr_update_dir)

            # retrieve file
            logger.log(u"Downloading update from " + repr(tar_download_url))
            tar_download_path = ek(os.path.join, sr_update_dir, u'sr-update.tar')
            helpers.download_file(tar_download_url, tar_download_path, session=self.session)

            if not ek(os.path.isfile, tar_download_path):
                logger.log(u"Unable to retrieve new version from " + tar_download_url + ", can't update", logger.WARNING)
                return False

            if not ek(tarfile.is_tarfile, tar_download_path):
                logger.log(u"Retrieved version from " + tar_download_url + " is corrupt, can't update", logger.ERROR)
                return False

            # extract to sr-update dir
            logger.log(u"Extracting file " + tar_download_path)
            tar = tarfile.open(tar_download_path)
            tar.extractall(sr_update_dir)
            tar.close()

            # delete .tar.gz
            logger.log(u"Deleting file " + tar_download_path)
            ek(os.remove, tar_download_path)

            # find update dir name
            update_dir_contents = [x for x in ek(os.listdir, sr_update_dir) if
                                   ek(os.path.isdir, ek(os.path.join, sr_update_dir, x))]
            if len(update_dir_contents) != 1:
                logger.log(u"Invalid update data, update failed: " + str(update_dir_contents), logger.ERROR)
                return False
            content_dir = ek(os.path.join, sr_update_dir, update_dir_contents[0])

            # walk temp folder and move files to main folder
            logger.log(u"Moving files from " + content_dir + " to " + sickbeard.PROG_DIR)
            for dirname, _, filenames in ek(os.walk, content_dir):  # @UnusedVariable
                dirname = dirname[len(content_dir) + 1:]
                for curfile in filenames:
                    old_path = ek(os.path.join, content_dir, dirname, curfile)
                    new_path = ek(os.path.join, sickbeard.PROG_DIR, dirname, curfile)

                    # Avoid DLL access problems on WIN32/64.
                    # These files need to be updated manually,
                    # or find a way to kill the access from memory.
                    if curfile in ('unrar.dll', 'unrar64.dll'):
                        try:
                            ek(os.chmod, new_path, stat.S_IWRITE)
                            ek(os.remove, new_path)
                            ek(os.renames, old_path, new_path)
                        except Exception as e:
                            logger.log(u"Unable to update " + new_path + ': ' + ex(e), logger.DEBUG)
                            ek(os.remove, old_path)  # Trash the updated file without moving in new path
                        continue

                    if ek(os.path.isfile, new_path):
                        ek(os.remove, new_path)
                    ek(os.renames, old_path, new_path)

            sickbeard.CUR_COMMIT_HASH = self._newest_commit_hash
            sickbeard.CUR_COMMIT_BRANCH = self.branch

        except Exception as e:
            logger.log(u"Error while trying to update: " + ex(e), logger.ERROR)
            logger.log(u"Traceback: " + traceback.format_exc(), logger.DEBUG)
            return False

        # Notify update successful
        try:
            notifiers.notify_git_update(sickbeard.NEWEST_VERSION_STRING)
        except Exception:
            logger.log(u"Unable to send update notification. Continuing the update process", logger.DEBUG)
        return True
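
The walk-and-move step in update() mirrors the extracted tree onto the install folder. As a standalone sketch (move_tree is illustrative, not a helper from the source):

import os

def move_tree(src_root, dest_root):
    # mirror every file under src_root onto dest_root, replacing files
    # that already exist; os.renames creates intermediate directories
    for dirname, _, filenames in os.walk(src_root):
        rel = dirname[len(src_root) + 1:]
        for curfile in filenames:
            old_path = os.path.join(src_root, rel, curfile)
            new_path = os.path.join(dest_root, rel, curfile)
            if os.path.isfile(new_path):
                os.remove(new_path)
            os.renames(old_path, new_path)
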
Exemplo n.º 35
0
    def cache_image(self, image_url, image_path):
        # Only cache if the file does not exist yet
        if not ek(os.path.isfile, image_path):
            helpers.download_file(image_url, image_path, session=self.session)
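
Usage is a one-liner, and the call is a no-op when the file is already cached; the URL and path below are illustrative:

poster_url = 'http://example.com/posters/12345.jpg'
poster_path = ek(os.path.join, sickbeard.CACHE_DIR, 'images', '12345.jpg')
metadata.cache_image(poster_url, poster_path)  # metadata stands in for the class instance
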
Exemplo n.º 36
0
def _update_zoneinfo():
    global sb_timezone
    sb_timezone = tz.tzlocal()

    # now check if the zoneinfo needs update
    url_zv = 'https://raw.githubusercontent.com/Prinz23/sb_network_timezones/master/zoneinfo.txt'

    url_data = helpers.getURL(url_zv)

    if url_data is None:
        # When url_data is None, there was trouble connecting to github
        logger.log(u"Loading zoneinfo.txt failed. Unable to get URL: " + url_zv, logger.ERROR)
        return

    if lib.dateutil.zoneinfo.ZONEINFOFILE is not None:
        cur_zoneinfo = ek.ek(basename, lib.dateutil.zoneinfo.ZONEINFOFILE)
    else:
        cur_zoneinfo = None
    (new_zoneinfo, zoneinfo_md5) = url_data.decode('utf-8').strip().rsplit(u' ')

    if (cur_zoneinfo is not None) and (new_zoneinfo == cur_zoneinfo):
        return
        return

    # now load the new zoneinfo
    url_tar = u'https://raw.githubusercontent.com/Prinz23/sb_network_timezones/master/' + new_zoneinfo

    zonefile = helpers.real_path(ek.ek(join, ek.ek(os.path.dirname, lib.dateutil.zoneinfo.__file__), new_zoneinfo))
    zonefile_tmp = re.sub(r"\.tar\.gz$", '.tmp', zonefile)

    if ek.ek(os.path.exists, zonefile_tmp):
        try:
            ek.ek(os.remove, zonefile_tmp)
        except:
            logger.log(u"Unable to delete: " + zonefile_tmp, logger.ERROR)
            return

    if not helpers.download_file(url_tar, zonefile_tmp):
        return

    if not ek.ek(os.path.exists, zonefile_tmp):
        logger.log(u"Download of " + zonefile_tmp + " failed.", logger.ERROR)
        return

    new_hash = str(helpers.md5_for_file(zonefile_tmp))

    if zoneinfo_md5.upper() == new_hash.upper():
        logger.log(u"Updating timezone info with new one: " + new_zoneinfo, logger.MESSAGE)
        try:
            # remove the old zoneinfo file
            if cur_zoneinfo is not None:
                old_file = helpers.real_path(
                    ek.ek(join, ek.ek(os.path.dirname, lib.dateutil.zoneinfo.__file__), cur_zoneinfo))
                if ek.ek(os.path.exists, old_file):
                    ek.ek(os.remove, old_file)
            # rename downloaded file
            ek.ek(os.rename, zonefile_tmp, zonefile)
            # load the new zoneinfo
            reload(lib.dateutil.zoneinfo)
            sb_timezone = tz.tzlocal()
        except:
            _remove_zoneinfo_failed(zonefile_tmp)
            return
    else:
        _remove_zoneinfo_failed(zonefile_tmp)
        logger.log(u"MD5 HASH doesn't match: " + zoneinfo_md5.upper() + ' File: ' + new_hash.upper(), logger.ERROR)
        return
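
Note that zoneinfo.txt is a single line of the form '<tarball-name> <md5>', which is what the rsplit above separates; the value below is illustrative:

url_data = u'dateutil-zoneinfo-2017b.tar.gz 0123456789abcdef0123456789abcdef'
new_zoneinfo, zoneinfo_md5 = url_data.strip().rsplit(u' ')
# rsplit(u' ', 1) would be the defensive choice; without a maxsplit the
# unpack only succeeds because the line contains exactly one space
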
Exemplo n.º 37
0
    def update(self):  # pylint: disable=too-many-statements
        """
        Downloads the latest source tarball from github and installs it over the existing version.
        """

        tar_download_url = 'http://github.com/' + sickbeard.GIT_ORG + '/' + sickbeard.GIT_REPO + '/tarball/' + self.branch

        try:
            # prepare the update dir
            sr_update_dir = ek(os.path.join, sickbeard.PROG_DIR, 'sr-update')

            if ek(os.path.isdir, sr_update_dir):
                logger.log("Clearing out update folder " + sr_update_dir +
                           " before extracting")
                shutil.rmtree(sr_update_dir)

            logger.log("Creating update folder " + sr_update_dir +
                       " before extracting")
            ek(os.makedirs, sr_update_dir)

            # retrieve file
            logger.log(
                "Downloading update from {url}".format(url=tar_download_url))
            tar_download_path = ek(os.path.join, sr_update_dir,
                                   'sr-update.tar')
            helpers.download_file(tar_download_url,
                                  tar_download_path,
                                  session=self.session)

            if not ek(os.path.isfile, tar_download_path):
                logger.log(
                    "Unable to retrieve new version from " + tar_download_url +
                    ", can't update", logger.WARNING)
                return False

            if not ek(tarfile.is_tarfile, tar_download_path):
                logger.log(
                    "Retrieved version from " + tar_download_url +
                    " is corrupt, can't update", logger.ERROR)
                return False

            # extract to sr-update dir
            logger.log("Extracting file " + tar_download_path)
            tar = tarfile.open(tar_download_path)
            tar.extractall(sr_update_dir)
            tar.close()

            # delete .tar.gz
            logger.log("Deleting file " + tar_download_path)
            ek(os.remove, tar_download_path)

            # find update dir name
            update_dir_contents = [
                x for x in ek(os.listdir, sr_update_dir)
                if ek(os.path.isdir, ek(os.path.join, sr_update_dir, x))
            ]

            if len(update_dir_contents) != 1:
                logger.log(
                    "Invalid update data, update failed: " +
                    str(update_dir_contents), logger.ERROR)
                return False

            # walk temp folder and move files to main folder
            content_dir = ek(os.path.join, sr_update_dir,
                             update_dir_contents[0])
            logger.log("Moving files from " + content_dir + " to " +
                       sickbeard.PROG_DIR)
            for dirname, stderr_, filenames in ek(
                    os.walk, content_dir):  # @UnusedVariable
                dirname = dirname[len(content_dir) + 1:]
                for curfile in filenames:
                    old_path = ek(os.path.join, content_dir, dirname, curfile)
                    new_path = ek(os.path.join, sickbeard.PROG_DIR, dirname,
                                  curfile)

                    if ek(os.path.isfile, new_path):
                        ek(os.remove, new_path)
                    ek(os.renames, old_path, new_path)

            sickbeard.CUR_COMMIT_HASH = self._newest_commit_hash
            sickbeard.CUR_COMMIT_BRANCH = self.branch

        except Exception as e:
            logger.log("Error while trying to update: " + ex(e), logger.ERROR)
            logger.log("Traceback: " + traceback.format_exc(), logger.DEBUG)
            return False

        # Notify update successful
        notifiers.notify_git_update(sickbeard.CUR_COMMIT_HASH or "")
        return True
Exemplo n.º 38
0
    def download_result(self, result):
        """
        Save the result to disk.
        """

        # check for auth
        if not self._authorised():
            return False

        if GenericProvider.TORRENT == self.providerType:
            final_dir = sickbeard.TORRENT_DIR
            link_type = 'magnet'
            try:
                torrent_hash = re.findall('(?i)urn:btih:([0-9a-f]{32,40})', result.url)[0].upper()

                if 32 == len(torrent_hash):
                    torrent_hash = b16encode(b32decode(torrent_hash)).lower()

                if not torrent_hash:
                    logger.log('Unable to extract torrent hash from link: ' + result.url, logger.ERROR)
                    return False

                urls = ['http%s://%s/torrent/%s.torrent' % (u + (torrent_hash,))
                        for u in (('s', 'itorrents.org'), ('s', 'torra.pro'), ('s', 'torra.click'),
                                  ('s', 'torrage.info'), ('', 'reflektor.karmorra.info'),
                                  ('s', 'torrentproject.se'), ('', 'thetorrent.org'))]
            except (StandardError, Exception):
                link_type = 'torrent'
                urls = [result.url]

        elif GenericProvider.NZB == self.providerType:
            final_dir = sickbeard.NZB_DIR
            link_type = 'nzb'
            urls = [result.url]

        else:
            return

        ref_state = 'Referer' in self.session.headers and self.session.headers['Referer']
        saved = False
        for url in urls:
            cache_dir = sickbeard.CACHE_DIR or helpers._getTempDir()
            base_name = '%s.%s' % (helpers.sanitizeFileName(result.name), self.providerType)
            cache_file = ek.ek(os.path.join, cache_dir, base_name)

            self.session.headers['Referer'] = url
            if helpers.download_file(url, cache_file, session=self.session):

                if self._verify_download(cache_file):
                    logger.log(u'Downloaded %s result from %s' % (self.name, url))
                    final_file = ek.ek(os.path.join, final_dir, base_name)
                    try:
                        helpers.moveFile(cache_file, final_file)
                        msg = 'moved'
                    except (OSError, Exception):
                        msg = 'copied cached file'
                    logger.log(u'Saved %s link and %s to %s' % (link_type, msg, final_file))
                    saved = True
                    break

                remove_file_failed(cache_file)

        if 'Referer' in self.session.headers:
            if ref_state:
                self.session.headers['Referer'] = ref_state
            else:
                del self.session.headers['Referer']

        if not saved and 'magnet' == link_type:
            logger.log(u'All torrent cache servers failed to return a downloadable result', logger.ERROR)
            logger.log(u'Advice: in search settings, change from method blackhole to direct torrent client connect',
                       logger.ERROR)
            final_file = ek.ek(os.path.join, final_dir, '%s.%s' % (helpers.sanitizeFileName(result.name), link_type))
            try:
                with open(final_file, 'wb') as fp:
                    fp.write(result.url)
                    fp.flush()
                    os.fsync(fp.fileno())
                logger.log(u'Saved magnet link to file as some clients (or plugins) support this, %s' % final_file)

            except (StandardError, Exception):
                pass
        elif not saved:
            logger.log(u'Server failed to return anything useful', logger.ERROR)

        return saved
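
The hash handling at the top of this method normalises a 32-character base32 btih to 40-character hex before building the cache-server URLs. As an isolated sketch; btih_from_magnet is illustrative and keeps the source's regex, quirks included:

import re
from base64 import b16encode, b32decode

def btih_from_magnet(magnet_url):
    # pull the btih out of a magnet link; a 32-char hash is base32 and is
    # re-encoded as 40-char hex, a 40-char hash is already hex
    match = re.findall('(?i)urn:btih:([0-9a-f]{32,40})', magnet_url)
    if not match:
        return None
    torrent_hash = match[0].upper()
    if 32 == len(torrent_hash):
        torrent_hash = b16encode(b32decode(torrent_hash)).lower()
    return torrent_hash
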
Exemplo n.º 39
0
def _update_zoneinfo():
    global sb_timezone
    sb_timezone = tz.tzlocal()

    # now check if the zoneinfo needs update
    url_zv = 'https://raw.githubusercontent.com/Prinz23/sb_network_timezones/master/zoneinfo.txt'

    url_data = helpers.getURL(url_zv)
    if url_data is None:
        # When url_data is None, there was trouble connecting to github
        logger.log(u"Loading zoneinfo.txt failed. Unable to get URL: " + url_zv, logger.ERROR)
        return

    if lib.dateutil.zoneinfo.ZONEINFOFILE is not None:
        cur_zoneinfo = ek.ek(basename, lib.dateutil.zoneinfo.ZONEINFOFILE)
    else:
        cur_zoneinfo = None
    (new_zoneinfo, zoneinfo_md5) = url_data.decode('utf-8').strip().rsplit(u' ')

    if (cur_zoneinfo is not None) and (new_zoneinfo == cur_zoneinfo):
        return

    # now load the new zoneinfo
    url_tar = u'https://raw.githubusercontent.com/Prinz23/sb_network_timezones/master/' + new_zoneinfo

    zonefile = helpers.real_path(ek.ek(join, ek.ek(os.path.dirname, lib.dateutil.zoneinfo.__file__), new_zoneinfo))
    zonefile_tmp = re.sub(r"\.tar\.gz$", '.tmp', zonefile)

    if ek.ek(os.path.exists, zonefile_tmp):
        try:
            ek.ek(os.remove, zonefile_tmp)
        except:
            logger.log(u"Unable to delete: " + zonefile_tmp, logger.ERROR)
            return

    if not helpers.download_file(url_tar, zonefile_tmp):
        return

    if not ek.ek(os.path.exists, zonefile_tmp):
        logger.log(u"Download of " + zonefile_tmp + " failed.", logger.ERROR)
        return

    new_hash = str(helpers.md5_for_file(zonefile_tmp))

    if zoneinfo_md5.upper() == new_hash.upper():
        logger.log(u"Updating timezone info with new one: " + new_zoneinfo, logger.MESSAGE)
        try:
            # remove the old zoneinfo file
            if cur_zoneinfo is not None:
                old_file = helpers.real_path(
                    ek.ek(join, ek.ek(os.path.dirname, lib.dateutil.zoneinfo.__file__), cur_zoneinfo))
                if ek.ek(os.path.exists, old_file):
                    ek.ek(os.remove, old_file)
            # rename downloaded file
            ek.ek(os.rename, zonefile_tmp, zonefile)
            # load the new zoneinfo
            reload(lib.dateutil.zoneinfo)
            sb_timezone = tz.tzlocal()
        except:
            _remove_zoneinfo_failed(zonefile_tmp)
            return
    else:
        _remove_zoneinfo_failed(zonefile_tmp)
        logger.log(u"MD5 HASH doesn't match: " + zoneinfo_md5.upper() + ' File: ' + new_hash.upper(), logger.ERROR)
        return
Exemplo n.º 40
0
def _update_zoneinfo():
    global sb_timezone
    sb_timezone = tz.tzlocal()

    # now check if the zoneinfo needs update
    url_zv = 'https://raw.githubusercontent.com/Prinz23/sb_network_timezones/master/zoneinfo.txt'

    url_data = helpers.getURL(url_zv)
    if url_data is None:
        # When url_data is None, there was trouble connecting to github
        logger.log(u'Loading zoneinfo.txt failed, this can happen from time to time. Unable to get URL: %s' % url_zv,
                   logger.WARNING)
        return

    zonefilename = zoneinfo._ZONEFILENAME
    cur_zoneinfo = zonefilename
    if None is not cur_zoneinfo:
        cur_zoneinfo = ek.ek(basename, zonefilename)
    zonefile = helpers.real_path(ek.ek(join, ek.ek(os.path.dirname, zoneinfo.__file__), cur_zoneinfo))
    zonemetadata = zoneinfo.gettz_db_metadata() if ek.ek(os.path.isfile, zonefile) else None
    (new_zoneinfo, zoneinfo_md5) = url_data.decode('utf-8').strip().rsplit(u' ')
    newtz_regex = re.search(r'(\d{4}[^.]+)', new_zoneinfo)
    if not newtz_regex or len(newtz_regex.groups()) != 1:
        return
    newtzversion = newtz_regex.group(1)

    if cur_zoneinfo is not None and zonemetadata is not None and \
            'tzversion' in zonemetadata and zonemetadata['tzversion'] == newtzversion:
        return

    # now load the new zoneinfo
    url_tar = u'https://raw.githubusercontent.com/Prinz23/sb_network_timezones/master/%s' % new_zoneinfo

    zonefile_tmp = re.sub(r'\.tar\.gz$', '.tmp', zonefile)

    if ek.ek(os.path.exists, zonefile_tmp):
        try:
            ek.ek(os.remove, zonefile_tmp)
        except:
            logger.log(u'Unable to delete: %s' % zonefile_tmp, logger.ERROR)
            return

    if not helpers.download_file(url_tar, zonefile_tmp):
        return

    if not ek.ek(os.path.exists, zonefile_tmp):
        logger.log(u'Download of %s failed.' % zonefile_tmp, logger.ERROR)
        return

    new_hash = str(helpers.md5_for_file(zonefile_tmp))

    if zoneinfo_md5.upper() == new_hash.upper():
        logger.log(u'Updating timezone info with new one: %s' % new_zoneinfo, logger.MESSAGE)
        try:
            # remove the old zoneinfo file
            if cur_zoneinfo is not None:
                old_file = helpers.real_path(
                    ek.ek(join, ek.ek(os.path.dirname, zoneinfo.__file__), cur_zoneinfo))
                if ek.ek(os.path.exists, old_file):
                    ek.ek(os.remove, old_file)
            # rename downloaded file
            ek.ek(os.rename, zonefile_tmp, zonefile)
            from dateutil.zoneinfo import gettz
            if '_CLASS_ZONE_INSTANCE' in gettz.func_globals:
                gettz.func_globals.__setitem__('_CLASS_ZONE_INSTANCE', list())

            sb_timezone = tz.tzlocal()
        except:
            _remove_zoneinfo_failed(zonefile_tmp)
            return
    else:
        _remove_zoneinfo_failed(zonefile_tmp)
        logger.log(u'MD5 hash does not match: %s File: %s' % (zoneinfo_md5.upper(), new_hash.upper()), logger.ERROR)
        return
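
This variant compares tz database versions rather than raw filenames, pulling the version string out of the tarball name with a small regex. A sketch of that extraction; tz_version_from_name and the filename are illustrative:

import re

def tz_version_from_name(zonefile_name):
    # e.g. 'dateutil-zoneinfo-2017b.tar.gz' -> '2017b'
    match = re.search(r'(\d{4}[^.]+)', zonefile_name)
    return match.group(1) if match else None
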
Exemplo n.º 41
0
    def download_result(self, result):
        """
        Save the result to disk.
        """

        # check for auth
        if not self._authorised():
            return False

        if GenericProvider.TORRENT == self.providerType:
            final_dir = sickbeard.TORRENT_DIR
            link_type = 'magnet'
            try:
                torrent_hash = re.findall('(?i)urn:btih:([0-9a-f]{32,40})', result.url)[0].upper()

                if 32 == len(torrent_hash):
                    torrent_hash = b16encode(b32decode(torrent_hash)).lower()

                if not torrent_hash:
                    logger.log('Unable to extract torrent hash from link: ' + result.url, logger.ERROR)
                    return False

                urls = ['http%s://%s/%s.torrent' % (u + (torrent_hash,))
                        for u in (('s', 'torcache.net/torrent'), ('', 'thetorrent.org/torrent'),
                                  ('s', 'itorrents.org/torrent'))]
            except:
                link_type = 'torrent'
                urls = [result.url]

        elif GenericProvider.NZB == self.providerType:
            final_dir = sickbeard.NZB_DIR
            link_type = 'nzb'
            urls = [result.url]

        else:
            return

        ref_state = 'Referer' in self.session.headers and self.session.headers['Referer']
        saved = False
        for url in urls:
            cache_dir = sickbeard.CACHE_DIR or helpers._getTempDir()
            base_name = '%s.%s' % (helpers.sanitizeFileName(result.name), self.providerType)
            cache_file = ek.ek(os.path.join, cache_dir, base_name)

            self.session.headers['Referer'] = url
            if helpers.download_file(url, cache_file, session=self.session):

                if self._verify_download(cache_file):
                    logger.log(u'Downloaded %s result from %s' % (self.name, url))
                    final_file = ek.ek(os.path.join, final_dir, base_name)
                    try:
                        helpers.moveFile(cache_file, final_file)
                        msg = 'moved'
                    except:
                        msg = 'copied cached file'
                    logger.log(u'Saved %s link and %s to %s' % (link_type, msg, final_file))
                    saved = True
                    break

                remove_file_failed(cache_file)

        if 'Referer' in self.session.headers:
            if ref_state:
                self.session.headers['Referer'] = ref_state
            else:
                del self.session.headers['Referer']

        if not saved:
            logger.log(u'All torrent cache servers failed to return a downloadable result', logger.ERROR)

        return saved
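
Both download_result variants save and restore the session's Referer header by hand around the URL loop. A context manager expresses the same restore-or-remove idea more safely; temp_header is a sketch, not a helper from the source:

from contextlib import contextmanager

@contextmanager
def temp_header(session, name, value):
    # set a header for the duration of a block, then restore the previous
    # value, or remove the header entirely if it was absent before
    sentinel = object()
    old = session.headers.get(name, sentinel)
    session.headers[name] = value
    try:
        yield
    finally:
        if old is sentinel:
            session.headers.pop(name, None)
        else:
            session.headers[name] = old

# usage: with temp_header(self.session, 'Referer', url): download inside the block
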
Exemplo n.º 42
0
def _update_zoneinfo():
    global sb_timezone
    sb_timezone = tz.tzlocal()

    # now check if the zoneinfo needs update
    url_zv = 'https://raw.githubusercontent.com/Prinz23/sb_network_timezones/master/zoneinfo.txt'

    try:
        url_data = helpers.getURL(url_zv)
        if not url_data:
            raise ValueError('no data returned from %s' % url_zv)

        if lib.dateutil.zoneinfo.ZONEINFOFILE is not None:
            cur_zoneinfo = ek.ek(basename, lib.dateutil.zoneinfo.ZONEINFOFILE)
        else:
            cur_zoneinfo = None

        (new_zoneinfo,
         zoneinfo_md5) = url_data.decode('utf-8').strip().rsplit(u' ')
    except:
        # when url_data is None there was trouble connecting to GitHub
        logger.log(
            u'Loading zoneinfo.txt failed; this can happen from time to time. Unable to get URL: %s'
            % url_zv, logger.WARNING)
        return

    if (cur_zoneinfo is not None) and (new_zoneinfo == cur_zoneinfo):
        return

    # now load the new zoneinfo
    url_tar = u'https://raw.githubusercontent.com/Prinz23/sb_network_timezones/master/%s' % new_zoneinfo

    zonefile = helpers.real_path(
        ek.ek(join, ek.ek(os.path.dirname, lib.dateutil.zoneinfo.__file__),
              new_zoneinfo))
    zonefile_tmp = re.sub(r'\.tar\.gz$', '.tmp', zonefile)

    if ek.ek(os.path.exists, zonefile_tmp):
        try:
            ek.ek(os.remove, zonefile_tmp)
        except:
            logger.log(u'Unable to delete: %s' % zonefile_tmp, logger.ERROR)
            return

    if not helpers.download_file(url_tar, zonefile_tmp):
        return

    if not ek.ek(os.path.exists, zonefile_tmp):
        logger.log(u'Download of %s failed.' % zonefile_tmp, logger.ERROR)
        return

    new_hash = str(helpers.md5_for_file(zonefile_tmp))

    if zoneinfo_md5.upper() == new_hash.upper():
        logger.log(u'Updating timezone info with new one: %s' % new_zoneinfo,
                   logger.INFO)
        try:
            # remove the old zoneinfo file
            if cur_zoneinfo is not None:
                old_file = helpers.real_path(
                    ek.ek(
                        join,
                        ek.ek(os.path.dirname, lib.dateutil.zoneinfo.__file__),
                        cur_zoneinfo))
                if ek.ek(os.path.exists, old_file):
                    ek.ek(os.remove, old_file)
            # rename downloaded file
            ek.ek(os.rename, zonefile_tmp, zonefile)
            # load the new zoneinfo
            reload(lib.dateutil.zoneinfo)
            sb_timezone = tz.tzlocal()
        except:
            _remove_zoneinfo_failed(zonefile_tmp)
            return
    else:
        _remove_zoneinfo_failed(zonefile_tmp)
        logger.log(
            u'MD5 hash does not match: %s File: %s' %
            (zoneinfo_md5.upper(), new_hash.upper()), logger.ERROR)
        return
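
helpers.md5_for_file is called above but not shown in these examples; a chunked hashlib equivalent, which hashes the file block by block so a large zonefile is never read into memory at once, might look like this sketch:

import hashlib

def md5_for_file(path, block_size=1 << 16):
    # hash the file in fixed-size chunks rather than reading it whole
    md5 = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(block_size), b''):
            md5.update(chunk)
    return md5.hexdigest()

The comparison in the example is then case-insensitive: zoneinfo_md5.upper() == md5_for_file(zonefile_tmp).upper().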
Example No. 44
    def cache_image(self, image_url, image_path):
        # Only cache if the file does not exist yet
        if not ek(os.path.isfile, image_path):
            helpers.download_file(image_url, image_path, session=self.session)
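
helpers.download_file itself is not shown here; the same guard-then-download pattern written against plain requests (a sketch, not the project's actual helper) could look like:

import os
import requests

def cache_image(image_url, image_path, session=None):
    # only hit the network if the file is not cached yet
    if not os.path.isfile(image_path):
        session = session or requests.Session()
        response = session.get(image_url, timeout=30)
        response.raise_for_status()
        with open(image_path, 'wb') as f:
            f.write(response.content)

Because the guard is on the destination path, repeated calls for an already-cached image cost only one os.path.isfile check.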
Example No. 45
    def update(self):
        """
        Downloads the latest source tarball from github and installs it over the existing version.
        """

        tar_download_url = 'http://github.com/' + self.github_org + '/' + self.github_repo + '/tarball/' + self.branch

        try:
            # prepare the update dir
            sr_update_dir = ek(os.path.join, sickbeard.PROG_DIR, 'sr-update')

            if ek(os.path.isdir, sr_update_dir):
                logging.info("Clearing out update folder " + sr_update_dir + " before extracting")
                ek(removetree, sr_update_dir)

            logging.info("Creating update folder " + sr_update_dir + " before extracting")
            ek(os.makedirs, sr_update_dir)

            # retrieve file
            logging.info("Downloading update from " + repr(tar_download_url))
            tar_download_path = ek(os.path.join, sr_update_dir, 'sr-update.tar')
            helpers.download_file(tar_download_url, tar_download_path, session=self.session)

            if not ek(os.path.isfile, tar_download_path):
                logging.warning("Unable to retrieve new version from " + tar_download_url + ", can't update")
                return False

            if not ek(tarfile.is_tarfile, tar_download_path):
                logging.error("Retrieved version from " + tar_download_url + " is corrupt, can't update")
                return False

            # extract to sr-update dir
            logging.info("Extracting file " + tar_download_path)
            tar = tarfile.open(tar_download_path)
            tar.extractall(sr_update_dir)
            tar.close()

            # delete the downloaded archive
            logging.info("Deleting file " + tar_download_path)
            ek(os.remove, tar_download_path)

            # find update dir name
            update_dir_contents = [x for x in ek(os.listdir, sr_update_dir) if
                                   ek(os.path.isdir, ek(os.path.join, sr_update_dir, x))]
            if len(update_dir_contents) != 1:
                logging.error("Invalid update data, update failed: " + str(update_dir_contents))
                return False
            content_dir = ek(os.path.join, sr_update_dir, update_dir_contents[0])

            # walk temp folder and move files to main folder
            logging.info("Moving files from " + content_dir + " to " + sickbeard.PROG_DIR)
            for dirname, _, filenames in ek(os.walk, content_dir):  # @UnusedVariable
                dirname = dirname[len(content_dir) + 1:]
                for curfile in filenames:
                    old_path = ek(os.path.join, content_dir, dirname, curfile)
                    new_path = ek(os.path.join, sickbeard.PROG_DIR, dirname, curfile)

                    # Avoid DLL access problems on WIN32/64;
                    # these files need to be updated manually,
                    # or we need a way to release the lock held in memory
                    if curfile in ('unrar.dll', 'unrar64.dll'):
                        try:
                            ek(os.chmod, new_path, stat.S_IWRITE)
                            ek(os.remove, new_path)
                            ek(os.renames, old_path, new_path)
                        except Exception as e:
                            logging.debug("Unable to update " + new_path + ': ' + ex(e))
                            ek(os.remove, old_path)  # trash the updated file without moving it into the new path
                        continue

                    if ek(os.path.isfile, new_path):
                        ek(os.remove, new_path)
                    ek(os.renames, old_path, new_path)

            sickbeard.CUR_COMMIT_HASH = self._newest_commit_hash
            sickbeard.CUR_COMMIT_BRANCH = self.branch

        except Exception as e:
            logging.error("Error while trying to update: {}".format(ex(e)))
            logging.debug("Traceback: " + traceback.format_exc())
            return False

        # Notify update successful
        notifiers.notify_git_update(sickbeard.NEWEST_VERSION_STRING)

        return True
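
GitHub tarballs wrap the repository in a single '<org>-<repo>-<sha>/' directory, which is why the update above insists on exactly one entry in sr_update_dir. A minimal standalone sketch of the verify-and-extract steps (extract_update is an illustrative name, not a project helper):

import os
import tarfile

def extract_update(tar_path, dest_dir):
    # is_tarfile guards against truncated downloads or an HTML error page
    if not tarfile.is_tarfile(tar_path):
        raise ValueError('%s is not a valid tar archive' % tar_path)
    with tarfile.open(tar_path) as tar:
        tar.extractall(dest_dir)
    # expect the single top-level folder a GitHub tarball contains
    entries = [x for x in os.listdir(dest_dir)
               if os.path.isdir(os.path.join(dest_dir, x))]
    if 1 != len(entries):
        raise ValueError('unexpected archive layout: %r' % entries)
    return os.path.join(dest_dir, entries[0])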
Example No. 46
    def download_result(self, result):
        """
        Save the result to disk.
        """

        # check for auth
        if not self._do_login():
            return False

        if GenericProvider.TORRENT == self.providerType:
            try:
                torrent_hash = re.findall('urn:btih:([0-9a-f]{32,40})',
                                          result.url)[0].upper()

                if 32 == len(torrent_hash):
                    torrent_hash = b16encode(b32decode(torrent_hash)).lower()

                if not torrent_hash:
                    logger.log(
                        'Unable to extract torrent hash from link: ' +
                        ex(result.url), logger.ERROR)
                    return False

                urls = [
                    'https://%s/%s.torrent' % (u, torrent_hash)
                    for u in ('torcache.net/torrent', 'torrage.com/torrent',
                              'getstrike.net/torrents/api/download')
                ]
            except:
                urls = [result.url]

            filename = ek.ek(
                os.path.join, sickbeard.TORRENT_DIR,
                helpers.sanitizeFileName(result.name) + '.' +
                self.providerType)
        elif GenericProvider.NZB == self.providerType:
            urls = [result.url]

            filename = ek.ek(
                os.path.join, sickbeard.NZB_DIR,
                helpers.sanitizeFileName(result.name) + '.' +
                self.providerType)
        else:
            return

        for url in urls:
            if helpers.download_file(url, filename, session=self.session):
                logger.log(u'Downloaded a result from ' + self.name + ' at ' +
                           url)

                if GenericProvider.TORRENT == self.providerType:
                    logger.log(u'Saved magnet link to ' + filename,
                               logger.MESSAGE)
                else:
                    logger.log(u'Saved result to ' + filename, logger.MESSAGE)

                if self._verify_download(filename):
                    return True
                elif ek.ek(os.path.isfile, filename):
                    ek.ek(os.remove, filename)

        logger.log(u'Failed to download result', logger.ERROR)
        return False
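
All of these download_result variants share one shape: build a list of candidate URLs, try each until a download passes verification, and delete anything that fails. A provider-agnostic sketch of that loop, with download_file and verify standing in for the helpers used above:

import os

def download_first_verified(urls, filename, download_file, verify):
    # try each mirror in order; keep the first download that verifies
    for url in urls:
        if not download_file(url, filename):
            continue
        if verify(filename):
            return True
        # the file arrived but failed verification: remove it and try the next
        if os.path.isfile(filename):
            os.remove(filename)
    return False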