Example #1
    def getDBcompare(self):
        """
        Compare the current DB version with the new branch version.

        :return: 'upgrade', 'equal', or 'downgrade'
        """
        try:
            self.updater.need_update()
            cur_hash = str(self.updater.get_newest_commit_hash())
            assert len(cur_hash) == 40, 'Commit hash wrong length: {length} hash: {hash}'.format(
                length=len(cur_hash), hash=cur_hash)

            check_url = 'http://cdn.rawgit.com/{org}/{repo}/{commit}/sickbeard/databases/main_db.py'.format(
                org=sickbeard.GIT_ORG, repo=sickbeard.GIT_REPO, commit=cur_hash)
            response = helpers.getURL(check_url, session=self.session)

            if response.status_code == 404:
                check_url = check_url.replace('main_db.py', 'mainDB.py')
                response = helpers.getURL(check_url, session=self.session)

            match = re.search(r'MAX_DB_VERSION\s=\s(?P<version>\d{2,3})', response.text)
            new_branch_db_version = int(match.group('version'))
            main_db_con = db.DBConnection()
            cur_branch_db_version = main_db_con.checkDBVersion()
            if new_branch_db_version > cur_branch_db_version:
                return 'upgrade'
            elif new_branch_db_version == cur_branch_db_version:
                return 'equal'
            else:
                return 'downgrade'
        except Exception as e:
            return repr(e)
Example #2
    def get_url(self, url, post_data=None, params=None, timeout=30, json=False, need_bytes=False):  # pylint: disable=too-many-arguments
        """
        need_bytes=True when trying to access torrent info (for calling the torrent client). In that case we must
        first parse the fetched data to get the URL of the torrent file
        """
        if need_bytes:
            data = helpers.getURL(url, headers=self.headers, timeout=timeout, session=self.session, returns='json')
            url = re.search(r'http://tumejorserie.com/descargar/.+\.torrent', data, re.DOTALL).group()

        return helpers.getURL(url, post_data=post_data, params=params, headers=self.headers, timeout=timeout,
                              session=self.session, json=json, need_bytes=need_bytes)
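A minimal usage sketch of the wrapper above; the provider instance and both URLs are placeholders for illustration:

# Hypothetical provider instance and URLs, for illustration only.
search_results = provider.get_url('http://tumejorserie.com/buscar?q=show', json=True)

# With need_bytes=True the info page is fetched first, the real .torrent
# link is extracted from it, and the torrent itself is returned as bytes.
torrent_bytes = provider.get_url('http://tumejorserie.com/info/123', need_bytes=True)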
Example #3
def sendNZB(nzb):  # pylint:disable=too-many-return-statements, too-many-branches, too-many-statements
    '''
    Sends an NZB to SABnzbd via the API.

    :param nzb: The NZBSearchResult object to send to SAB
    '''

    category = sickbeard.SAB_CATEGORY
    if nzb.show.is_anime:
        category = sickbeard.SAB_CATEGORY_ANIME

    # if it aired more than 7 days ago, override with the backlog category IDs
    for curEp in nzb.episodes:
        if datetime.date.today() - curEp.airdate > datetime.timedelta(days=7):
            category = sickbeard.SAB_CATEGORY_ANIME_BACKLOG if nzb.show.is_anime else sickbeard.SAB_CATEGORY_BACKLOG

    # set up a dict with the URL params in it
    params = {'output': 'json'}
    if sickbeard.SAB_USERNAME is not None:
        params['ma_username'] = sickbeard.SAB_USERNAME
    if sickbeard.SAB_PASSWORD is not None:
        params['ma_password'] = sickbeard.SAB_PASSWORD
    if sickbeard.SAB_APIKEY is not None:
        params['apikey'] = sickbeard.SAB_APIKEY

    if category is not None:
        params['cat'] = category

    if nzb.priority:
        params['priority'] = 2 if sickbeard.SAB_FORCED else 1

    if nzb.resultType == 'nzb':
        params['mode'] = 'addurl'
        params['name'] = nzb.url
    elif nzb.resultType == 'nzbdata':
        params['mode'] = 'addfile'
        multiPartParams = {'nzbfile': (nzb.name + '.nzb', nzb.extraInfo[0])}

    logger.log('Sending NZB to SABnzbd')

    url = urljoin(sickbeard.SAB_HOST, 'api')
    if nzb.resultType == 'nzb':
        jdata = helpers.getURL(url, params=params, session=session, returns='json', headers={'User-Agent': USER_AGENT})
    elif nzb.resultType == 'nzbdata':
        jdata = helpers.getURL(url, file=multiPartParams, session=session, returns='json', headers={'User-Agent': USER_AGENT})

    if not jdata:
        logger.log('Error connecting to sab, no data returned')
        return False

    logger.log('Result text from SAB: {0}'.format(jdata), logger.DEBUG)

    result, _ = _checkSabResponse(jdata)
    return result
Example #4
def sendNZB(nzb):  # pylint:disable=too-many-return-statements, too-many-branches, too-many-statements
    """
    Sends an NZB to SABnzbd via the API.

    :param nzb: The NZBSearchResult object to send to SAB
    """

    category = sickbeard.SAB_CATEGORY
    if nzb.show.is_anime:
        category = sickbeard.SAB_CATEGORY_ANIME

    # if it aired more than 7 days ago, override with the backlog category IDs
    for curEp in nzb.episodes:
        if datetime.date.today() - curEp.airdate > datetime.timedelta(days=7):
            category = sickbeard.SAB_CATEGORY_ANIME_BACKLOG if nzb.show.is_anime else sickbeard.SAB_CATEGORY_BACKLOG

    # set up a dict with the URL params in it
    params = {"output": "json"}
    if sickbeard.SAB_USERNAME:
        params["ma_username"] = sickbeard.SAB_USERNAME
    if sickbeard.SAB_PASSWORD:
        params["ma_password"] = sickbeard.SAB_PASSWORD
    if sickbeard.SAB_APIKEY:
        params["apikey"] = sickbeard.SAB_APIKEY

    if category:
        params["cat"] = category

    if nzb.priority:
        params["priority"] = 2 if sickbeard.SAB_FORCED else 1

    logger.log("Sending NZB to SABnzbd")
    url = urljoin(sickbeard.SAB_HOST, "api")

    if nzb.resultType == "nzb":
        params["mode"] = "addurl"
        params["name"] = nzb.url
        jdata = helpers.getURL(url, params=params, session=session, returns="json", verify=False)
    elif nzb.resultType == "nzbdata":
        params["mode"] = "addfile"
        multiPartParams = {"nzbfile": (nzb.name + ".nzb", nzb.extraInfo[0])}
        jdata = helpers.getURL(url, params=params, file=multiPartParams, session=session, returns="json", verify=False)

    if not jdata:
        logger.log("Error connecting to sab, no data returned")
        return False

    logger.log("Result text from SAB: {0}".format(jdata), logger.DEBUG)

    result, error_ = _checkSabResponse(jdata)
    return result
Example #5
def testAuthentication(host=None, username=None, password=None, apikey=None):
    """
    Sends a simple API request to SAB to determine if the given connection information is correct

    :param host: The host where SAB is running (incl port)
    :param username: The username to use for the HTTP request
    :param password: The password to use for the HTTP request
    :param apikey: The API key to provide to SAB
    :return: A tuple containing the success boolean and a message
    """

    # build up the URL parameters
    params = {"mode": "queue", "output": "json", "ma_username": username, "ma_password": password, "apikey": apikey}

    url = urljoin(host, "api")

    data = helpers.getURL(url, params=params, session=session, returns="json", verify=False)
    if not data:
        return False, data

    # check the result and determine if it's good or not
    result, sabText = _checkSabResponse(data)
    if not result:
        return False, sabText

    return True, "Success"
Example #6
    def _authorised(self, **kwargs):

        return super(ShazbatProvider, self)._authorised(
            logged_in=(lambda x=None: '<input type="password"' not in helpers.getURL(
                self.urls['feeds'], session=self.session)),
            post_params={'tv_login': self.username, 'tv_password': self.password,
                         'referer': 'login', 'query': '', 'email': ''})
Example #7
def testAuthentication(host=None, username=None, password=None, apikey=None):
    '''
    Sends a simple API request to SAB to determine if the given connection information is correct

    :param host: The host where SAB is running (incl port)
    :param username: The username to use for the HTTP request
    :param password: The password to use for the HTTP request
    :param apikey: The API key to provide to SAB
    :return: A tuple containing the success boolean and a message
    '''

    # build up the URL parameters
    params = {
        'mode': 'queue',
        'output': 'json',
        'ma_username': username,
        'ma_password': password,
        'apikey': apikey
    }

    url = urljoin(host, 'api')

    data = helpers.getURL(url, params=params, session=session, returns='json')
    if not data:
        return False, data

    # check the result and determine if it's good or not
    result, sabText = _checkSabResponse(data)
    if not result:
        return False, sabText

    return True, 'Success'
Example #8
    def get_url_x(self, url, token=None, **kwargs):

        if not token:
            token = self.token
        if not url.startswith('http'):
            url = 'http://' + url

        for x in range(0, 3):
            if 0 < x:
                sleep(0.5)
            try:
                headers = {'X-Plex-Device-Name': 'SickGear',
                           'X-Plex-Platform': platform.system(), 'X-Plex-Device': platform.system(),
                           'X-Plex-Platform-Version': platform.release(),
                           'X-Plex-Provides': 'controller', 'X-Plex-Product': 'Python',
                           'X-Plex-Client-Identifier': self.client_id,
                           'X-Plex-Version': str(self.config_version),
                           'X-Plex-Token': token,
                           'Accept': 'application/xml'
                           }
                if self.username:
                    headers.update({'X-Plex-Username': self.username})
                page = getURL(url, headers=headers, **kwargs)
                if page:
                    parsed = etree.fromstring(page)
                    if None is not parsed and len(parsed):
                        return parsed
                    return None

            except Exception as e:
                self.log('Error requesting page: %s' % e)
                continue
        return None
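A minimal usage sketch of the retrying XML fetcher above; the client instance and server address are assumptions for illustration:

# Hypothetical Plex client instance and server address.
sections = client.get_url_x('192.168.1.10:32400/library/sections')
if sections is not None:
    # get_url_x returns a parsed ElementTree root, so XML queries work directly.
    for directory in sections.findall('Directory'):
        print(directory.get('title'))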
Example #9
def show_info(indexer_id):
    try:
        cachedResult = _tvtumber_cache[str(indexer_id)]
        if time.time() < (cachedResult['mtime'] + UPDATE_INTERVAL):
            # cached result is still considered current, use it
            return cachedResult['response']
        # otherwise we just fall through to lookup
    except KeyError:
        pass  # no cached value, just fall through to lookup

    url = SHOW_LOOKUP_URL + '?indexer_id=' + str(indexer_id)
    data = helpers.getURL(url, timeout=60)  # give this a longer timeout b/c it may take a while
    result = json.loads(data)
    if not result:
        logger.log(u"Empty lookup result -> failed to find show id", logger.DEBUG)
        return None
    if result['error']:
        logger.log(u"Lookup failed: " + result['errorMessage'], logger.DEBUG)
        return None

    # result is good, store it for later
    _tvtumber_cache[str(indexer_id)] = {'mtime': time.time(),
                                        'response': result['show']}

    return result['show']
Example #10
    def get_feed(self, url, request_headers=None):

        if not self._check_auth_cookie():
            return

        session = None
        if self.provider and hasattr(self.provider, 'session'):
            session = self.provider.session

        response = helpers.getURL(url, headers=request_headers, session=session)
        if not response:
            return

        try:
            feed = feedparser.parse(response)
            if feed and 'entries' in feed:
                return feed

            if feed and 'error' in feed.feed:
                err_code = feed.feed['error']['code']
                err_desc = feed.feed['error']['description']
                logger.log(u'RSS ERROR:[%s] CODE:[%s]' % (err_desc, err_code), logger.DEBUG)
            else:
                logger.log(u'RSS error loading url: ' + url, logger.DEBUG)

        except Exception as e:
            logger.log(u'RSS error: ' + ex(e), logger.DEBUG)
Example #11
    def _do_login(self):

        logged_in = lambda: 'gft_uid' in self.session.cookies and 'gft_pass' in self.session.cookies
        if logged_in():
            return True

        if self._check_auth():
            helpers.getURL(self.urls['login_get'], session=self.session)
            login_params = {'username': self.username, 'password': self.password}
            response = helpers.getURL(self.urls['login_post'], post_data=login_params, session=self.session)
            if response and logged_in():
                return True

            logger.log(u'Failed to authenticate with %s, abort provider.' % self.name, logger.ERROR)

        return False
Example #12
    def getURL(self, url, post_data=None, params=None, timeout=30, json=False):
        """
        By default this is just a simple urlopen call but this method should be overridden
        for providers with special URL requirements (like cookies)
        """

        # check for auth
        if not self._doLogin():
            return

        if self.proxy.isEnabled():
            self.headers.update({'Referer': self.proxy.getProxyURL()})
            # GlypeProxy SSL warning message
            self.proxyGlypeProxySSLwarning = self.proxy.getProxyURL(
            ) + 'includes/process.php?action=sslagree&submit=Continue anyway...'

        return helpers.getURL(
            self.proxy._buildURL(url),
            post_data=post_data,
            params=params,
            headers=self.headers,
            timeout=timeout,
            session=self.session,
            json=json,
            proxyGlypeProxySSLwarning=self.proxyGlypeProxySSLwarning)
Example #13
    def getDBcompare(self):
        try:
            self.updater.need_update()
            cur_hash = str(self.updater.get_newest_commit_hash())
            assert len(cur_hash) == 40, "Commit hash wrong length: {0} hash: {1}".format(
                len(cur_hash), cur_hash)

            check_url = "http://cdn.rawgit.com/{0}/{1}/{2}/sickbeard/databases/mainDB.py".format(
                sickbeard.GIT_ORG, sickbeard.GIT_REPO, cur_hash)
            response = helpers.getURL(check_url,
                                      session=self.session,
                                      returns='text')
            assert response, "Empty response from {0}".format(check_url)

            match = re.search(r"MAX_DB_VERSION\s=\s(?P<version>\d{2,3})",
                              response)
            branchDestDBversion = int(match.group('version'))
            main_db_con = db.DBConnection()
            branchCurrDBversion = main_db_con.checkDBVersion()
            if branchDestDBversion > branchCurrDBversion:
                return 'upgrade'
            elif branchDestDBversion == branchCurrDBversion:
                return 'equal'
            else:
                return 'downgrade'
        except Exception as e:
            return repr(e)
Example #14
def _xem_exceptions_fetcher():
    global xem_exception_dict
    global xem_session

    if shouldRefresh('xem'):
        for indexer in sickbeard.indexerApi().indexers:
            logger.log(u"Checking for XEM scene exception updates for " + sickbeard.indexerApi(indexer).name)

            url = "http://thexem.de/map/allNames?origin=%s&seasonNumbers=1" % sickbeard.indexerApi(indexer).config[
                'xem_origin']

            parsedJSON = helpers.getURL(url, session=xem_session, json=True)
            if not parsedJSON:
                logger.log(u"Check scene exceptions update failed for " + sickbeard.indexerApi(
                    indexer).name + ", Unable to get URL: " + url, logger.ERROR)
                continue

            if parsedJSON['result'] == 'failure':
                continue

            for indexerid, names in parsedJSON['data'].iteritems():
                try:
                    xem_exception_dict[int(indexerid)] = names
                except Exception as e:
                    logger.log(u"XEM: Rejected entry: indexerid:{0}; names:{1}".format(indexerid, names), logger.WARNING)
                    logger.log(u"XEM: Rejected entry error message:{0}".format(str(e)), logger.DEBUG)

        setLastRefresh('xem')

    return xem_exception_dict
Example #15
    def _find_newest_version(self, whole_link=False):
        """
        Checks git for the newest Windows binary build. Returns either the
        build number or the entire build URL depending on whole_link's value.

        whole_link: If True, returns the entire URL to the release. If False, it returns
                    only the build number. default: False
        """

        regex = ".*SickBeard\-win32\-alpha\-build(\d+)(?:\.\d+)?\.zip"

        version_url_data = helpers.getURL(self.version_url)

        if version_url_data is None:
            return None
        else:
            for curLine in version_url_data.splitlines():
                logger.log(u"checking line " + curLine, logger.DEBUG)
                match = re.match(regex, curLine)
                if match:
                    logger.log(u"found a match", logger.DEBUG)
                    if whole_link:
                        return curLine.strip()
                    else:
                        return int(match.group(1))

        return None
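To illustrate what the regex above matches, a quick self-contained check against a made-up build filename:

import re

# Made-up sample line; real pages would list actual release filenames.
sample = 'SickBeard-win32-alpha-build507.zip'
match = re.match(r'.*SickBeard\-win32\-alpha\-build(\d+)(?:\.\d+)?\.zip', sample)
assert match and int(match.group(1)) == 507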
Example #16
    def _doLogin(self):

        if self.token is not None:
            if time.time() < (self.tokenLastUpdate + 30 * 60):
                logger.log('T411 Authentication token is still valid', logger.DEBUG)
                return True

        login_params = {'username': self.username,
                        'password': self.password}

        logger.log('Performing authentication to T411', logger.DEBUG)

        response = helpers.getURL(self.urls['login_page'], post_data=login_params, timeout=30, json=True)
        if not response:
            logger.log(u'Unable to connect to ' + self.name + ' provider.', logger.WARNING)
            return False

        if response and 'token' in response:
            self.token = response['token']
            self.tokenLastUpdate = time.time()
            self.uid = response['uid'].encode('ascii', 'ignore')
            self.session.auth = T411Auth(self.token)
            logger.log('Using T411 Authorization token : ' + self.token, logger.DEBUG)
            return True
        else:
            logger.log('T411 token not found in authentication response', logger.WARNING)
            return False
Example #17
def _xem_excpetions_fetcher(indexer):
    global MAX_XEM_AGE_SECS

    exception_dict = {}

    cacheDB = db.DBConnection('cache.db')

    rows = cacheDB.select("SELECT last_refreshed FROM scene_exceptions_refresh WHERE list = ?",
                          ['xem'])
    if rows:
        refresh = time.time() > (int(rows[0]['last_refreshed']) + MAX_XEM_AGE_SECS)
    else:
        refresh = True

    if refresh:
        url = "http://thexem.de/map/allNames?origin=%s&seasonNumbers=1" % sickbeard.indexerApi(indexer).config['xem_origin']

        url_data = helpers.getURL(url, json=True)
        if url_data is None:
            logger.log(u"Check scene exceptions update failed. Unable to get URL: " + url, logger.ERROR)
            return exception_dict

        if url_data['result'] == 'failure':
            return exception_dict

        cacheDB.action("INSERT OR REPLACE INTO scene_exceptions_refresh (list, last_refreshed) VALUES (?,?)",
                       ['xem', time.time()])

        for indexerid, names in url_data['data'].items():
            exception_dict[int(indexerid)] = names

    return exception_dict
Example #18
    def getURL(self, url, post_data=None, params=None, timeout=30, json=False):
        """
        By default this is just a simple urlopen call but this method should be overridden
        for providers with special URL requirements (like cookies)
        """

        # check for auth
        if not self._doLogin():
            return

        if self.proxy.isEnabled():
            self.headers.update({"Referer": self.proxy.getProxyURL()})
            self.proxyGlypeProxySSLwarning = (
                self.proxy.getProxyURL() + "includes/process.php?action=sslagree&submit=Continue anyway..."
            )
        else:
            if "Referer" in self.headers:
                self.headers.pop("Referer")
            self.proxyGlypeProxySSLwarning = None

        return helpers.getURL(
            self.proxy._buildURL(url),
            post_data=post_data,
            params=params,
            headers=self.headers,
            timeout=timeout,
            session=self.session,
            json=json,
            proxyGlypeProxySSLwarning=self.proxyGlypeProxySSLwarning,
        )
Example #19
def _xem_exceptions_fetcher():
    global xem_exception_dict

    xem_list = 'xem_us'
    for show in sickbeard.showList:
        if show.is_anime and not show.paused:
            xem_list = 'xem'
            break

    if shouldRefresh(xem_list):
        for indexer in [i for i in sickbeard.indexerApi().indexers if 'xem_origin' in sickbeard.indexerApi(i).config]:
            logger.log(u'Checking for XEM scene exception updates for %s' % sickbeard.indexerApi(indexer).name)

            url = 'http://thexem.de/map/allNames?origin=%s%s&seasonNumbers=1'\
                  % (sickbeard.indexerApi(indexer).config['xem_origin'], ('&language=us', '')['xem' == xem_list])

            parsed_json = helpers.getURL(url, json=True, timeout=90)
            if not parsed_json:
                logger.log(u'Check scene exceptions update failed for %s, Unable to get URL: %s'
                           % (sickbeard.indexerApi(indexer).name, url), logger.ERROR)
                continue

            if 'failure' == parsed_json['result']:
                continue

            for indexerid, names in parsed_json['data'].items():
                try:
                    xem_exception_dict[int(indexerid)] = names
                except:
                    continue

        setLastRefresh(xem_list)

    return xem_exception_dict
Example #20
    def _getTVRageInfo(self, season=None, episode=None, full=False):

        url = "http://services.tvrage.com/tools/quickinfo.php?"

        # if we need full info OR if we don't have a tvrage id, use show name
        if full == True or self.show.tvrid == 0:
            if self.show.tvrname != "" and self.show.tvrname != None:
                showName = self.show.tvrname
            else:
                showName = self.show.name

            urlData = {'show': showName.encode('utf-8')}

        # if we don't need full info and we have a tvrage id, use it
        else:
            urlData = {'sid': self.show.tvrid}

        if season != None and episode != None:
            urlData['ep'] = str(season)+'x'+str(episode)

        # build the URL
        url += urllib.urlencode(urlData)

        logger.log(u"Loading TVRage info from URL: " + url, logger.DEBUG)

        try:
            result = helpers.getURL(url).decode('utf-8')
        except (urllib2.HTTPError, IOError), e:
            logger.log(u"Unable to load TVRage info: " + e.message.decode(sickbeard.SYS_ENCODING))
            raise exceptions.TVRageException("urlopen call to " + url + " failed")
Example #21
def _xem_get_ids(indexer_name, xem_origin):
    xem_ids = []

    url = 'http://thexem.de/map/havemap?origin=%s' % xem_origin

    task = 'Fetching show ids with%s xem scene mapping%s for origin'
    logger.log(u'%s %s' % (task % ('', 's'), indexer_name))
    parsed_json = helpers.getURL(url, json=True, timeout=90)
    if not parsed_json:
        logger.log(u'Failed %s %s, Unable to get URL: %s'
                   % (task.lower() % ('', 's'), indexer_name, url), logger.ERROR)
    else:
        if 'result' in parsed_json and 'success' == parsed_json['result'] and 'data' in parsed_json:
            try:
                for indexerid in parsed_json['data']:
                    xem_id = helpers.tryInt(indexerid)
                    if xem_id and xem_id not in xem_ids:
                        xem_ids.append(xem_id)
            except:
                pass
            if 0 == len(xem_ids):
                logger.log(u'Failed %s %s, no data items parsed from URL: %s'
                           % (task.lower() % ('', 's'), indexer_name, url), logger.WARNING)

    logger.log(u'Finished %s %s' % (task.lower() % (' %s' % len(xem_ids), helpers.maybe_plural(len(xem_ids))),
                                    indexer_name))
    return xem_ids
Example #22
    def getDBcompare(self):
        try:
            self.updater.need_update()
            cur_hash = str(self.updater.get_newest_commit_hash())
            assert len(cur_hash) == 40, "Commit hash wrong length: %s hash: %s" % (len(cur_hash), cur_hash)

            check_url = "http://cdn.rawgit.com/%s/%s/%s/sickbeard/databases/mainDB.py" % (
                sickbeard.GIT_ORG,
                sickbeard.GIT_REPO,
                cur_hash,
            )
            response = helpers.getURL(check_url, session=self.session)
            assert response, "Empty response from %s" % check_url

            match = re.search(r"MAX_DB_VERSION\s=\s(?P<version>\d{2,3})", response)
            branchDestDBversion = int(match.group("version"))
            main_db_con = db.DBConnection()
            branchCurrDBversion = main_db_con.checkDBVersion()
            if branchDestDBversion > branchCurrDBversion:
                return "upgrade"
            elif branchDestDBversion == branchCurrDBversion:
                return "equal"
            else:
                return "downgrade"
        except Exception as e:
            return repr(e)
Example #23
def _sabURLOpenSimple(url):
    try:
        result = helpers.getURL(url, throw_exc=True)
        f = StringIO.StringIO(result)
    except (EOFError, IOError), e:
        logger.log(u"Unable to connect to SAB: " + ex(e), logger.ERROR)
        return False, "Unable to connect"
Example #24
    def _get_showrss_id(cls, tvdb_id):
        try:
            unusedVar = cls.knownShows
        except AttributeError:
            cls.knownShows = {}

        try:
            cachedResult = cls.knownShows[str(tvdb_id)]
            if time.time() < (cachedResult['mtime'] + UPDATE_INTERVAL):
                # cached result is still considered current, use it
                return cachedResult['response']
            # otherwise we just fall through to lookup
        except KeyError:
            pass  # no cached value, just fall through to lookup

        url = SHOW_LOOKUP_URL + '?tvdb_id=' + str(tvdb_id)
        data = helpers.getURL(url)
        result = json.loads(data)
        if not result:
            logger.log(u"Empty lookup result -> failed to find show id", logger.DEBUG)
            return None
        if result['error']:
            logger.log(u"Lookup failed: " + result['errorMessage'], logger.DEBUG)
            return None

        # result is good, store it for later
        cls.knownShows[str(tvdb_id)] = {'mtime': time.time(),
                                        'response': result['show']['showrss_id']}

        return result['show']['showrss_id']
Example #25
def splitResult(result):

    try:
        urlData = helpers.getURL(result.url)
    except urllib2.URLError, e:
        logger.log(u"Unable to load url "+result.url+", can't download season NZB", logger.ERROR)
        return False
Example #26
def _xem_exceptions_fetcher():
    if shouldRefresh('xem'):
        for indexer in sickbeard.indexerApi().indexers:
            logger.log(u'Checking for XEM scene exception updates for {0}'.format
                       (sickbeard.indexerApi(indexer).name))

            url = 'http://thexem.de/map/allNames?origin={0}&seasonNumbers=1'.format(sickbeard.indexerApi(indexer).config['xem_origin'])

            parsedJSON = helpers.getURL(url, session=xem_session, timeout=90, returns='json')
            if not parsedJSON:
                logger.log(u'Check scene exceptions update failed for {0}, Unable to get URL: {1}'.format
                           (sickbeard.indexerApi(indexer).name, url), logger.DEBUG)
                continue

            if parsedJSON['result'] == 'failure':
                continue

            if not parsedJSON['data']:
                logger.log(u'No data returned from XEM when checking scene exceptions. Update failed for {0}'.format
                           (sickbeard.indexerApi(indexer).name), logger.DEBUG)
                continue

            for indexerid, names in iteritems(parsedJSON['data']):
                try:
                    xem_exception_dict[int(indexerid)] = names
                except Exception as error:
                    logger.log(u'XEM: Rejected entry: indexerid:{0}; names:{1}'.format(indexerid, names), logger.WARNING)
                    logger.log(u'XEM: Rejected entry error message:{0}'.format(error), logger.DEBUG)

        setLastRefresh('xem')

    return xem_exception_dict
Example #27
def getShowImage(url, imgNum=None):

    est = eec.set(getShowImage, str(url))
    image_data = None

    if url == None:
        eec.clock(est, False)
        return None

    # if they provided a fanart number try to use it instead
    if imgNum != None:
        tempURL = url.split('-')[0] + "-" + str(imgNum) + ".jpg"
    else:
        tempURL = url

    logger.log(u"Getting show image at "+tempURL, logger.DEBUG)

    image_data = helpers.getURL(tempURL)

    if image_data is None:
        logger.log(u"There was an error trying to retrieve the image, aborting", logger.ERROR)
        eec.clock(est, False)
        return None

    eec.clock(est, True)
    return image_data
Example #28
def _xem_exceptions_fetcher():
    global xem_exception_dict

    xem_list = 'xem_us'
    for show in sickbeard.showList:
        if show.is_anime and not show.paused:
            xem_list = 'xem'
            break

    if shouldRefresh(xem_list):
        for indexer in [i for i in sickbeard.indexerApi().indexers if 'xem_origin' in sickbeard.indexerApi(i).config]:
            logger.log(u'Checking for XEM scene exception updates for %s' % sickbeard.indexerApi(indexer).name)

            url = 'http://thexem.de/map/allNames?origin=%s%s&seasonNumbers=1'\
                  % (sickbeard.indexerApi(indexer).config['xem_origin'], ('&language=us', '')['xem' == xem_list])

            parsed_json = helpers.getURL(url, json=True, timeout=90)
            if not parsed_json:
                logger.log(u'Check scene exceptions update failed for %s, Unable to get URL: %s'
                           % (sickbeard.indexerApi(indexer).name, url), logger.ERROR)
                continue

            if 'failure' == parsed_json['result']:
                continue

            for indexerid, names in parsed_json['data'].items():
                try:
                    xem_exception_dict[int(indexerid)] = names
                except:
                    continue

        setLastRefresh(xem_list)

    return xem_exception_dict
Example #29
def _xem_get_ids(indexer_name, xem_origin):
    xem_ids = []

    url = 'http://thexem.de/map/havemap?origin=%s' % xem_origin

    task = 'Fetching show ids with%s xem scene mapping%s for origin'
    logger.log(u'%s %s' % (task % ('', 's'), indexer_name))
    parsed_json = helpers.getURL(url, json=True, timeout=90)
    if not parsed_json:
        logger.log(u'Failed %s %s, Unable to get URL: %s'
                   % (task.lower() % ('', 's'), indexer_name, url), logger.ERROR)
    else:
        if 'result' in parsed_json and 'success' == parsed_json['result'] and 'data' in parsed_json:
            try:
                for indexerid in parsed_json['data']:
                    xem_id = helpers.tryInt(indexerid)
                    if xem_id and xem_id not in xem_ids:
                        xem_ids.append(xem_id)
            except:
                pass
            if 0 == len(xem_ids):
                logger.log(u'Failed %s %s, no data items parsed from URL: %s'
                           % (task.lower() % ('', 's'), indexer_name, url), logger.WARNING)

    logger.log(u'Finished %s %s' % (task.lower() % (' %s' % len(xem_ids), helpers.maybe_plural(len(xem_ids))),
                                    indexer_name))
    return xem_ids
Example #30
def _xem_exceptions_fetcher():

    exception_dict = {}

    if shouldRefresh('xem'):
        success = False
        for indexer in sickbeard.indexerApi().indexers:
            logger.log(u"Checking for XEM scene exception updates for " + sickbeard.indexerApi(indexer).name)

            url = "http://thexem.de/map/allNames?origin=%s&seasonNumbers=1" % sickbeard.indexerApi(indexer).config[
                'xem_origin']

            url_data = helpers.getURL(url, json=True)
            if url_data is None:
                logger.log(u"Check scene exceptions update failed for " + sickbeard.indexerApi(
                    indexer).name + ", Unable to get URL: " + url, logger.ERROR)
                continue

            if url_data['result'] == 'failure':
                continue

            for indexerid, names in url_data['data'].items():
                exception_dict[int(indexerid)] = names

            success = True

        if success:
            setLastRefresh('xem')

    return exception_dict
Example #31
    def check_for_new_news(self, force=False):
        """
        Checks GitHub for the latest news.

        returns: str, a copy of the news

        force: if true the VERSION_NOTIFY setting will be ignored and a check will be forced
        """

        if not self.updater or not sickbeard.VERSION_NOTIFY and not sickbeard.AUTO_UPDATE and not force:
            logger.log(u"check_for_new_news: Version checking is disabled, not checking for latest news")
            return ''

        # Grab a copy of the news
        logger.log(u'check_for_new_news: Checking GitHub for latest news.', logger.DEBUG)
        try:
            news = helpers.getURL(sickbeard.NEWS_URL, session=requests.Session())
        except Exception:
            logger.log(u'check_for_new_news: Could not load news from repo.', logger.WARNING)
            news = ''

        if not news:
            return ''

        last_read = time.mktime(time.strptime(sickbeard.NEWS_LAST_READ, '%Y-%m-%d'))
        dates = re.finditer(r'^####(\d{4}-\d{2}-\d{2})####$', news, re.M)

        sickbeard.NEWS_UNREAD = 0
        gotLatest = False
        for match in dates:
            if not gotLatest:
                gotLatest = True
                sickbeard.NEWS_LATEST = match.group(1)

            if time.mktime(time.strptime(match.group(1), '%Y-%m-%d')) > last_read:
                sickbeard.NEWS_UNREAD += 1

        return news
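The date-heading regex above expects lines like '####YYYY-MM-DD####'; a small self-contained check with made-up news text:

import re

# Made-up sample news text, matching the '####YYYY-MM-DD####' heading format.
sample_news = '####2016-08-01####\nFixed a bug.\n####2016-07-15####\nOlder note.'
found = [m.group(1) for m in re.finditer(r'^####(\d{4}-\d{2}-\d{2})####$', sample_news, re.M)]
assert found == ['2016-08-01', '2016-07-15']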
Example #32
    def test_search(self):
        self.url = 'http://kickass.to/'
        searchURL = 'http://kickass.to/usearch/American%20Dad%21%20S08%20-S08E%20category%3Atv/?field=seeders&sorder=desc'

        html = getURL(searchURL)
        if not html:
            return

        soup = BeautifulSoup(html, features=["html5lib", "permissive"])

        torrent_table = soup.find('table', attrs={'class': 'data'})
        torrent_rows = torrent_table.find_all('tr') if torrent_table else []

        #Continue only if one Release is found
        if len(torrent_rows) < 2:
            print(u"The data returned does not contain any torrents")
            return

        for tr in torrent_rows[1:]:

            try:
                link = urlparse.urljoin(self.url, (tr.find('div', {'class': 'torrentname'}).find_all('a')[1])['href'])
                id = tr.get('id')[-7:]
                title = (tr.find('div', {'class': 'torrentname'}).find_all('a')[1]).text \
                    or (tr.find('div', {'class': 'torrentname'}).find_all('a')[2]).text
                url = tr.find('a', 'imagnet')['href']
                verified = True if tr.find('a', 'iverify') else False
                trusted = True if tr.find('img', {'alt': 'verified'}) else False
                seeders = int(tr.find_all('td')[-2].text)
                leechers = int(tr.find_all('td')[-1].text)
            except (AttributeError, TypeError):
                continue

            print(title)
Example #33
def _xem_exceptions_fetcher():
    global xem_exception_dict

    if shouldRefresh('xem'):
        for indexer in sickbeard.indexerApi().indexers:
            logger.log(u"Checking for XEM scene exception updates for " + sickbeard.indexerApi(indexer).name)

            url = "http://thexem.de/map/allNames?origin=%s&seasonNumbers=1" % sickbeard.indexerApi(indexer).config[
                'xem_origin']

            parsedJSON = helpers.getURL(url, json=True)
            if not parsedJSON:
                logger.log(u"Check scene exceptions update failed for " + sickbeard.indexerApi(
                    indexer).name + ", Unable to get URL: " + url, logger.ERROR)
                continue

            if parsedJSON['result'] == 'failure':
                continue

            for indexerid, names in parsedJSON['data'].items():
                xem_exception_dict[int(indexerid)] = names

        setLastRefresh('xem')

    return xem_exception_dict
Example #34
    def fetch_popular_shows(self):
        """Get popular show information from IMDB"""

        popular_shows = []

        data = helpers.getURL(self.url, session=self.session, params=self.params, headers={'Referer': 'http://akas.imdb.com/'}, returns='text')
        if not data:
            return None

        soup = BeautifulSoup(data, 'html5lib')
        results = soup.find("table", {"class": "results"})
        rows = results("tr")

        for row in rows:
            show = {}
            image_td = row.find("td", {"class": "image"})

            if image_td:
                image = image_td.find("img")
                show['image_url_large'] = self.change_size(image['src'], 3)
                show['image_path'] = ek(posixpath.join, 'images', 'imdb_popular', ek(os.path.basename, show['image_url_large']))

                self.cache_image(show['image_url_large'])

            td = row.find("td", {"class": "title"})

            if td:
                show['name'] = td.find("a").contents[0]
                show['imdb_url'] = "http://akas.imdb.com" + td.find("a")["href"]
                show['imdb_tt'] = show['imdb_url'][-10:][0:9]
                show['year'] = td.find("span", {"class": "year_type"}).contents[0].split(" ")[0][1:]

                rating_all = td.find("div", {"class": "user_rating"})
                if rating_all:
                    rating_string = rating_all.find("div", {"class": "rating rating-list"})
                    if rating_string:
                        rating_string = rating_string['title']

                        match = re.search(r".* (.*)\/10.*\((.*)\).*", rating_string)
                        if match:
                            matches = match.groups()
                            show['rating'] = matches[0]
                            show['votes'] = matches[1]
                        else:
                            show['rating'] = None
                            show['votes'] = None
                else:
                    show['rating'] = None
                    show['votes'] = None

                outline = td.find("span", {"class": "outline"})
                if outline:
                    show['outline'] = outline.contents[0]
                else:
                    show['outline'] = ''

                popular_shows.append(show)

        return popular_shows
Example #35
    def fetch_popular_shows(self):
        """Get popular show information from IMDB"""

        popular_shows = []

        data = helpers.getURL(self.url, session=self.session, params=self.params, headers={'Referer': 'http://akas.imdb.com/'}, returns='text')
        if not data:
            return None

        soup = BeautifulSoup(data, 'html5lib')
        results = soup.find("table", {"class": "results"})
        rows = results("tr")

        for row in rows:
            show = {}
            image_td = row.find("td", {"class": "image"})

            if image_td:
                image = image_td.find("img")
                show['image_url_large'] = self.change_size(image['src'], 3)
                show['image_path'] = ek(posixpath.join, 'images', 'imdb_popular', ek(os.path.basename, show['image_url_large']))

                self.cache_image(show['image_url_large'])

            td = row.find("td", {"class": "title"})

            if td:
                show['name'] = td.find("a").contents[0]
                show['imdb_url'] = "http://akas.imdb.com" + td.find("a")["href"]
                show['imdb_tt'] = show['imdb_url'][-10:][0:9]
                show['year'] = td.find("span", {"class": "year_type"}).contents[0].split(" ")[0][1:]

                rating_all = td.find("div", {"class": "user_rating"})
                if rating_all:
                    rating_string = rating_all.find("div", {"class": "rating rating-list"})
                    if rating_string:
                        rating_string = rating_string['title']

                        match = re.search(r".* (.*)\/10.*\((.*)\).*", rating_string)
                        if match:
                            matches = match.groups()
                            show['rating'] = matches[0]
                            show['votes'] = matches[1]
                        else:
                            show['rating'] = None
                            show['votes'] = None
                else:
                    show['rating'] = None
                    show['votes'] = None

                outline = td.find("span", {"class": "outline"})
                if outline:
                    show['outline'] = outline.contents[0]
                else:
                    show['outline'] = ''

                popular_shows.append(show)

        return popular_shows
Example #36
    def _getTVRageInfo(self, season=None, episode=None, full=False):

        url = "http://services.tvrage.com/tools/quickinfo.php?"

        # if we need full info OR if we don't have a tvrage id, use show name
        if full == True or self.show.tvrid == 0:
            if self.show.tvrname != "" and self.show.tvrname != None:
                showName = self.show.tvrname
            else:
                showName = self.show.name

            urlData = {'show': showName.encode('utf-8')}

        # if we don't need full info and we have a tvrage id, use it
        else:
            urlData = {'sid': self.show.tvrid}

        if season != None and episode != None:
            urlData['ep'] = str(season) + 'x' + str(episode)

        # build the URL
        url += urllib.urlencode(urlData)

        logger.log(u"Loading TVRage info from URL: " + url, logger.DEBUG)
        result = helpers.getURL(url)

        if result is None:
            raise exceptions.TVRageException("urlopen call to " + url +
                                             " failed")
        else:
            result = result.decode('utf-8')

        urlData = result.splitlines()
        info = {}

        for x in urlData:
            if x.startswith("No Show Results Were Found"):
                logger.log(u"TVRage returned: " + x.encode('utf-8'),
                           logger.WARNING)
                return info

            if "@" in x:
                key, value = x.split("@")
                if key:
                    key = key.replace('<pre>', '')
                    info[key] = value.strip()
            else:
                logger.log(u"TVRage returned: " + x.encode('utf-8'),
                           logger.WARNING)
                return info

        # save it for later in case somebody is curious
        if 'Show ID' in info:
            self._tvrid = info['Show ID']

        if 'Show Name' in info:
            self._tvrname = info['Show Name']

        return info
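The quickinfo endpoint answers with 'key@value' lines, which the loop above splits apart; a self-contained illustration with made-up response lines:

# Made-up sample of the 'key@value' quickinfo response format.
sample = '<pre>Show ID@2930\nShow Name@Some Show'
info = {}
for line in sample.splitlines():
    key, value = line.split('@')
    info[key.replace('<pre>', '')] = value.strip()
assert info == {'Show ID': '2930', 'Show Name': 'Some Show'}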
Example #37
    def _extract_name_from_torrent(self, url):
        try:
            contents = helpers.getURL(url, [])
        except (urllib2.HTTPError, IOError), e:
            logger.log(
                u"Error loading " + self.name + " URL: " +
                str(sys.exc_info()) + " - " + str(e), logger.ERROR)
            return None
Example #38
    def getURL(self, url, post_data=None, params=None, timeout=30, json=False, needBytes=False):
        """
        By default this is just a simple urlopen call but this method should be overridden
        for providers with special URL requirements (like cookies)
        """

        return helpers.getURL(url, post_data=post_data, params=params, headers=self.headers, timeout=timeout,
                              session=self.session, json=json, needBytes=needBytes)
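A minimal sketch of the kind of override the docstring describes, assuming a hypothetical provider whose tracker needs a login cookie (the base class name and the _doLogin helper mirror other examples on this page but are assumptions):

class CookieProvider(GenericProvider):  # hypothetical subclass, for illustration
    def getURL(self, url, post_data=None, params=None, timeout=30, json=False, needBytes=False):
        # Log in first so self.session carries the auth cookies.
        if not self._doLogin():  # assumed auth helper, as in other examples above
            return None
        return helpers.getURL(url, post_data=post_data, params=params,
                              headers=self.headers, timeout=timeout,
                              session=self.session, json=json, needBytes=needBytes)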
Example #39
    def fetch_popular_shows(self):
        """Get popular show information from IMDB"""

        popular_shows = []

        data = helpers.getURL(self.url,
                              session=self.session,
                              params=self.params,
                              headers={'Referer': 'http://akas.imdb.com/'},
                              returns='text')
        if not data:
            return None

        soup = BeautifulSoup(data, 'html5lib')
        results = soup.find_all("div", {"class": "lister-item"})

        for row in results:
            show = {}
            image_div = row.find("div", {"class": "lister-item-image"})
            if image_div:
                image = image_div.find("img")
                show['image_url_large'] = self.change_size(
                    image['loadlate'], 3)
                show['imdb_tt'] = image['data-tconst']
                show['image_path'] = ek(
                    posixpath.join, 'images', 'imdb_popular',
                    ek(os.path.basename, show['image_url_large']))
                self.cache_image(show['image_url_large'])

            content = row.find("div", {"class": "lister-item-content"})
            if content:
                header = row.find("h3", {"class": "lister-item-header"})
                if header:
                    a_tag = header.find("a")
                    if a_tag:
                        show['name'] = a_tag.get_text(strip=True)
                        show['imdb_url'] = "http://www.imdb.com" + a_tag["href"]
                        show['year'] = header.find(
                            "span", {"class": "lister-item-year"}).contents[0].split(" ")[0][1:].strip("-")

                imdb_rating = row.find("div", {"class": "ratings-imdb-rating"})
                show['rating'] = imdb_rating['data-value'] if imdb_rating else None

                votes = row.find("span", {"name": "nv"})
                show['votes'] = votes['data-value'] if votes else None

                outline = content.find_all("p", {"class": "text-muted"})
                if outline and len(outline) >= 2:
                    show['outline'] = outline[1].contents[0].strip("\"")
                else:
                    show['outline'] = ''

                popular_shows.append(show)

        return popular_shows
Example #40
    def _getTVRageInfo(self, season=None, episode=None, full=False):

        url = "http://services.tvrage.com/tools/quickinfo.php?"

        if full or self.show.tvrid == 0:
            # for some reason, the previous code here forced the use of the tvrage slug
            # rather than the tvrage_id when 'full' was True.  I expect there was a
            # reason for this, so best to do the same.
            if self.show.tvrname != "" and self.show.tvrname != None:
                showName = self.show.tvrname
            else:
                showName = self.show.name
            urlData = {'show': showName.encode('utf-8')}

            if not full:  # as per above, only use tvtumbler if not 'full'
                tvtumb = tvtumbler.show_info(self.show.tvdbid)
                if tvtumb and 'tvrage_id' in tvtumb and tvtumb['tvrage_id']:
                    urlData = {'sid': tvtumb['tvrage_id']}

        # if we don't need full info and we have a tvrage id, use it
        else:
            urlData = {'sid': self.show.tvrid}

        if season != None and episode != None:
            urlData['ep'] = str(season) + 'x' + str(episode)

        # build the URL
        url += urllib.urlencode(urlData)

        logger.log(u"Loading TVRage info from URL: " + url, logger.DEBUG)
        result = helpers.getURL(url)

        if result is None:
            raise exceptions.TVRageException("urlopen call to " + url +
                                             " failed")
        else:
            result = result.decode('utf-8')

        urlData = result.splitlines()

        info = {}

        for x in urlData:
            if x.startswith("No Show Results Were Found"):
                logger.log(x.encode('utf-8'), logger.WARNING)
                return info
            key, value = x.split("@")
            key = key.replace('<pre>', '')
            info[key] = value.strip()

        # save it for later in case somebody is curious
        if info.has_key('Show ID'):
            self._tvrid = info['Show ID']

        if info.has_key('Show Name'):
            self._tvrname = info['Show Name']

        return info
Example #41
def update_network_dict():

    _update_zoneinfo()

    d = {}

    # network timezones are stored on github pages
    url = 'http://github.com/Prinz23/sb_network_timezones/raw/master/network_timezones.txt'

    url_data = helpers.getURL(url)

    if url_data is None:
        # When urlData is None, trouble connecting to github
        logger.log(
            u"Loading Network Timezones update failed. Unable to get URL: " +
            url, logger.ERROR)
        return

    try:
        for line in url_data.splitlines():
            (key, val) = line.decode('utf-8').strip().rsplit(u':', 1)
            if key == None or val == None:
                continue
            d[key] = val
    except (IOError, OSError):
        pass

    myDB = db.DBConnection("cache.db")
    # load current network timezones
    old_d = dict(myDB.select("SELECT * FROM network_timezones"))

    # list of sql commands to update the network_timezones table
    ql = []
    for cur_d, cur_t in d.iteritems():
        h_k = old_d.has_key(cur_d)
        if h_k and cur_t != old_d[cur_d]:
            # update old record
            ql.append([
                "UPDATE network_timezones SET network_name=?, timezone=? WHERE network_name=?",
                [cur_d, cur_t, cur_d]
            ])
        elif not h_k:
            # add new record
            ql.append([
                "INSERT INTO network_timezones (network_name, timezone) VALUES (?,?)",
                [cur_d, cur_t]
            ])
        if h_k:
            del old_d[cur_d]
    # remove deleted records
    if len(old_d) > 0:
        L = list(va for va in old_d)
        ql.append([
            "DELETE FROM network_timezones WHERE network_name IN (" +
            ','.join(['?'] * len(L)) + ")", L
        ])
    # change all network timezone infos at once (much faster)
    myDB.mass_action(ql)
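Each line of network_timezones.txt holds 'network name:timezone', split from the right so network names may themselves contain colons; a tiny illustration with a made-up line:

# Made-up sample line from network_timezones.txt.
line = u'NBC:America/New_York'
key, val = line.strip().rsplit(u':', 1)
assert (key, val) == (u'NBC', u'America/New_York')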
Example #42
def update_network_dict():
    """Update timezone information from SR repositories"""

    url = 'http://sickchill.github.io/sb_network_timezones/network_timezones.txt'
    data = helpers.getURL(url, session=helpers.make_session(), returns='text')
    if not data:
        logger.log(
            'Updating network timezones failed, this can happen from time to time. URL: {0}'
            .format(url), logger.WARNING)
        load_network_dict()
        return

    d = {}
    try:
        for line in data.splitlines():
            (key, val) = line.strip().rsplit(':', 1)
            if key and val:
                d[key.lower()] = val
    except (IOError, OSError):
        pass

    if not d:
        logger.log(
            'Parsing network timezones failed, not going to touch the db',
            logger.WARNING)
        load_network_dict()
        return

    cache_db_con = db.DBConnection('cache.db')

    network_list = dict(
        cache_db_con.select('SELECT * FROM network_timezones;'))

    queries = []
    for network, timezone in six.iteritems(d):
        existing = network in network_list
        if not existing:
            queries.append([
                'INSERT OR IGNORE INTO network_timezones VALUES (?,?);',
                [network, timezone]
            ])
        elif network_list[network] != timezone:
            queries.append([
                'UPDATE OR IGNORE network_timezones SET timezone = ? WHERE network_name = ?;',
                [timezone, network]
            ])

        if existing:
            del network_list[network]

    for network in network_list:
        queries.append([
            'DELETE FROM network_timezones WHERE network_name = ?;', [network]
        ])

    if queries:
        cache_db_con.mass_action(queries)
        load_network_dict()
Example #43
    def get_devices(self, pushbullet_api):
        logger.log(
            'Testing Pushbullet authentication and retrieving the device list.',
            logger.DEBUG)
        headers = {'Access-Token': pushbullet_api}
        return helpers.getURL(urljoin(self.url, 'devices'),
                              session=self.session,
                              headers=headers,
                              returns='text') or {}
Example #44
def splitResult(result):

    try:
        urlData = helpers.getURL(result.url)
    except urllib2.URLError, e:
        logger.log(
            u"Unable to load url " + result.url +
            ", can't download season NZB", logger.ERROR)
        return False
Example #45
def retrieve_exceptions():
    """
    Looks up the exceptions on github, parses them into a dict, and inserts them into the
    scene_exceptions table in cache.db. Also clears the scene name cache.
    """

    exception_dict = {}

    # exceptions are stored on github pages
    url = 'http://xbianonpi.github.com/sb_tvdb_scene_exceptions/exceptions.txt'

    logger.log(u"Check scene exceptions update")
    url_data = helpers.getURL(url)

    if url_data is None:
        # When urlData is None, trouble connecting to github
        logger.log(u"Check scene exceptions update failed. Unable to get URL: " + url, logger.ERROR)
        return

    else:
        # each exception is on one line with the format tvdb_id: 'show name 1', 'show name 2', etc
        for cur_line in url_data.splitlines():
            cur_line = cur_line.decode('utf-8')
            tvdb_id, sep, aliases = cur_line.partition(':') #@UnusedVariable

            if not aliases:
                continue

            tvdb_id = int(tvdb_id)

            # regex out the list of shows, taking \' into account
            alias_list = [re.sub(r'\\(.)', r'\1', x) for x in re.findall(r"'(.*?)(?<!\\)',?", aliases)]

            exception_dict[tvdb_id] = alias_list

        myDB = db.DBConnection("cache.db")

        changed_exceptions = False

        # write all the exceptions we got off the net into the database
        for cur_tvdb_id in exception_dict:

            # get a list of the existing exceptions for this ID
            existing_exceptions = [x["show_name"] for x in myDB.select("SELECT * FROM scene_exceptions WHERE tvdb_id = ?", [cur_tvdb_id])]

            for cur_exception in exception_dict[cur_tvdb_id]:
                # if this exception isn't already in the DB then add it
                if cur_exception not in existing_exceptions:
                    myDB.action("INSERT INTO scene_exceptions (tvdb_id, show_name) VALUES (?,?)", [cur_tvdb_id, cur_exception])
                    changed_exceptions = True

        # since this could invalidate the results of the cache we clear it out after updating
        if changed_exceptions:
            logger.log(u"Updated scene exceptions")
            name_cache.clearCache()
        else:
            logger.log(u"No scene exceptions update needed")
Example #46
def _update_zoneinfo():

    # now check if the zoneinfo needs update
    url_zv = 'http://github.com/Prinz23/sb_network_timezones/raw/master/zoneinfo.txt'

    url_data = helpers.getURL(url_zv)

    if url_data is None:
        # When urlData is None, trouble connecting to github
        logger.log(u"Loading zoneinfo.txt failed. Unable to get URL: " + url_zv, logger.DEBUG)
        return

    if (lib.dateutil.zoneinfo.ZONEINFOFILE != None):
        cur_zoneinfo = ek.ek(basename, lib.dateutil.zoneinfo.ZONEINFOFILE)
    else:
        cur_zoneinfo = None
    (new_zoneinfo, zoneinfo_md5) = url_data.decode('utf-8').strip().rsplit(u' ')

    if ((cur_zoneinfo != None) and (new_zoneinfo == cur_zoneinfo)):
        return

    # now load the new zoneinfo
    url_tar = u'http://github.com/Prinz23/sb_network_timezones/raw/master/' + new_zoneinfo
    zonefile = ek.ek(realpath, u'lib/dateutil/zoneinfo/' + new_zoneinfo)
    zonefile_tmp = re.sub(r'\.tar\.gz$', '.tmp', zonefile)

    if os.path.exists(zonefile_tmp):
        try:
            os.remove(zonefile_tmp)
        except OSError:
            logger.log(u"Unable to delete: " + zonefile_tmp, logger.ERROR)
            return

    if not helpers.download_file(url_tar, zonefile_tmp):
        return

    new_hash = str(helpers.md5_for_file(zonefile_tmp))

    if zoneinfo_md5.upper() == new_hash.upper():
        logger.log(u"Updating timezone info with new one: " + new_zoneinfo, logger.MESSAGE)
        try:
            # remove the old zoneinfo file
            if cur_zoneinfo is not None:
                old_file = ek.ek(realpath, u'lib/dateutil/zoneinfo/' + cur_zoneinfo)
                if os.path.exists(old_file):
                    os.remove(old_file)
            # rename downloaded file
            os.rename(zonefile_tmp, zonefile)
            # load the new zoneinfo
            reload(lib.dateutil.zoneinfo)
        except Exception:
            _remove_zoneinfo_failed(zonefile_tmp)
            return
    else:
        _remove_zoneinfo_failed(zonefile_tmp)
        logger.log(u"MD5 hash doesn't match: " + zoneinfo_md5.upper() + ' File: ' + new_hash.upper(), logger.ERROR)
        return
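The zoneinfo.txt payload parsed above is a single "<tarball name> <md5>" line, and the downloaded tarball is verified before swapping files. A small sketch of that verification, assuming a made-up digest and using a streaming stand-in for helpers.md5_for_file:

import hashlib

# hypothetical zoneinfo.txt content: "<tarball name> <md5 hex digest>"
url_data = u'dateutil-zoneinfo.tar.gz 9f2a33a05b5ae1e1b672e6c8a325fa4b'
(new_zoneinfo, zoneinfo_md5) = url_data.strip().rsplit(u' ', 1)

def md5_for_file(path, block_size=2 ** 16):
    # stream the file so a large tarball is never read into memory at once
    md5 = hashlib.md5()
    with open(path, 'rb') as f:
        for block in iter(lambda: f.read(block_size), b''):
            md5.update(block)
    return md5.hexdigest()

# after downloading new_zoneinfo to zonefile_tmp, compare case-insensitively:
# zoneinfo_md5.upper() == md5_for_file(zonefile_tmp).upper()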
Example #47
def can_reject(release_name):
    """
    Check if a release name should be rejected at external services.
    If any site reports result as a valid scene release, then return None, None.
    If predb reports result as nuked, then return nuke reason and url attempted.
    If fail to find result at all services, return reject and url details for each site.

    :param release_name: Release title
    :type release_name: String
    :return: None, None if release has no issue otherwise True/Nuke reason, URLs that rejected
    :rtype: Tuple (None, None or True/String, String)
    """
    rej_urls = []
    srrdb_url = 'https://www.srrdb.com/api/search/r:%s/order:date-desc' % re.sub(r'\]\[', '', release_name)
    resp = helpers.getURL(srrdb_url, json=True)
    if not resp:
        srrdb_rej = True
        rej_urls += ['Failed contact \'%s\'' % srrdb_url]
    else:
        srrdb_rej = (not len(resp.get('results', []))
                     or release_name.lower() != resp.get('results', [{}])[0].get('release', '').lower())
        rej_urls += ([], ['\'%s\'' % srrdb_url])[srrdb_rej]

    sane_name = helpers.full_sanitizeSceneName(release_name)
    predb_url = 'https://predb.ovh/api/v1/?q=@name "%s"' % sane_name
    resp = helpers.getURL(predb_url, json=True)
    predb_rej = True
    if not resp:
        rej_urls += ['Failed contact \'%s\'' % predb_url]
    elif 'success' == resp.get('status', '').lower():
        rows = resp and (resp.get('data') or {}).get('rows') or []
        for data in rows:
            if sane_name == helpers.full_sanitizeSceneName((data.get('name', '') or '').strip()):
                nuke_type = (data.get('nuke') or {}).get('type')
                if not nuke_type:
                    predb_rej = not helpers.tryInt(data.get('preAt'))
                else:
                    predb_rej = 'un' not in nuke_type and data.get('nuke', {}).get('reason', 'Reason not set')
                break
        rej_urls += ([], ['\'%s\'' % predb_url])[bool(predb_rej)]

    # accept when any service confirmed the release, otherwise report why not
    if any([not srrdb_rej, not predb_rej]):
        return None, None
    return predb_rej or True, ', '.join(rej_urls)
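Because the return value is three-state (accepted, not found, or nuked), callers need to distinguish None, True, and a reason string. A hedged usage sketch, with an invented release name:

# the release name below is made up for illustration
reason, urls = can_reject('Some.Show.S01E01.720p.HDTV.x264-GROUP')

if reason is None:
    pass  # at least one service confirmed the release; accept it
elif reason is True:
    print('rejected, not found at: %s' % urls)
else:
    print('nuked (%s), checked: %s' % (reason, urls))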
Example #48
def update_network_dict():
    """Update timezone information from SR repositories"""

    url = 'http://raw.githubusercontent.com/pymedusa/sickrage.github.io/master/sb_network_timezones/network_timezones.txt'
    url_data = helpers.getURL(url,
                              session=helpers.make_session(),
                              returns='text')
    if not url_data:
        logger.log(
            u'Updating network timezones failed, this can happen from time to time. URL: %s'
            % url, logger.WARNING)
        load_network_dict()
        return

    d = {}
    try:
        for line in url_data.splitlines():
            (key, val) = line.strip().rsplit(u':', 1)
            if not (key and val):
                continue
            d[key] = val
    except (IOError, OSError, ValueError):
        # ValueError covers lines without a ':' separator
        pass

    cache_db_con = db.DBConnection('cache.db')

    network_list = dict(
        cache_db_con.select('SELECT * FROM network_timezones;'))

    queries = []
    for network, timezone in d.iteritems():
        existing = network in network_list
        if not existing:
            queries.append([
                'INSERT OR IGNORE INTO network_timezones VALUES (?,?);',
                [network, timezone]
            ])
        elif network_list[network] != timezone:
            queries.append([
                'UPDATE OR IGNORE network_timezones SET timezone = ? WHERE network_name = ?;',
                [timezone, network]
            ])

        if existing:
            del network_list[network]

    if network_list:
        purged = list(network_list)
        queries.append([
            'DELETE FROM network_timezones WHERE network_name IN (%s);' %
            ','.join(['?'] * len(purged)), purged
        ])

    if queries:
        cache_db_con.mass_action(queries)
        load_network_dict()
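The network_timezones.txt format is one "network:timezone" pair per line, split once from the right so network names containing ':' survive. A minimal parsing sketch with invented sample lines:

# hypothetical lines in the documented 'network:timezone' format
url_data = u'NBC:America/New_York\nBBC One:Europe/London'

d = {}
for line in url_data.splitlines():
    key, val = line.strip().rsplit(u':', 1)
    if not (key and val):
        continue
    d[key] = val
print(d)  # {u'NBC': u'America/New_York', u'BBC One': u'Europe/London'}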
Example #49
    def _get_auth(self):

        self.auth = (6 < self.api_version()
                     and 'Ok' in (helpers.getURL('%slogin' % self.host,
                                                 session=self.session,
                                                 post_data={
                                                     'username': self.username,
                                                     'password': self.password
                                                 }) or ''))
        return self.auth
Example #50
    def get_channels(self, pushbullet_api):
        """Fetch the list of channels a given access token has permission to push to."""
        logger.log(
            'Testing Pushbullet authentication and retrieving the channel list.',
            logger.DEBUG)
        headers = {'Access-Token': pushbullet_api}
        return helpers.getURL(urljoin(self.url, 'channels'),
                              session=self.session,
                              headers=headers,
                              returns='text') or {}
Example #51
def update_network_dict():
    """Update timezone information from SR repositories"""
    _remove_old_zoneinfo()
    _update_zoneinfo()

    url = 'http://sickragetv.github.io/sb_network_timezones/network_timezones.txt'
    url_data = helpers.getURL(url, session=requests.Session())
    if url_data is None:
        logger.log(
            u'Updating network timezones failed, this can happen from time to time. URL: %s'
            % url, logger.WARNING)
        load_network_dict()
        return

    d = {}
    try:
        for line in url_data.splitlines():
            (key, val) = line.decode('utf-8').strip().rsplit(u':', 1)
            if not (key and val):
                continue
            d[key] = val
    except (IOError, OSError, ValueError):
        # ValueError covers lines without a ':' separator
        pass

    my_db = db.DBConnection('cache.db')

    network_list = dict(my_db.select('SELECT * FROM network_timezones;'))

    queries = []
    for network, timezone in d.iteritems():
        existing = network in network_list
        if not existing:
            queries.append([
                'INSERT OR IGNORE INTO network_timezones VALUES (?,?);',
                [network, timezone]
            ])
        elif network_list[network] != timezone:
            queries.append([
                'UPDATE OR IGNORE network_timezones SET timezone = ? WHERE network_name = ?;',
                [timezone, network]
            ])

        if existing:
            del network_list[network]

    if network_list:
        purged = list(network_list)
        queries.append([
            'DELETE FROM network_timezones WHERE network_name IN (%s);' %
            ','.join(['?'] * len(purged)), purged
        ])

    if queries:
        my_db.mass_action(queries)
        load_network_dict()
Example #52
    def _authorised(self, **kwargs):

        if not super(TransmithenetProvider, self)._authorised(
                logged_in=(lambda x=None: self.has_all_cookies('session')),
                post_params={'keeplogged': '1', 'login': '******'}):
            return False
        if not self.user_authkey:
            response = helpers.getURL(self.urls['user'], session=self.session, json=True)
            if response and 'response' in response:
                self.user_authkey, self.user_passkey = [response['response'].get(v) for v in ('authkey', 'passkey')]
        return self.user_authkey
Example #53
    def get_url(self,
                url,
                post_data=None,
                params=None,
                timeout=30,
                json=False,
                need_bytes=False,
                **kwargs):  # pylint: disable=too-many-arguments
        kwargs['hooks'] = {'response': self.get_url_hook}
        return getURL(url, post_data, params, self.headers, timeout,
                      self.session, json, need_bytes, **kwargs)
Example #54
    def _sendPushbullet(  # pylint: disable=too-many-arguments
            self,
            pushbullet_api=None,
            pushbullet_device=None,
            pushbullet_channel=None,
            event=None,
            message=None,
            link=None,
            force=False):

        if not (sickbeard.USE_PUSHBULLET or force):
            return False

        pushbullet_api = pushbullet_api or sickbeard.PUSHBULLET_API
        pushbullet_device = pushbullet_device or sickbeard.PUSHBULLET_DEVICE
        pushbullet_channel = pushbullet_channel or sickbeard.PUSHBULLET_CHANNEL

        logger.log('Pushbullet event: {0!r}'.format(event), logger.DEBUG)
        logger.log('Pushbullet message: {0!r}'.format(message), logger.DEBUG)
        logger.log('Pushbullet api: {0!r}'.format(pushbullet_api),
                   logger.DEBUG)
        logger.log('Pushbullet devices: {0!r}'.format(pushbullet_device),
                   logger.DEBUG)

        post_data = {
            'title': event,
            'body': message,
            'type': 'link' if link else 'note'
        }
        if link:
            post_data['url'] = link

        headers = {'Access-Token': pushbullet_api}

        if pushbullet_device:
            post_data['device_iden'] = pushbullet_device
        elif pushbullet_channel:
            post_data['channel_tag'] = pushbullet_channel

        response = helpers.getURL(urljoin(self.url, 'pushes'),
                                  session=self.session,
                                  post_data=post_data,
                                  headers=headers,
                                  returns='json') or {}

        failed = response.pop('error', {})
        if failed:
            logger.log(
                'Pushbullet notification failed: {0}'.format(
                    failed.pop('message')), logger.WARNING)
        else:
            logger.log('Pushbullet notification sent.', logger.DEBUG)

        return not failed
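The same call can be reproduced standalone against the public Pushbullet v2 API; a sketch assuming requests, with a placeholder token:

import requests

# the token below is a placeholder, not a real access token
headers = {'Access-Token': 'o.placeholder_token'}
post_data = {'type': 'note', 'title': 'Test', 'body': 'Hello from SickBeard'}

response = requests.post('https://api.pushbullet.com/v2/pushes',
                         json=post_data, headers=headers, timeout=30)
failed = response.json().get('error', {})
print('failed: %s' % failed if failed else 'sent')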
Example #55
    def run(self, force=False):  # pylint: disable=unused-argument, too-many-locals, too-many-branches, too-many-statements

        if self.amActive:
            return

        self.amActive = True

        update_timestamp = time.mktime(datetime.datetime.now().timetuple())
        cache_db_con = db.DBConnection('cache.db')
        result = cache_db_con.select('SELECT `time` FROM lastUpdate WHERE provider = ?', ['theTVDB'])
        if result:
            last_update = int(result[0][0])
        else:
            last_update = int(time.mktime(datetime.datetime.min.timetuple()))
            cache_db_con.action('INSERT INTO lastUpdate (provider, `time`) VALUES (?, ?)', ['theTVDB', last_update])

        network_timezones.update_network_dict()

        url = 'http://thetvdb.com/api/Updates.php?type=series&time={0}'.format(last_update)
        data = helpers.getURL(url, session=self.session, returns='text', hooks={'response': self.request_hook})
        if not data:
            logger.log('Could not get the recently updated show data from {0}. Retrying later. Url was: {1}'.format(sickbeard.indexerApi(INDEXER_TVDB).name, url))
            self.amActive = False
            return

        updated_shows = set()
        try:
            tree = etree.fromstring(data)
            for show in tree.findall('Series'):
                updated_shows.add(int(show.text))
        except SyntaxError:
            update_timestamp = last_update

        pi_list = []
        for cur_show in sickbeard.showList:
            if int(cur_show.indexer) == INDEXER_TVRAGE:
                logger.log('Indexer is no longer available for show [{0}] '.format(cur_show.name), logger.WARNING)
                continue

            try:
                cur_show.nextEpisode()
                if sickbeard.indexerApi(cur_show.indexer).name == 'theTVDB':
                    if cur_show.indexerid in updated_shows:
                        pi_list.append(sickbeard.showQueueScheduler.action.update_show(cur_show, True))
                    else:
                        pi_list.append(sickbeard.showQueueScheduler.action.refresh_show(cur_show, False))
            except (CantUpdateShowException, CantRefreshShowException) as error:
                logger.log('Automatic update failed: {0}'.format(ex(error)), logger.DEBUG)

        ui.ProgressIndicators.setIndicator('dailyUpdate', ui.QueueProgressIndicator('Daily Update', pi_list))

        cache_db_con.action('UPDATE lastUpdate SET `time` = ? WHERE provider=?', [update_timestamp, 'theTVDB'])

        self.amActive = False
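The Updates.php response parsed above is plain XML with one <Series> element per changed show. A small sketch with a made-up payload shaped like that response:

import xml.etree.ElementTree as etree

# hypothetical Updates.php body; ids are invented
data = '''<Items>
  <Time>1480000000</Time>
  <Series>73739</Series>
  <Series>81189</Series>
</Items>'''

updated_shows = set()
for show in etree.fromstring(data).findall('Series'):
    updated_shows.add(int(show.text))
print(updated_shows)  # set([73739, 81189])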
Example #56
def load_network_conversions():

    if not should_try_loading():
        return

    conversions = []

    # network conversions are stored on github pages
    url = 'https://raw.githubusercontent.com/prinz23/sg_network_conversions/master/conversions.txt'

    url_data = helpers.getURL(url)
    if url_data is None:
        update_last_retry()
        # when url_data is None there was trouble connecting to github
        logger.log(u'Updating network conversions failed, this can happen from time to time. URL: %s' % url, logger.WARNING)
        return
    else:
        reset_last_retry()

    try:
        for line in url_data.splitlines():
            (tvdb_network, tvrage_network, tvrage_country) = line.decode('utf-8').strip().rsplit(u'::', 2)
            if not (tvdb_network and tvrage_network and tvrage_country):
                continue
            conversions.append({'tvdb_network': tvdb_network, 'tvrage_network': tvrage_network, 'tvrage_country': tvrage_country})
    except (IOError, OSError, ValueError):
        # ValueError covers lines without two '::' separators
        pass

    my_db = db.DBConnection('cache.db')

    old_d = my_db.select('SELECT * FROM network_conversions')
    old_d = helpers.build_dict(old_d, 'tvdb_network')

    # list of sql commands to update the network_conversions table
    cl = []

    for n_w in conversions:
        cl.append(['INSERT OR REPLACE INTO network_conversions (tvdb_network, tvrage_network, tvrage_country)'
                   ' VALUES (?,?,?)', [n_w['tvdb_network'], n_w['tvrage_network'], n_w['tvrage_country']]])
        try:
            del old_d[n_w['tvdb_network']]
        except KeyError:
            pass

    # remove deleted records
    if old_d:
        old_items = list(old_d)
        cl.append(['DELETE FROM network_conversions WHERE tvdb_network'
                   ' IN (%s)' % ','.join(['?'] * len(old_items)), old_items])

    # change all network conversion info at once (much faster)
    if cl:
        my_db.mass_action(cl)
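The insert-then-purge pattern above (upsert every fresh row, then delete whatever remains of the old snapshot) works the same against a bare sqlite3 connection. A self-contained sketch with an illustrative table, not the real cache.db schema:

import sqlite3

con = sqlite3.connect(':memory:')
con.execute('CREATE TABLE network_conversions'
            ' (tvdb_network TEXT PRIMARY KEY, tvrage_network TEXT, tvrage_country TEXT)')
con.execute('INSERT INTO network_conversions VALUES (?,?,?)', ('Old Net', 'Old Net', 'US'))

fresh = {'NBC': ('NBC', 'US')}  # invented upstream snapshot
old_d = dict((row[0], row) for row in con.execute('SELECT * FROM network_conversions'))

for tvdb_network, (tvrage_network, tvrage_country) in fresh.items():
    con.execute('INSERT OR REPLACE INTO network_conversions VALUES (?,?,?)',
                (tvdb_network, tvrage_network, tvrage_country))
    old_d.pop(tvdb_network, None)

# anything left in old_d no longer exists upstream, so purge it in one statement
if old_d:
    con.execute('DELETE FROM network_conversions WHERE tvdb_network IN (%s)'
                % ','.join('?' * len(old_d)), list(old_d))
print(list(con.execute('SELECT * FROM network_conversions')))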
Example #57
    def get_url(self, url, post_data=None, params=None, timeout=30, json=False):
        """
        By default this is just a simple urlopen call but this method should be overridden
        for providers with special URL requirements (like cookies)
        """

        # check for auth
        if not self._authorised():
            return

        return helpers.getURL(url, post_data=post_data, params=params, headers=self.headers, timeout=timeout,
                              session=self.session, json=json, hooks=dict(response=self.cb_response))
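The hooks keyword used here is the standard requests response hook: the callable receives each final Response before it is handed back to the caller. A standalone sketch, using httpbin.org purely as an example endpoint:

import requests

def cb_response(response, **kwargs):
    # log the final status of every request made through the session
    print('%s -> %s' % (response.url, response.status_code))
    return response

session = requests.Session()
session.get('https://httpbin.org/get', hooks={'response': cb_response})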
Example #58
    def _getURL(self, url, post_data=None, params=None, timeout=30, json=False):
        """
        By default this is just a simple urlopen call but this method should be overridden
        for providers with special URL requirements (like cookies)
        Not changed much from the superclass; kept so it can be customised in future.
        """

        # check for auth
        if not self._doLogin():
            return

        return helpers.getURL(url, post_data=post_data, params=params, headers=self.headers, timeout=timeout,
                              session=self.session, json=json)
Example #59
    def get_new_token(self):
        token = sickbeard.THETVDB_V2_API_TOKEN.get('token', None)
        dt = sickbeard.THETVDB_V2_API_TOKEN.get(
            'datetime', datetime.datetime.fromordinal(1))
        url = '%s%s' % (self.config['base_url'], 'login')
        params = {'apikey': self.config['apikey']}
        resp = getURL(url.strip(), post_json=params, json=True)
        if resp and 'token' in resp:
            token = resp['token']
            dt = datetime.datetime.now()

        return {'token': token, 'datetime': dt}
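Because the stored dict carries both the token and the datetime it was fetched, callers can decide when to refresh. A hedged sketch, assuming a 24-hour token lifetime (the exact TheTVDB v2 lifetime is an assumption here):

import datetime

TOKEN_LIFETIME = datetime.timedelta(hours=24)  # assumed, not confirmed

def token_expired(token_info):
    fetched = token_info.get('datetime', datetime.datetime.fromordinal(1))
    return datetime.datetime.now() - fetched > TOKEN_LIFETIME

# if token_expired(sickbeard.THETVDB_V2_API_TOKEN):
#     sickbeard.THETVDB_V2_API_TOKEN = self.get_new_token()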
Example #60
    def _do_login(self):

        logged_in = lambda: 'gft_uid' in self.session.cookies and 'gft_pass' in self.session.cookies
        if logged_in():
            return True

        if self._check_auth():
            helpers.getURL(self.urls['login_get'], session=self.session)
            login_params = {
                'username': self.username,
                'password': self.password
            }
            response = helpers.getURL(self.urls['login_post'],
                                      post_data=login_params,
                                      session=self.session)
            if response and logged_in():
                return True

            logger.log(
                u'Failed to authenticate with %s, abort provider.' % self.name,
                logger.ERROR)

        return False