Example 1
def title_from_id(identifier_key, identifier_value):
    if identifier_key is None or identifier_value is None:
        raise AttributeError("Identifier key or value was not supplied")
    try:
        if identifier_key == "imdbid":
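            # OMDb expects the "tt"-prefixed IMDb ID, so prepend it to bare numeric IDs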
            if identifier_value[0:2] != "tt":
                identifier_value = "tt%s" % identifier_value
            url = furl("http://www.omdbapi.com").add({"i": identifier_value, "plot": "short", "r": "json"}).tostr()
            omdb = webaccess.get(url)
            return omdb.json()["Title"]

        if identifier_key not in ("rid", "tvdbid"):
            raise AttributeError("Unknown identifier %s" % identifier_key)

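        # TVMaze's lookup endpoint resolves a show by either a TVRage ("tvrage") or TheTVDB ("thetvdb") ID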
        tvmaze_key = "tvrage" if identifier_key == "rid" else "thetvdb"
        tvmaze = webaccess.get(furl("http://api.tvmaze.com/lookup/shows").add({tvmaze_key: identifier_value}).url)
        if tvmaze.status_code == 404:
            #Unfortunately TVMaze returns a 404 for unknown/invalid IDs
            raise ExternalApiInfoException("Unable to find id %s and value %s at TVMaze" % (identifier_key, identifier_value))
        tvmaze.raise_for_status()
        
        return tvmaze.json()["name"]

    except (HTTPError, ConnectionError, ReadTimeout) as e:
        logger.exception("Unable to retrieve title by id %s and value %s" % (identifier_key, identifier_value))
        raise ExternalApiInfoException(str(e))
    except Exception as e:
        logger.exception("Unable to retrieve title by id %s and value %s" % (identifier_key, identifier_value))
        raise ExternalApiInfoException(str(e))
Example 2
 def test(self, setting):
     self.logger.debug("Testing connection to sabnzbd")
     try:
         f = self.get_sab(setting.url, setting.apikey, setting.username,
                          setting.password)
         f.add({"mode": "qstatus"})
         r = webaccess.get(f.tostr(), timeout=15)
         r.raise_for_status()
         if "state" in json.loads(r.text).keys():
             self.logger.info('Connection test to sabnzbd successful')
             return True, ""
         else:
             self.logger.info(
                 "Access to sabnzbd failed, probably due to wrong credentials"
             )
             return False, "Credentials wrong?"
     except DownloaderException as e:
         self.logger.error("Error while trying to connect to sabnzbd: %s" %
                           e)
         return False, str(e)
     except (SSLError, HTTPError, ConnectionError, ReadTimeout,
             InvalidSchema, MissingSchema) as e:
         self.logger.error("Error while trying to connect to sabnzbd: %s" %
                           e)
         return False, "SABnzbd is not responding"
Example 3
def test_connection(host, apikey):
    logger.info("Testing connection for host %s" % host)
    f = furl(host)
    f.path.add("api")
    f.query.add({"t": "tvsearch"})
    if apikey:
        f.query.add({"apikey": apikey})
    try:
        headers = {'User-Agent': config.settings.searching.userAgent}
        r = webaccess.get(f.url,
                          headers=headers,
                          timeout=config.settings.searching.timeout)
        r.raise_for_status()
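        # check_auth inspects the response body and raises an indexer exception (handled below) if it reports an auth problem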
        check_auth(r.text, None)
    except RequestException as e:
        logger.info("Unable to connect to indexer using URL %s: %s" %
                    (f.url, str(e)))
        return False, "Unable to connect to host"
    except IndexerAuthException as e:
        logger.info(
            "Unable to log in to indexer %s due to wrong credentials: %s" %
            (host, e.message))
        return False, e.message
    except IndexerAccessException as e:
        logger.info("Unable to log in to indexer %s. Unknown error %s." %
                    (host, str(e)))
        return False, "Host reachable but unknown error returned"
    return True, ""
Example 4
def test_connection(host, apikey, username=None, password=None):
    logger.info("Testing connection for host %s" % host)
    f = furl(host)
    f.path.add("api")
    f.query.add({"t": "tvsearch"})
    if apikey:
        f.query.add({"apikey": apikey})
    try:
        headers = {
            'User-Agent': config.settings.searching.userAgent
        }
        r = webaccess.get(f.url, headers=headers, timeout=config.settings.searching.timeout, auth=HTTPBasicAuth(username, password) if username is not None else None)
        r.raise_for_status()
        check_auth(r.text, None)
    except RequestException as e:
        logger.info("Unable to connect to indexer using URL %s: %s" % (f.url, str(e)))
        return False, "Unable to connect to host"
    except IndexerAuthException as e:
        logger.info("Unable to log in to indexer %s due to wrong credentials: %s" % (host, e.message))
        return False, e.message
    except IndexerAccessException as e:
        logger.info("Unable to log in to indexer %s. Unknown error %s." % (host, str(e)))
        return False, "Host reachable but unknown error returned"
    logger.info("Connection to host %s successful" % host)
    return True, ""
Example 5
def test_connection(host, apikey, username=None, password=None):
    logger.info("Testing connection for host %s" % host)
    f = furl(host)
    f.path.add("api")
    f.query.add({"t": "tvsearch"})
    if apikey:
        f.query.add({"apikey": apikey})
    try:
        headers = {'User-Agent': config.settings.searching.userAgent}
        r = webaccess.get(f.url,
                          headers=headers,
                          timeout=config.settings.searching.timeout,
                          auth=HTTPBasicAuth(username, password)
                          if username is not None else None)
        r.raise_for_status()
        check_auth(r.text, host)
    except RequestException as e:
        logger.info("Unable to connect to indexer using URL %s: %s" %
                    (f.url, str(e)))
        return False, "Unable to connect to host"
    except IndexerAuthException as e:
        logger.info(
            "Unable to log in to indexer %s due to wrong credentials: %s" %
            (host, e.message))
        return False, e.message
    except IndexerApiLimitReachedException as e:
        logger.info("Indexer %s due to wrong credentials: %s" %
                    (host, e.message))
        return False, e.message  # Description already contains "API limit reached" so returning the description should suffice
    except IndexerAccessException as e:
        logger.info("Unable to log in to indexer %s. Unknown error %s." %
                    (host, e.message))
        return False, "Host reachable but unknown error returned"
    logger.info("Connection to host %s successful" % host)
    return True, ""
Example 6
def _testId(host,
            apikey,
            t,
            idkey,
            idvalue,
            expectedResult,
            username=None,
            password=None):
    logger.info("Testing for ID capability \"%s\"" % idkey)

    try:
        url = _build_base_url(host, apikey, t, None, 50)
        url.query.add({idkey: idvalue})
        headers = {'User-Agent': config.settings.searching.userAgent}
        logger.debug("Requesting %s" % url)
        r = webaccess.get(url,
                          timeout=config.settings.searching.timeout,
                          headers=headers,
                          auth=HTTPBasicAuth(username, password)
                          if username is not None else None)
        r.raise_for_status()
        logger.debug("Indexer returned: " + r.text[:500])
        check_auth(r.text, None)
        titles = []
        tree = ET.fromstring(r.content)
    except Exception as e:
        if isinstance(e, IndexerAccessException):
            raise
        else:
            logger.error("Error getting or parsing XML: %s" % e)
            raise IndexerAccessException("Error getting or parsing XML", None)
    for item in tree.find("channel").findall("item"):
        titles.append(item.find("title").text)

    #Hacky way of preventing nzb.su from shutting us down. If more than 5 requests are made within 6 seconds the indexer will block further requests for some time
    if "nzb.su" in host.lower():
        sleep(1)

    if len(titles) == 0:
        logger.debug("Search with t=%s and %s=%s returned no results" %
                     (t, idkey, idvalue))
        return False, t
    countWrong = 0
    for title in titles:
        title = title.lower()
        if expectedResult.lower() not in title:
            logger.debug(
                "Search with t=%s and %s=%s returned \"%s\" which does not contain the expected string \"%s\""
                % (t, idkey, idvalue, title, expectedResult))
            countWrong += 1
    percentWrong = (100 * countWrong) / len(titles)
    if percentWrong > 10:
        logger.info(
            "%d%% wrong results, this indexer probably doesn't support %s" %
            (percentWrong, idkey))
        return False, t
    logger.info("%d%% wrong results, this indexer probably supports %s" %
                (percentWrong, idkey))

    return True, t
Example 7
 def get(self, query, timeout=None, cookies=None):
     # overwrite for special handling, e.g. cookies
     return webaccess.get(query,
                          timeout=timeout,
                          cookies={
                              "agreed": "true",
                              "lang": "2"
                          })
Example 8
 def get_categories(self):
     self.logger.debug("Sending categories request to sabnzbd")
     f = self.get_sab()
     f.add({"mode": "get_cats", "output": "json"})
     try:
         r = webaccess.get(f.tostr(), timeout=15)
         r.raise_for_status()
         return r.json()["categories"]
     except (SSLError, HTTPError, ConnectionError, ReadTimeout, InvalidSchema, MissingSchema):
         self.logger.exception("Error while trying to connect to sabnzbd with URL %s" % f.url)
         raise DownloaderException("Unable to contact SabNZBd")
Example 9
 def get_categories(self):
     self.logger.debug("Sending categories request to sabnzbd")
     f = self.get_sab()
     f.add({"mode": "get_cats", "output": "json"})
     try:
         r = webaccess.get(f.tostr(), timeout=15)
         r.raise_for_status()
         return r.json()["categories"]
     except (SSLError, HTTPError, ConnectionError, ReadTimeout, InvalidSchema, MissingSchema):
         self.logger.exception("Error while trying to connect to sabnzbd with URL %s" % f.url)
         raise DownloaderException("Unable to contact SabNZBd")
Example 10
def _testId(host,
            apikey,
            t,
            idkey,
            idvalue,
            expectedResult,
            username=None,
            password=None):
    logger.info("Testing for ID capability \"%s\"" % idkey)

    try:
        url = _build_base_url(host, apikey, t, None, 25)
        url.query.add({idkey: idvalue})
        headers = {'User-Agent': config.settings.searching.userAgent}
        logger.debug("Requesting %s" % url)
        r = webaccess.get(url,
                          timeout=config.settings.searching.timeout,
                          headers=headers,
                          auth=HTTPBasicAuth(username, password)
                          if username is not None else None)
        r.raise_for_status()
        titles = []
        tree = ET.fromstring(r.content)
    except Exception as e:
        logger.error("Error getting or parsing XML: %s" % e)
        raise IndexerAccessException("Error getting or parsing XML", None)
    for item in tree.find("channel").findall("item"):
        titles.append(item.find("title").text)

    if len(titles) == 0:
        logger.debug("Search with t=%s and %s=%s returned no results" %
                     (t, idkey, idvalue))
        return False, t
    countWrong = 0
    for title in titles:
        title = title.lower()
        if expectedResult.lower() not in title:
            logger.debug(
                "Search with t=%s and %s=%s returned \"%s\" which does not contain the expected string \"%s\""
                % (t, idkey, idvalue, title, expectedResult))
            countWrong += 1
    percentWrong = (100 * countWrong) / len(titles)
    if percentWrong > 30:
        logger.info(
            "%d%% wrong results, this indexer probably doesn't support %s" %
            (percentWrong, idkey))
        return False, t
    logger.info("%d%% wrong results, this indexer probably supports %s" %
                (percentWrong, idkey))

    return True, t
Example 11
 def getChangelogFromRepository(self):
     url = furl(self.repositoryBase)
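     # switch to the "raw." host so GitHub serves the file contents rather than an HTML page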
     url.host = "raw.%s" % url.host
     url.path.add(self.repository)
     url.path.add(self.branch)
     url.path.add("changelog.md")
     logger.debug("Loading changelog from %s" % url)
     try:
         r = webaccess.get(url)
         r.raise_for_status()
         return r.text
     except requests.RequestException as e:
         logger.error("Error downloading changelog.md from %s to check new updates: %s" % (url if url is not None else " Github", e))
         return None
Example 12
 def get(self, url, timeout=None, cookies=None):
     # overwrite for special handling, e.g. cookies
     headers = {"User-Agent": config.settings.searching.userAgent}
     if timeout is None:
         timeout = self.settings.timeout
     if timeout is None:
         timeout = config.settings.searching.timeout
     if hasattr(self.settings, "username") and self.settings.username and self.settings.password:
         auth = HTTPBasicAuth(self.settings.username, self.settings.password)
         self.debug("Using HTTP auth")
     else:
         auth = None
     self.debug("Requesting %s with timeout %d" % (url, timeout))
     return webaccess.get(url, timeout=timeout, cookies=cookies, headers=headers, auth=auth)
Example 13
 def getLatestVersionFromRepository(self):
     url = furl(self.repositoryBase)
     url.host = "raw.%s" % url.host
     url.path.add(self.repository)
     url.path.add(self.branch)
     if self.subfolder:
         url.path.add(self.subfolder)
     url.path.add("version.txt")
     logger.debug("Loading repository version from %s" % url)
     try:
         r = webaccess.get(url)
         r.raise_for_status()
         return versiontuple(r.text.strip()), r.text.strip()
     except requests.RequestException as e:
         logger.error("Error downloading version.txt from %s to check new updates: %s" % (url if url is not None else " Github", e))
         return None, None
Example 14
def find_series_ids(input):
    info = webaccess.get("http://api.tvmaze.com/search/shows?q=%s" % input)
    info.raise_for_status()
    results = []
    for result in info.json():
        result = result["show"]
        if result["externals"]["thetvdb"] is None:
            logger.info("Did not find TVDB ID for %s. Will skip this result." % result["name"])
            continue
        info = {"label": result["name"], "value": str(result["externals"]["thetvdb"]), "title": result["name"]}
        try:
            info["poster"] = result["image"]["medium"]
        except:
            logger.debug("No poster found for %s" % result["name"])
            pass
        results.append(info)
    return results
Example 15
def _testId(host, apikey, t, idkey, idvalue, expectedResult, username=None, password=None):
    logger.info("Testing for ID capability \"%s\"" % idkey)

    try:
        url = _build_base_url(host, apikey, t, None, 50)
        url.query.add({idkey: idvalue})
        headers = {
            'User-Agent': config.settings.searching.userAgent
        }
        logger.debug("Requesting %s" % url)
        r = webaccess.get(url, timeout=config.settings.searching.timeout, headers=headers, auth=HTTPBasicAuth(username, password) if username is not None else None)
        r.raise_for_status()
        logger.debug("Indexer returned: " + r.text[:500])
        check_auth(r.text, None)
        titles = []
        tree = ET.fromstring(r.content)
    except Exception as e:
        if isinstance(e, IndexerAccessException):
            raise
        else:
            logger.error("Error getting or parsing XML: %s" % e)
            raise IndexerAccessException("Error getting or parsing XML", None)
    for item in tree.find("channel").findall("item"):
        titles.append(item.find("title").text)

    #Hacky way of preventing nzb.su from shutting us down. If more than 5 requests are made within 6 seconds the indexer will block further requests for some time
    if "nzb.su" in host.lower():
        sleep(1)

    if len(titles) == 0:
        logger.debug("Search with t=%s and %s=%s returned no results" % (t, idkey, idvalue))
        return False, t
    countWrong = 0
    for title in titles:
        title = title.lower()
        if expectedResult.lower() not in title:
            logger.debug("Search with t=%s and %s=%s returned \"%s\" which does not contain the expected string \"%s\"" % (t, idkey, idvalue, title, expectedResult))
            countWrong += 1
    percentWrong = (100 * countWrong) / len(titles)
    if percentWrong > 10:
        logger.info("%d%% wrong results, this indexer probably doesn't support %s" % (percentWrong, idkey))
        return False, t
    logger.info("%d%% wrong results, this indexer probably supports %s" % (percentWrong, idkey))

    return True, t
Example 16
 def get(self, url, timeout=None, cookies=None):
     # overwrite for special handling, e.g. cookies
     headers = {'User-Agent': "NZBHydra"}
     if hasattr(self.settings, "userAgent") and self.settings.userAgent:
         headers['User-Agent'] = self.settings.userAgent
     elif config.settings.searching.userAgent:
         headers['User-Agent'] = config.settings.searching.userAgent
     if timeout is None:
         timeout = self.settings.timeout
     if timeout is None:
         timeout = config.settings.searching.timeout
     if hasattr(self.settings, "username") and self.settings.username and self.settings.password:
         auth = HTTPBasicAuth(self.settings.username, self.settings.password)
         self.debug("Using HTTP auth")
     else:
         auth = None
     self.debug("Requesting %s with timeout %d" % (url, timeout))
     return webaccess.get(url, timeout=timeout, cookies=cookies, headers=headers, auth=auth)
Example 17
    def add_link(self, link, title, category):
        self.logger.debug("Sending add-link request for %s to sabnzbd" % title)
        if title is None:
            title = ""
        else:
            if not title.endswith(".nzb"):  # sabnzbd skips entries of which the filename does not end with NZB
                title += ".nzb"

        f = self.get_sab()
        f.add({"mode": "addurl", "name": link, "nzbname": title})
        if category is not None:
            f.add({"cat": category})
        try:
            r = webaccess.get(f.tostr(), timeout=15)
            r.raise_for_status()
            return r.json()["status"]
        except (SSLError, HTTPError, ConnectionError, ReadTimeout, InvalidSchema, MissingSchema):
            self.logger.exception("Error while trying to connect to sabnzbd using link %s" % link)
            return False
Example 18
    def add_link(self, link, title, category):
        self.logger.debug("Sending add-link request for %s to sabnzbd" % title)
        if title is None:
            title = ""
        else:
            if not title.endswith(".nzb"):  # sabnzbd skips entries of which the filename does not end with NZB
                title += ".nzb"

        f = self.get_sab()
        f.add({"mode": "addurl", "name": link, "nzbname": title})
        if category is not None:
            f.add({"cat": category})
        try:
            r = webaccess.get(f.tostr(), timeout=15)
            r.raise_for_status()
            return r.json()["status"]
        except (SSLError, HTTPError, ConnectionError, ReadTimeout, InvalidSchema, MissingSchema):
            self.logger.exception("Error while trying to connect to sabnzbd using link %s" % link)
            return False
Example 19
 def test(self, setting):
     self.logger.debug("Testing connection to sabnzbd")
     try:
         f = self.get_sab(setting.url, setting.apikey, setting.username, setting.password)
         f.add({"mode": "qstatus"})
         r = webaccess.get(f.tostr(), timeout=15)
         r.raise_for_status()
         if "state" in json.loads(r.text).keys():
             self.logger.info("Connection test to sabnzbd successful")
             return True, ""
         else:
             self.logger.info("Access to sabnzbd failed, probably due to wrong credentials")
             return False, "Credentials wrong?"
     except DownloaderException as e:
         self.logger.error("Error while trying to connect to sabnzbd: %s" % e)
         return False, str(e)
     except (SSLError, HTTPError, ConnectionError, ReadTimeout, InvalidSchema, MissingSchema) as e:
         self.logger.error("Error while trying to connect to sabnzbd: %s" % e)
         return False, "SABnzbd is not responding"
Example 20
def _testId(host, apikey, t, idkey, idvalue, expectedResult, username=None, password=None):
    logger.info("Testing for ID capability \"%s\"" % idkey)

    try:
        url = _build_base_url(host, apikey, t, None, 25)
        url.query.add({idkey: idvalue})
        headers = {
            'User-Agent': config.settings.searching.userAgent
        }
        logger.debug("Requesting %s" % url)
        r = webaccess.get(url, timeout=config.settings.searching.timeout, headers=headers, auth=HTTPBasicAuth(username, password) if username is not None else None)
        r.raise_for_status()
        titles = []
        tree = ET.fromstring(r.content)
    except Exception as e:
        logger.error("Error getting or parsing XML: %s" % e)
        raise IndexerAccessException("Error getting or parsing XML", None)
    for item in tree.find("channel").findall("item"):
        titles.append(item.find("title").text)

    if len(titles) == 0:
        logger.debug("Search with t=%s and %s=%s returned no results" % (t, idkey, idvalue))
        return False, t
    countWrong = 0
    for title in titles:
        title = title.lower()
        if expectedResult.lower() not in title:
            logger.debug("Search with t=%s and %s=%s returned \"%s\" which does not contain the expected string \"%s\"" % (t, idkey, idvalue, title, expectedResult))
            countWrong += 1
    percentWrong = (100 * countWrong) / len(titles)
    if percentWrong > 30:
        logger.info("%d%% wrong results, this indexer probably doesn't support %s" % (percentWrong, idkey))
        return False, t
    logger.info("%d%% wrong results, this indexer probably supports %s" % (percentWrong, idkey))

    return True, t
Example 21
def check_caps(host,
               apikey,
               username=None,
               password=None,
               userAgent=None,
               timeout=None,
               skipIdsAndTypes=False):
    toCheck = [{
        "t": "tvsearch",
        "id": "tvdbid",
        "key": "121361",
        "expected": "Thrones"
    }, {
        "t": "tvsearch",
        "id": "rid",
        "key": "24493",
        "expected": "Thrones"
    }, {
        "t": "tvsearch",
        "id": "tvmazeid",
        "key": "82",
        "expected": "Thrones"
    }, {
        "t": "tvsearch",
        "id": "traktid",
        "key": "1390",
        "expected": "Thrones"
    }, {
        "t": "movie",
        "id": "tmdbid",
        "key": "24428",
        "expected": "Avengers"
    }, {
        "t": "movie",
        "id": "imdbid",
        "key": "0848228",
        "expected": "Avengers"
    }]
    supportedIds = []
    supportedTypes = []

    try:
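        # t=caps asks the newznab API to describe the indexer's categories and supported search types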
        url = _build_base_url(host, apikey, "caps", None)
        headers = {
            'User-Agent':
            userAgent
            if userAgent is not None else config.settings.searching.userAgent
        }
        logger.debug("Requesting %s" % url)
        r = webaccess.get(url,
                          timeout=timeout if timeout is not None else
                          config.settings.searching.timeout,
                          headers=headers,
                          auth=HTTPBasicAuth(username, password)
                          if username is not None else None)
        r.raise_for_status()
        logger.debug("Indexer returned: " + r.text[:500])
    except Exception as e:
        logger.error("Error getting caps XML. Error message: %s" % e)
        return False, e.message, None

    try:
        check_auth(r.text, host)
    except IndexerAccessException as e:
        return False, e.message, None

    try:
        tree = ET.fromstring(r.content)
    except Exception as e:
        logger.error("Unable to parse indexer response")
        return False, e.message, None

    # Check indexer type (nzedb, newznab, nntmux)
    try:
        url = _build_base_url(host, apikey, "tvsearch", None)
        headers = {
            'User-Agent':
            userAgent
            if userAgent is not None else config.settings.searching.userAgent
        }
        logger.debug("Requesting %s" % url)
        r = webaccess.get(url,
                          timeout=timeout if timeout is not None else
                          config.settings.searching.timeout,
                          headers=headers,
                          auth=HTTPBasicAuth(username, password)
                          if username is not None else None)
        r.raise_for_status()
    except Exception as e:
        logger.error(
            "Unable to connect to indexer to find out the indexer backend type: %s"
            % e)
        return False, "Unable to connect to indexer to find out the indexer backend type", None

    logger.debug("Indexer returned: " + r.text[:500])
    generator = ET.fromstring(r.content).find("channel/generator")
    if generator is not None:
        backend = generator.text
        logger.info(
            "Found generator tag indicating that indexer %s is a %s based indexer"
            % (host, backend))
    else:
        logger.info("Assuming indexer %s is a newznab based indexer" % host)
        backend = "newznab"

    try:
        categories = []
        subCategories = {}
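        # the caps XML lists main categories as <category name="..."> with nested <subcat id="..." name="..."/> elements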
        for xmlMainCategory in tree.find("categories").findall("category"):
            categories.append(xmlMainCategory.attrib["name"].lower())
            for subcat in xmlMainCategory.findall("subcat"):
                subCategories[subcat.attrib["id"]] = subcat.attrib["name"]
        animeCategory = getCategoryNumberOrNone(
            subCategories, ["5070", "7040"],
            ["anime", "tv/anime", "tv->anime"])
        comicCategory = getCategoryNumberOrNone(
            subCategories, ["7030"], ["comic", "comics", "books/comics"])
        magazineCategory = getCategoryNumberOrNone(
            subCategories, ["7010"], ["magazine", "mags", "magazines"])
        audiobookCategory = getCategoryNumberOrNone(
            subCategories, ["3030"], ["audiobook", "audio", "audio/audiobook"])
        ebookCategory = getCategoryNumberOrNone(subCategories,
                                                ["7020", "4050"], ["ebook"])
        supportedCategories = []
        if "movies" in categories:
            supportedCategories.extend(["movies", "movieshd", "moviessd"])
        if "tv" in categories:
            supportedCategories.extend(["tv", "tvhd", "tvsd"])
        if "audio" in categories or "music" in categories:
            supportedCategories.extend(["audio", "flac", "mp3"])
        if "xxx" in categories or "adult" in categories:
            supportedCategories.append("xxx")
        if "console" in categories or "gaming" in categories or "games" in categories:
            supportedCategories.append("console")
        if "apps" in categories or "pc" in categories:
            supportedCategories.append("pc")
        if animeCategory:
            supportedCategories.append("anime")
        if comicCategory:
            supportedCategories.append("comic")
        if audiobookCategory:
            supportedCategories.append("audiobook")
        if ebookCategory:
            supportedCategories.append("ebook")

        searching = tree.find("searching")
        if searching is not None and not skipIdsAndTypes:
            book_search = searching.find("book-search")
            if book_search is not None and book_search.attrib[
                    "available"] == "yes":
                supportedTypes.append("book")
                logger.debug("Found supported book search")

            can_handle = [y["id"] for y in toCheck]
            supportedIds = [x for x in supportedIds
                            if x in can_handle]  # Only use those we can handle

        if not skipIdsAndTypes:
            logger.info(
                "Checking capabilities of indexer by brute force to make sure supported search types are correctly recognized"
            )
            supportedIds, supportedTypes = checkCapsBruteForce(
                supportedTypes,
                toCheck,
                host,
                apikey,
                username=username,
                password=password)

        return True, None, {
            "animeCategory": animeCategory,
            "comicCategory": comicCategory,
            "magazineCategory": magazineCategory,
            "audiobookCategory": audiobookCategory,
            "ebookCategory": ebookCategory,
            "supportedIds": sorted(list(set(supportedIds))),
            "supportedTypes": sorted(list(set(supportedTypes))),
            "supportedCategories": supportedCategories,
            "supportsAllCategories": True,
            "backend": backend
        }
    except Exception as e:
        message = e.message if hasattr(e, "message") else str(e)
        logger.error("Error getting or parsing caps XML. Error message: %s" %
                     message)
        return False, "Unable to check caps: %s" % message, None
Example 22
    def update(self):
        """
        Downloads the latest source tarball from github and installs it over the existing version.
        """
        base_url = furl(self.repositoryBase)
        base_url.path.add(self.repository)
        base_url.path.add("tarball")
        base_url.path.add(self.branch)
        tar_download_url = base_url.url
        main_dir = os.path.dirname(os.path.dirname(__file__))

        try:
            self.backup()
            
            # prepare the update dir
            update_dir = os.path.join(main_dir, 'update')

            if os.path.isdir(update_dir):
                logger.info("Clearing out update folder " + update_dir + " before extracting")
                shutil.rmtree(update_dir)

            logger.info("Creating update folder " + update_dir + " before extracting")
            os.makedirs(update_dir)

            # retrieve file
            logger.info("Downloading update from " + repr(tar_download_url))
            tar_download_path = os.path.join(update_dir, 'sb-update.tar')
            response = webaccess.get(tar_download_url, stream=True)  # Apparently SSL causes problems on some systems (#138)
            with open(tar_download_path, 'wb') as out_file:
                shutil.copyfileobj(response.raw, out_file)
            del response

            if not os.path.isfile(tar_download_path):
                logger.error("Unable to retrieve new version from " + tar_download_url + ", can't update")
                return False

            if not tarfile.is_tarfile(tar_download_path):
                logger.error("Retrieved version from " + tar_download_url + " is corrupt, can't update")
                return False

            # extract to sb-update dir
            logger.info("Extracting update file " + tar_download_path)
            tar = tarfile.open(tar_download_path)
            tar.extractall(update_dir)
            tar.close()

            # delete .tar.gz
            logger.info("Deleting update file " + tar_download_path)
            os.remove(tar_download_path)

            # find update dir name
            update_dir_contents = [x for x in os.listdir(update_dir) if os.path.isdir(os.path.join(update_dir, x))]
            if len(update_dir_contents) != 1:
                logger.error("Invalid update data, update failed: " + str(update_dir_contents))
                return False
            content_dir = os.path.join(update_dir, update_dir_contents[0])

            # walk temp folder and move files to main folder
            logger.info("Moving files from " + content_dir + " to " + main_dir)
            for dirname, dirnames, filenames in os.walk(content_dir):
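                # strip the extracted root so each file is recreated at the same relative path under main_dir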
                dirname = dirname[len(content_dir) + 1:]
                for curfile in filenames:
                    old_path = os.path.join(content_dir, dirname, curfile)
                    new_path = os.path.join(main_dir, dirname, curfile)
            
                    if os.path.isfile(new_path):
                        os.remove(new_path)
                    os.renames(old_path, new_path)


        except Exception as e:
            logger.error("Error while trying to update: " + str(e))
            return False
        logger.info("Update successful")
        return True
Example 23
def check_caps(host, apikey, username=None, password=None, userAgent=None, timeout=None, skipIdsAndTypes=False):
    toCheck = [
        {"t": "tvsearch",
         "id": "tvdbid",
         "key": "121361",
         "expected": "Thrones"
         },
        {"t": "movie",
         "id": "imdbid",
         "key": "0848228",
         "expected": "Avengers"
         },
        {"t": "tvsearch",
         "id": "rid",
         "key": "24493",
         "expected": "Thrones"
         },
        {"t": "tvsearch",
         "id": "tvmazeid",
         "key": "82",
         "expected": "Thrones"
         },
        {"t": "tvsearch",
         "id": "traktid",
         "key": "1390",
         "expected": "Thrones"
         },
        {"t": "tvsearch",
         "id": "tmdbid",
         "key": "1399",
         "expected": "Thrones"
         }

    ]
    supportedIds = []
    supportedTypes = []
    # Try to find out from caps first
    try:
        url = _build_base_url(host, apikey, "caps", None)
        headers = {
            'User-Agent': userAgent if userAgent is not None else config.settings.searching.userAgent
        }
        logger.debug("Requesting %s" % url)
        r = webaccess.get(url, timeout=timeout if timeout is not None else config.settings.searching.timeout, headers=headers, auth=HTTPBasicAuth(username, password) if username is not None else None)
        r.raise_for_status()

        tree = ET.fromstring(r.content)

        categories = []
        subCategories = {}
        for xmlMainCategory in tree.find("categories").findall("category"):
            categories.append(xmlMainCategory.attrib["name"].lower())
            for subcat in xmlMainCategory.findall("subcat"):
                subCategories[subcat.attrib["id"]] = subcat.attrib["name"]
        animeCategory = getCategoryNumberOrNone(subCategories, ["5070", "7040"], ["anime", "tv/anime", "tv->anime"])
        comicCategory = getCategoryNumberOrNone(subCategories, ["7030"], ["comic", "comics", "books/comics"])
        magazineCategory = getCategoryNumberOrNone(subCategories, ["7010"], ["magazine", "mags", "magazines"])
        audiobookCategory = getCategoryNumberOrNone(subCategories, ["3030"], ["audiobook", "audio", "audio/audiobook"])
        ebookCategory = getCategoryNumberOrNone(subCategories, ["7020", "4050"], ["ebook"])
        supportedCategories = []
        if "movies" in categories:
            supportedCategories.extend(["movies", "movieshd", "moviessd"])
        if "tv" in categories:
            supportedCategories.extend(["tv", "tvhd", "tvsd"])
        if "audio" in categories:
            supportedCategories.extend(["audio", "flac", "mp3"])
        if "xxx" in categories or "adult" in categories:
            supportedCategories.append("xxx")
        if "console" in categories or "gaming" in categories:
            supportedCategories.append("console")
        if "apps" in categories or "pc" in categories:
            supportedCategories.append("pc")
        if animeCategory:
            supportedCategories.append("anime")
        if comicCategory:
            supportedCategories.append("comic")
        if audiobookCategory:
            supportedCategories.append("audiobook")
        if ebookCategory:
            supportedCategories.append("ebook")
        

        searching = tree.find("searching")
        if searching is not None and not skipIdsAndTypes:
            book_search = searching.find("book-search")
            if book_search is not None and book_search.attrib["available"] == "yes":
                supportedTypes.append("movie")
                logger.debug("Found supported book search")

            can_handle = [y["id"] for y in toCheck]
            supportedIds = [x for x in supportedIds if x in can_handle]  # Only use those we can handle

        if not skipIdsAndTypes:
            logger.info("Checking capabilities of indexer by brute force to make sure supported search types are correctly recognized")
            supportedIds, supportedTypes = checkCapsBruteForce(supportedTypes, toCheck, host, apikey, username=username, password=password)

        #Check indexer type (nzedb, newznab, nntmux)
        url = _build_base_url(host, apikey, "tvsearch", None)
        headers = {
            'User-Agent': userAgent if userAgent is not None else config.settings.searching.userAgent
        }
        logger.debug("Requesting %s" % url)
        r = webaccess.get(url, timeout=timeout if timeout is not None else config.settings.searching.timeout, headers=headers, auth=HTTPBasicAuth(username, password) if username is not None else None)
        r.raise_for_status()
        generator = ET.fromstring(r.content).find("channel/generator")
        if generator is not None:
            backend = generator.text
            logger.info("Found generator tag indicating that indexer %s is a %s based indexer" % (host, backend))
        else:
            logger.info("Assuming indexer %s is a newznab based indexer" % host)
            backend = "newznab"


        return {
            "animeCategory": animeCategory, 
            "comicCategory": comicCategory, 
            "magazineCategory": magazineCategory, 
            "audiobookCategory": audiobookCategory, 
            "ebookCategory": ebookCategory, 
            "supportedIds": sorted(list(set(supportedIds))), 
            "supportedTypes": sorted(list(set(supportedTypes))),
            "supportedCategories": supportedCategories,
            "supportsAllCategories": len(supportedCategories) == getNumberOfSelectableCategories() - 1, #Without "all
            "backend": backend
        }

    except HTTPError as e:
        logger.error("Error while trying to determine caps: %s" % e)
        raise IndexerResultParsingException("Unable to check caps: %s" % str(e), None)
    except Exception as e:
        logger.error("Error getting or parsing caps XML. Error message: %s" % e)
        return None
Example 24
def run(arguments):
    arguments.config = arguments.config if os.path.isabs(
        arguments.config) else os.path.join(nzbhydra.getBasePath(),
                                            arguments.config)
    arguments.database = arguments.database if os.path.isabs(
        arguments.database) else os.path.join(nzbhydra.getBasePath(),
                                              arguments.database)
    nzbhydra.configFile = settings_file = arguments.config
    nzbhydra.databaseFile = database_file = arguments.database

    logger.notice("Loading settings from {}".format(settings_file))
    try:
        config.load(settings_file)
        config.save(settings_file)  # Write any new settings back to the file
        log.setup_custom_logger(arguments.logfile, arguments.quiet)
    except Exception:
        print(
            "An error occurred while migrating the old config. Sorry about that...: "
        )
        traceback.print_exc(file=sys.stdout)
        print("Trying to log messages from migration...")
        config.logLogMessages()
        os._exit(-5)

    try:
        logger.info("Started")

        if arguments.daemon:
            logger.info("Daemonizing...")
            daemonize(arguments.pidfile)

        config.logLogMessages()

        if arguments.clearloganddb:
            logger.warning("Deleting log file and database now as requested")
            try:
                logger.warning("Deleting database file %s" % database_file)
                os.unlink(database_file)
            except Exception as e:
                logger.error("Unable to close or delete log file: %s" % e)

            try:
                handler = logger.handlers[1] if len(
                    logger.handlers) == 2 else logger.handlers[0]
                filename = handler.stream.name

                if filename and os.path.exists(filename):
                    logger.warn("Deleting file %s" % filename)
                handler.flush()
                handler.close()
                logger.removeHandler(handler)
                os.unlink(filename)
                logger.addHandler(handler)
            except Exception as e:
                print("Unable to close or delete log file: %s" % e)

        try:
            import _sqlite3
            logger.debug("SQLite3 version: %s" % _sqlite3.sqlite_version)
        except:
            logger.error("Unable to log SQLite version")

        logger.info("Loading database file %s" % database_file)
        if not os.path.exists(database_file):
            database.init_db(database_file)
        else:
            database.update_db(database_file)
        logger.info("Starting db")

        indexers.read_indexers_from_config()

        if config.settings.main.debug:
            logger.info("Debug mode enabled")

        # Clean up any "old" files from last update
        oldfiles = glob.glob("*.updated")
        if len(oldfiles) > 0:
            logger.info("Deleting %d old files remaining from update" %
                        len(oldfiles))
            for filename in oldfiles:
                try:
                    if "hydratray" not in filename:
                        logger.debug("Deleting %s" % filename)
                        os.remove(filename)
                    else:
                        logger.debug(
                            "Not deleting %s because it's still running. TrayHelper will restart itself"
                            % filename)
                except Exception:
                    logger.warn(
                        "Unable to delete old file %s. Please delete manually"
                        % filename)

        host = config.settings.main.host if arguments.host is None else arguments.host
        port = config.settings.main.port if arguments.port is None else arguments.port
        nzbhydra.urlBase = config.settings.main.urlBase if arguments.urlbase is None else arguments.urlbase

        socksproxy = config.settings.main.socksProxy if arguments.socksproxy is None else arguments.socksproxy
        if socksproxy:
            webaccess.set_proxies(socksproxy)
        elif config.settings.main.httpProxy:
            webaccess.set_proxies(config.settings.main.httpProxy,
                                  config.settings.main.httpsProxy)

        # Download a very small file from github to get a good estimate how many instances of hydra are running. Only executed once per installation (well, per settings.cfg instance)
        if not config.settings.main.downloadCounterExecuted and not config.settings.main.isFirstStart:
            try:
                webaccess.get(
                    "https://github.com/theotherp/apitests/releases/download/v5.0.0/downloadcounter2.zip"
                )
            except:
                pass
            config.settings.main.downloadCounterExecuted = True
            config.save()

        if config.settings.main.externalUrl is not None and config.settings.main.externalUrl != "":
            f = furl(config.settings.main.externalUrl)
            logger.notice("Starting web app on %s:%d" % (host, port))
        else:
            f = furl()

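            # a bind-all address is not usable in a browser URL, so substitute localhost and bracket IPv6 hosts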
            if config.settings.main.host == "0.0.0.0":
                f.host = "127.0.0.1"
            elif config.settings.main.host == "::":
                f.host = "[::1]"
            elif ":" in config.settings.main.host:
                f.host = "[%s]" % config.settings.main.host
            else:
                f.host = config.settings.main.host
            f.port = port
            f.scheme = "https" if config.settings.main.ssl else "http"
            if nzbhydra.urlBase is not None:
                f.path = nzbhydra.urlBase + "/"
            logger.notice("Starting web app on %s:%d" % (f.host, port))
        if not arguments.nobrowser and config.settings.main.startupBrowser:
            if arguments.restarted:
                logger.info("Not opening the browser after restart")
            else:
                logger.info("Opening browser to %s" % f.url)
                webbrowser.open_new(f.url)
        else:
            logger.notice("Go to %s for the frontend" % f.url)
        if config.settings.main.isFirstStart:
            config.settings.main.isFirstStart = False
            config.save()
        web.run(host, port, basepath)
    except Exception:
        logger.exception("Fatal error occurred")
Example 25
 def get(self, query, timeout=None, cookies=None):
     # overwrite for special handling, e.g. cookies
     return webaccess.get(query, timeout=timeout, cookies={"agreed": "true", "lang": "2"})