Example #1
def parse_date_time(d, t, network, dateOnly=False):
    """
    Parse date and time string into local time
    :param d: date string
    :param t: time string
    :param network: network to use as base
    :param dateOnly: if True, only set the timezone and leave the time at midnight
    :return: datetime object containing local time
    """

    parsed_time = time_regex.search(t)
    network_tz = get_network_timezone(network, load_network_dict())

    hr = 0
    m = 0

    if parsed_time:
        hr = tryInt(parsed_time.group("hour"))
        m = tryInt(parsed_time.group("minute"))

        ap = parsed_time.group("meridiem")
        ap = ap[0].lower() if ap else ""

        if ap == "a" and hr == 12:
            hr -= 12
        elif ap == "p" and hr != 12:
            hr += 12

        hr = hr if 0 <= hr <= 23 else 0
        m = m if 0 <= m <= 59 else 0

    result = datetime.fromordinal(max(tryInt(d), 1))

    if dateOnly:
        return result.replace(tzinfo=network_tz)
    return result.replace(hour=hr, minute=m, tzinfo=network_tz)
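
The meridiem branch above is the easy part to get wrong: 12 AM maps to hour 0 while 12 PM stays 12. A minimal, self-contained sketch of that conversion, independent of the module's time_regex and tryInt helpers:

def to_24_hour(hour, meridiem):
    # meridiem is expected as '', 'a', or 'p' (already lower-cased)
    if meridiem == "a" and hour == 12:
        hour -= 12          # 12:xx AM -> 00:xx
    elif meridiem == "p" and hour != 12:
        hour += 12          # 8:xx PM -> 20:xx
    return hour if 0 <= hour <= 23 else 0

assert to_24_hour(12, "a") == 0
assert to_24_hour(12, "p") == 12
assert to_24_hour(8, "p") == 20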
Example #2
def parse_date_time(d, t, network):
    """
    Parse date and time string into local time

    :param d: date string
    :param t: time string
    :param network: network to use as base
    :return: datetime object containing local time
    """

    if not network_dict:
        load_network_dict()

    mo = time_regex.search(t)
    if mo is not None and len(mo.groups()) >= 5:
        if mo.group(5) is not None:
            try:
                hr = tryInt(mo.group(1))
                m = tryInt(mo.group(4))
                ap = mo.group(5)
                # convert am/pm to 24 hour clock
                if ap is not None:
                    if pm_regex.search(ap) is not None and hr != 12:
                        hr += 12
                    elif am_regex.search(ap) is not None and hr == 12:
                        hr -= 12
            except Exception:
                hr = 0
                m = 0
        else:
            try:
                hr = tryInt(mo.group(1))
                m = tryInt(mo.group(6))
            except Exception:
                hr = 0
                m = 0
    else:
        hr = 0
        m = 0
    if hr < 0 or hr > 23 or m < 0 or m > 59:
        hr = 0
        m = 0

    te = datetime.datetime.fromordinal(tryInt(d) or 1)
    try:
        foreign_timezone = get_network_timezone(network, load_network_dict())
        return datetime.datetime(te.year,
                                 te.month,
                                 te.day,
                                 hr,
                                 m,
                                 tzinfo=foreign_timezone)
    except Exception:
        return datetime.datetime(te.year,
                                 te.month,
                                 te.day,
                                 hr,
                                 m,
                                 tzinfo=sr_timezone)
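
Note that both variants convert `d` with datetime.fromordinal, so `d` is treated as a proleptic Gregorian ordinal (days since 0001-01-01) rather than a formatted date string; the `tryInt(d) or 1` / `max(tryInt(d), 1)` guards matter because fromordinal raises ValueError for ordinals below 1. A small sketch of that guard on its own:

from datetime import datetime

def ordinal_to_date(d):
    try:
        ordinal = int(d)
    except (TypeError, ValueError):
        ordinal = 0
    return datetime.fromordinal(ordinal or 1)  # clamp 0/garbage to ordinal 1

print(ordinal_to_date(736330))  # 2017-01-01 00:00:00
print(ordinal_to_date("bad"))   # 0001-01-01 00:00:00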
Example #3
    def search(self, search_strings, search_mode='eponly', epcount=0, age=0, epObj=None):

        results = []
        items = {'Season': [], 'Episode': [], 'RSS': []}

        for mode in search_strings.keys():  # Mode = RSS, Season, Episode
            sickrage.srCore.srLogger.debug("Search Mode: %s" % mode)
            for search_string in search_strings[mode]:
                if mode != 'RSS':
                    sickrage.srCore.srLogger.debug("Search string: %s " % search_string)

                searchURL = self.urls['base_url'] + "?s=%s&out=json&filter=2101&num=150" % quote_plus(
                    search_string.encode('utf-8'))

                sickrage.srCore.srLogger.debug("Search URL: %s" % searchURL)

                try:
                    torrents = sickrage.srCore.srWebSession.get(searchURL).json()
                    if int(torrents.get("total_found", 0)) <= 0:
                        raise ValueError("no results found")
                except Exception:
                    sickrage.srCore.srLogger.debug("No data returned from provider")
                    continue

                if "total_found" in torrents:
                    del torrents["total_found"]

                for i in torrents:
                    title = torrents[i]["title"]
                    seeders = tryInt(torrents[i]["seeds"], 1)
                    leechers = tryInt(torrents[i]["leechs"], 0)
                    if seeders < self.minseed or leechers < self.minleech:
                        if mode != 'RSS':
                            sickrage.srCore.srLogger.debug(
                                "Discarding torrent because it doesn't meet the minimum seeders or leechers: %s" % title)
                        continue

                    t_hash = torrents[i]["torrent_hash"]
                    download_url = "magnet:?xt=urn:btih:" + t_hash + "&dn=" + title
                    size = int(torrents[i]["torrent_size"])

                    if not all([title, download_url]):
                        continue

                    item = title, download_url, size, seeders, leechers

                    if mode != 'RSS':
                        sickrage.srCore.srLogger.debug("Found result: %s" % title)

                    items[mode].append(item)

            # For each search mode sort all the items by seeders
            items[mode].sort(key=lambda tup: tup[3], reverse=True)

            results += items[mode]

        return results
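
The download URL here is just a magnet link assembled from the info hash and the title. A standalone sketch of that construction; the display name really ought to be percent-encoded, which the provider code skips (quote is urllib.quote on Python 2, urllib.parse.quote on Python 3):

try:
    from urllib.parse import quote  # Python 3
except ImportError:
    from urllib import quote        # Python 2

def build_magnet(info_hash, name, trackers=()):
    # info_hash is assumed to be the 40-char hex BTIH digest
    uri = "magnet:?xt=urn:btih:" + info_hash + "&dn=" + quote(name)
    uri += "".join("&tr=" + quote(t, safe="") for t in trackers)
    return uri

print(build_magnet("a" * 40, "Some Show S01E01",
                   trackers=["udp://tracker.example.org:6969"]))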
Example #4
def getFileMetadata(filename):
    import enzyme

    try:
        p = enzyme.parse(filename)

        # Video codec
        vc = ('H264' if p.video[0].codec == 'AVC1' else
              'x265' if p.video[0].codec == 'HEVC' else p.video[0].codec)

        # Audio codec
        # use the mapped name when known, otherwise keep the raw codec id
        ac = audio_codec_map.get(p.audio[0].codec, p.audio[0].codec)

        # Find title in video headers
        titles = []

        try:
            if p.title:
                titles.append(p.title)
        except Exception:
            sickrage.srCore.srLogger.error(
                'Failed getting title from meta: %s', traceback.format_exc())

        for video in p.video:
            try:
                if video.title:
                    titles.append(video.title)
            except Exception:
                sickrage.srCore.srLogger.error(
                    'Failed getting title from meta: %s',
                    traceback.format_exc())

        return {
            'titles': list(set(titles)),
            'video': vc,
            'audio': ac,
            'resolution_width': tryInt(p.video[0].width),
            'resolution_height': tryInt(p.video[0].height),
            'audio_channels': p.audio[0].channels,
        }
    except enzyme.exceptions.ParseError:
        sickrage.srCore.srLogger.debug('Failed to parse meta for %s', filename)
    except enzyme.exceptions.NoParserError:
        sickrage.srCore.srLogger.debug('No parser found for %s', filename)
    except Exception:
        sickrage.srCore.srLogger.debug('Failed parsing %s', filename)

    return {}
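
One pitfall in the original audio-codec lookup: dict.get never raises, so the try/except around it was dead code, and a lookup miss silently replaced the fallback value with None. Passing the raw codec id as the default keeps the fallback. The table below is a stand-in; the real audio_codec_map is module-level in the source:

audio_codec_map = {0x2000: 'AC3', 0x2001: 'DTS', 0x0055: 'MP3'}  # stand-in

def map_audio_codec(codec_id):
    return audio_codec_map.get(codec_id, codec_id)

assert map_audio_codec(0x2000) == 'AC3'
assert map_audio_codec('FLAC') == 'FLAC'  # unknown ids pass through unchanged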
Example #5
def parse_date_time(d, t, network):
    """
    Parse date and time string into local time

    :param d: date string
    :param t: time string
    :param network: network to use as base
    :return: datetime object containing local time
    """

    if not network_dict:
        load_network_dict()

    mo = time_regex.search(t)
    if mo is not None and len(mo.groups()) >= 5:
        if mo.group(5) is not None:
            try:
                hr = tryInt(mo.group(1))
                m = tryInt(mo.group(4))
                ap = mo.group(5)
                # convert am/pm to 24 hour clock
                if ap is not None:
                    if pm_regex.search(ap) is not None and hr != 12:
                        hr += 12
                    elif am_regex.search(ap) is not None and hr == 12:
                        hr -= 12
            except Exception:
                hr = 0
                m = 0
        else:
            try:
                hr = tryInt(mo.group(1))
                m = tryInt(mo.group(6))
            except Exception:
                hr = 0
                m = 0
    else:
        hr = 0
        m = 0
    if hr < 0 or hr > 23 or m < 0 or m > 59:
        hr = 0
        m = 0

    te = datetime.datetime.fromordinal(tryInt(d) or 1)
    try:
        foreign_timezone = get_network_timezone(network, load_network_dict())
        return datetime.datetime(te.year, te.month, te.day, hr, m, tzinfo=foreign_timezone)
    except Exception:
        return datetime.datetime(te.year, te.month, te.day, hr, m, tzinfo=sr_timezone)
Example #6
def getFileMetadata(filename):
    import enzyme

    try:
        p = enzyme.parse(filename)

        # Video codec
        vc = ('H264' if p.video[0].codec == 'AVC1' else 'x265' if p.video[0].codec == 'HEVC' else p.video[0].codec)

        # Audio codec
        # use the mapped name when known, otherwise keep the raw codec id
        ac = audio_codec_map.get(p.audio[0].codec, p.audio[0].codec)

        # Find title in video headers
        titles = []

        try:
            if p.title:
                titles.append(p.title)
        except Exception:
            sickrage.srCore.srLogger.error('Failed getting title from meta: %s', traceback.format_exc())

        for video in p.video:
            try:
                if video.title:
                    titles.append(video.title)
            except Exception:
                sickrage.srCore.srLogger.error('Failed getting title from meta: %s', traceback.format_exc())

        return {
            'titles': list(set(titles)),
            'video': vc,
            'audio': ac,
            'resolution_width': tryInt(p.video[0].width),
            'resolution_height': tryInt(p.video[0].height),
            'audio_channels': p.audio[0].channels,
        }
    except enzyme.exceptions.ParseError:
        sickrage.srCore.srLogger.debug('Failed to parse meta for %s', filename)
    except enzyme.exceptions.NoParserError:
        sickrage.srCore.srLogger.debug('No parser found for %s', filename)
    except Exception:
        sickrage.srCore.srLogger.debug('Failed parsing %s', filename)

    return {}
Example #7
def parse_date_time(d, t, network, dateOnly=False):
    """
    Parse date and time string into local time
    :param d: date string
    :param t: time string
    :param network: network to use as base
    :param dateOnly: if True, only set the timezone and leave the time at midnight
    :return: datetime object containing local time
    """

    if not network_dict:
        load_network_dict()

    parsed_time = time_regex.search(t)
    network_tz = get_network_timezone(network)

    hr = 0
    m = 0

    if parsed_time:
        hr = tryInt(parsed_time.group('hour'))
        m = tryInt(parsed_time.group('minute'))

        ap = parsed_time.group('meridiem')
        ap = ap[0].lower() if ap else ''

        if ap == 'a' and hr == 12:
            hr -= 12
        elif ap == 'p' and hr != 12:
            hr += 12

        hr = hr if 0 <= hr <= 23 else 0
        m = m if 0 <= m <= 59 else 0

    result = datetime.fromordinal(max(tryInt(d), 1))

    if dateOnly:
        return result.replace(tzinfo=network_tz)
    return result.replace(hour=hr, minute=m, tzinfo=network_tz)
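
The replace(..., tzinfo=network_tz) calls attach the zone without converting: the wall-clock fields are kept and the zone is simply stamped on, which is what you want when the parsed time is already local to the network. A sketch with a fixed-offset zone standing in for get_network_timezone():

from datetime import datetime, timedelta, timezone  # timezone: Python 3.2+

network_tz = timezone(timedelta(hours=-5))  # stand-in for get_network_timezone()
airtime = datetime(2017, 1, 1).replace(hour=20, minute=30, tzinfo=network_tz)
print(airtime.isoformat())  # 2017-01-01T20:30:00-05:00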
Example #8
    def search(self, search_params, search_mode='eponly', epcount=0, age=0, epObj=None):

        results = []
        items = {'Season': [], 'Episode': [], 'RSS': []}

        freeleech = '3' if self.freeleech else '0'

        if not self.login():
            return results

        for mode in search_params.keys():
            sickrage.srCore.srLogger.debug("Search Mode: %s" % mode)
            for search_string in search_params[mode]:

                if mode != 'RSS':
                    sickrage.srCore.srLogger.debug("Search string: %s " % search_string)

                searchURL = self.urls['search'] % (freeleech, search_string)
                sickrage.srCore.srLogger.debug("Search URL: %s" % searchURL)
                max_page_number = 0

                try:
                    data = sickrage.srCore.srWebSession.get(searchURL).text
                except Exception:
                    sickrage.srCore.srLogger.debug("No data returned from provider")
                    continue

                try:
                    with bs4_parser(data) as html:

                        # Check to see if there is more than 1 page of results
                        pager = html.find('div', {'class': 'pager'})
                        if pager:
                            page_links = pager.find_all('a', href=True)
                        else:
                            page_links = []

                        if len(page_links) > 0:
                            for lnk in page_links:
                                link_text = lnk.text.strip()
                                if link_text.isdigit():
                                    page_int = int(link_text)
                                    if page_int > max_page_number:
                                        max_page_number = page_int

                        # limit page number to 15 just in case something goes wrong
                        if max_page_number > 15:
                            max_page_number = 15
                        # limit RSS search
                        if max_page_number > 3 and mode == 'RSS':
                            max_page_number = 3
                except Exception:
                    sickrage.srCore.srLogger.error("Failed parsing provider. Traceback: %s" % traceback.format_exc())
                    continue

                data_response_list = [data]

                # Freshon starts counting pages from zero, even though it displays numbers from 1
                if max_page_number > 1:
                    for i in range(1, max_page_number):
                        time.sleep(1)
                        page_searchURL = searchURL + '&page=' + str(i)

                        try:
                            page_html = sickrage.srCore.srWebSession.get(page_searchURL).text
                        except Exception:
                            continue

                        data_response_list.append(page_html)

                try:

                    for data in data_response_list:

                        with bs4_parser(data) as html:

                            torrent_rows = html.findAll("tr", {"class": re.compile('torrent_[0-9]*')})

                            # Continue only if a Release is found
                            if len(torrent_rows) == 0:
                                sickrage.srCore.srLogger.debug("Data returned from provider does not contain any torrents")
                                continue

                            for individual_torrent in torrent_rows:

                                # skip if torrent has been nuked due to poor quality
                                if individual_torrent.find('img', alt='Nuked') is not None:
                                    continue

                                try:
                                    title = individual_torrent.find('a', {'class': 'torrent_name_link'})['title']
                                except Exception:
                                    sickrage.srCore.srLogger.warning(
                                        "Unable to parse torrent title. Traceback: %s " % traceback.format_exc())
                                    continue

                                try:
                                    details_url = individual_torrent.find('a', {'class': 'torrent_name_link'})['href']
                                    torrent_id = int((re.match('.*?([0-9]+)$', details_url).group(1)).strip())
                                    download_url = self.urls['download'] % (str(torrent_id))
                                    seeders = tryInt(individual_torrent.find('td', {'class': 'table_seeders'}).find(
                                        'span').text.strip(), 1)
                                    leechers = tryInt(individual_torrent.find('td', {'class': 'table_leechers'}).find(
                                        'a').text.strip(), 0)
                                    # FIXME
                                    size = -1
                                except Exception:
                                    continue

                                if not all([title, download_url]):
                                    continue

                                # Filter unseeded torrent
                                if seeders < self.minseed or leechers < self.minleech:
                                    if mode != 'RSS':
                                        sickrage.srCore.srLogger.debug(
                                            "Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(
                                                title, seeders, leechers))
                                    continue

                                item = title, download_url, size, seeders, leechers
                                if mode != 'RSS':
                                    sickrage.srCore.srLogger.debug("Found result: %s " % title)

                                items[mode].append(item)

                except Exception:
                    sickrage.srCore.srLogger.error("Failed parsing provider. Traceback: %s" % traceback.format_exc())

            # For each search mode sort all the items by seeders if available
            items[mode].sort(key=lambda tup: tup[3], reverse=True)

            results += items[mode]

        return results
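
The pager-scanning block above reduces to: collect the numeric page links, keep the largest, and cap it. A self-contained sketch (assumes beautifulsoup4 is installed; the markup is a made-up stand-in for the site's pager):

from bs4 import BeautifulSoup

def max_page(doc, cap=15):
    pager = BeautifulSoup(doc, "html.parser").find("div", {"class": "pager"})
    links = pager.find_all("a", href=True) if pager else []
    pages = [int(a.text.strip()) for a in links if a.text.strip().isdigit()]
    return min(max(pages, default=0), cap)  # max(..., default=...) needs Python 3.4+

html = '<div class="pager"><a href="#">1</a><a href="#">2</a><a href="#">next</a></div>'
print(max_page(html))  # 2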
Example #9
    def search(self,
               search_strings,
               search_mode='eponly',
               epcount=0,
               age=0,
               epObj=None):

        results = []
        items = {'Season': [], 'Episode': [], 'RSS': []}

        for mode in search_strings.keys():  # Mode = RSS, Season, Episode
            sickrage.srCore.srLogger.debug("Search Mode: %s" % mode)
            for search_string in search_strings[mode]:
                if mode != 'RSS':
                    sickrage.srCore.srLogger.debug("Search string: %s " %
                                                   search_string)

                searchURL = self.urls['base_url'] + "?s=%s&out=json&filter=2101&num=150" % quote_plus(
                    search_string.encode('utf-8'))

                sickrage.srCore.srLogger.debug("Search URL: %s" % searchURL)

                try:
                    torrents = sickrage.srCore.srWebSession.get(
                        searchURL).json()
                    if int(torrents.get("total_found", 0)) <= 0:
                        raise ValueError("no results found")
                except Exception:
                    sickrage.srCore.srLogger.debug(
                        "No data returned from provider")
                    continue

                if "total_found" in torrents:
                    del torrents["total_found"]

                for i in torrents:
                    title = torrents[i]["title"]
                    seeders = tryInt(torrents[i]["seeds"], 1)
                    leechers = tryInt(torrents[i]["leechs"], 0)
                    if seeders < self.minseed or leechers < self.minleech:
                        if mode != 'RSS':
                            sickrage.srCore.srLogger.debug(
                                "Discarding torrent because it doesn't meet the minimum seeders or leechers: %s"
                                % title)
                        continue

                    t_hash = torrents[i]["torrent_hash"]
                    download_url = "magnet:?xt=urn:btih:" + t_hash + "&dn=" + title
                    size = int(torrents[i]["torrent_size"])

                    if not all([title, download_url]):
                        continue

                    item = title, download_url, size, seeders, leechers

                    if mode != 'RSS':
                        sickrage.srCore.srLogger.debug("Found result: %s" %
                                                       title)

                    items[mode].append(item)

            # For each search mode sort all the items by seeders
            items[mode].sort(key=lambda tup: tup[3], reverse=True)

            results += items[mode]

        return results
Example #10
    def sendNZB(nzb, proper=False):
        """
        Sends NZB to NZBGet client

        :param nzb: nzb object
        :param proper: True if this is a Proper download, False if not. Defaults to False
        """
        addToTop = False
        nzbgetprio = 0
        category = sickrage.NZBGET_CATEGORY
        if nzb.show.is_anime:
            category = sickrage.NZBGET_CATEGORY_ANIME

        if sickrage.NZBGET_USE_HTTPS:
            nzbgetXMLrpc = "https://%(username)s:%(password)s@%(host)s/xmlrpc"
        else:
            nzbgetXMLrpc = "http://%(username)s:%(password)s@%(host)s/xmlrpc"

        if sickrage.NZBGET_HOST is None:
            sickrage.LOGGER.error("No NZBget host found in configuration. Please configure it.")
            return False

        url = nzbgetXMLrpc % {"host": sickrage.NZBGET_HOST, "username": sickrage.NZBGET_USERNAME,
                              "password": sickrage.NZBGET_PASSWORD}

        nzbGetRPC = xmlrpclib.ServerProxy(url)
        try:
            if nzbGetRPC.writelog("INFO", "SiCKRAGE connected to drop of %s any moment now." % (nzb.name + ".nzb")):
                sickrage.LOGGER.debug("Successfully connected to NZBget")
            else:
                sickrage.LOGGER.error("Successfully connected to NZBget, but unable to send a message")

        except httplib.socket.error:
            sickrage.LOGGER.error(
                    "Please check your NZBget host and port (if it is running). NZBget is not responding to this combination")
            return False

        except xmlrpclib.ProtocolError as e:
            if e.errmsg == "Unauthorized":
                sickrage.LOGGER.error("NZBget username or password is incorrect.")
            else:
                sickrage.LOGGER.error("Protocol Error: " + e.errmsg)
            return False

        dupekey = ""
        dupescore = 0
        # if it aired recently make it high priority and generate DupeKey/Score
        for curEp in nzb.episodes:
            if dupekey == "":
                if curEp.show.indexer == 1:
                    dupekey = "SiCKRAGE-" + str(curEp.show.indexerid)
                elif curEp.show.indexer == 2:
                    dupekey = "SiCKRAGE-tvr" + str(curEp.show.indexerid)
            dupekey += "-" + str(curEp.season) + "." + str(curEp.episode)
            if datetime.date.today() - curEp.airdate <= datetime.timedelta(days=7):
                addToTop = True
                nzbgetprio = sickrage.NZBGET_PRIORITY
            else:
                category = sickrage.NZBGET_CATEGORY_BACKLOG
                if nzb.show.is_anime:
                    category = sickrage.NZBGET_CATEGORY_ANIME_BACKLOG

        if nzb.quality != Quality.UNKNOWN:
            dupescore = nzb.quality * 100
        if proper:
            dupescore += 10

        nzbcontent64 = None
        if nzb.resultType == "nzbdata":
            data = nzb.extraInfo[0]
            nzbcontent64 = standard_b64encode(data)

        sickrage.LOGGER.info("Sending NZB to NZBget")
        sickrage.LOGGER.debug("URL: " + url)

        try:
            # Find out if nzbget supports priority (Version 9.0+), old versions beginning with a 0.x will use the old command
            nzbget_version_str = nzbGetRPC.version()
            nzbget_version = tryInt(nzbget_version_str[:nzbget_version_str.find(".")])
            if nzbget_version == 0:
                if nzbcontent64 is not None:
                    nzbget_result = nzbGetRPC.append(nzb.name + ".nzb", category, addToTop, nzbcontent64)
                else:
                    if nzb.resultType == "nzb":
                        genProvider = GenericProvider("")
                        data = genProvider.getURL(nzb.url)
                        if data is None:
                            return False
                        nzbcontent64 = standard_b64encode(data)
                    nzbget_result = nzbGetRPC.append(nzb.name + ".nzb", category, addToTop, nzbcontent64)
            elif nzbget_version == 12:
                if nzbcontent64 is not None:
                    nzbget_result = nzbGetRPC.append(nzb.name + ".nzb", category, nzbgetprio, False,
                                                     nzbcontent64, False, dupekey, dupescore, "score")
                else:
                    nzbget_result = nzbGetRPC.appendurl(nzb.name + ".nzb", category, nzbgetprio, False,
                                                        nzb.url, False, dupekey, dupescore, "score")
            # v13+ has a new combined append method that accepts both (url and content)
            # also the return value has changed from boolean to integer
            # (Positive number representing NZBID of the queue item. 0 and negative numbers represent error codes.)
            elif nzbget_version >= 13:
                nzbget_result = True if nzbGetRPC.append(nzb.name + ".nzb",
                                                         nzbcontent64 if nzbcontent64 is not None else nzb.url,
                                                         category, nzbgetprio, False, False, dupekey, dupescore,
                                                         "score") > 0 else False
            else:
                if nzbcontent64 is not None:
                    nzbget_result = nzbGetRPC.append(nzb.name + ".nzb", category, nzbgetprio, False,
                                                     nzbcontent64)
                else:
                    nzbget_result = nzbGetRPC.appendurl(nzb.name + ".nzb", category, nzbgetprio, False,
                                                        nzb.url)

            if nzbget_result:
                sickrage.LOGGER.debug("NZB sent to NZBget successfully")
                return True
            else:
                sickrage.LOGGER.error("NZBget could not add %s to the queue" % (nzb.name + ".nzb"))
                return False
        except Exception:
            sickrage.LOGGER.error("Connect Error to NZBget: could not add %s to the queue" % (nzb.name + ".nzb"))
            return False
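
The version probe drives everything else in sendNZB, because NZBGet's append signature changed across major releases. A minimal sketch of connecting and reading the version over XML-RPC; host and credentials are placeholders, and this naturally needs a running NZBGet to answer:

try:
    import xmlrpc.client as xmlrpclib  # Python 3
except ImportError:
    import xmlrpclib                   # Python 2

url = "http://%(username)s:%(password)s@%(host)s/xmlrpc" % {
    "host": "localhost:6789", "username": "nzbget", "password": "changeme"}
rpc = xmlrpclib.ServerProxy(url)

version_str = rpc.version()             # e.g. "16.4"
major = int(version_str.split(".")[0])  # selects which append() form to call
print("NZBGet major version:", major)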
Example #11
    def _doSearch(self, search_strings, search_mode='eponly', epcount=0, age=0, epObj=None):

        results = []
        items = {'Season': [], 'Episode': [], 'RSS': []}

        for mode in search_strings.keys():  # Mode = RSS, Season, Episode
            sickrage.LOGGER.debug("Search Mode: %s" % mode)
            for search_string in search_strings[mode]:
                if mode != 'RSS':
                    sickrage.LOGGER.debug("Search string: %s " % search_string)

                searchURL = self.urls['api'] + "?s=%s&out=json&filter=2101&num=150" % quote_plus(
                        search_string.encode('utf-8'))

                sickrage.LOGGER.debug("Search URL: %s" % searchURL)
                torrents = self.getURL(searchURL, json=True)
                if not (torrents and "total_found" in torrents and int(torrents[b"total_found"]) > 0):
                    sickrage.LOGGER.debug("Data returned from provider does not contain any torrents")
                    continue

                del torrents[b"total_found"]

                for i in torrents:
                    title = torrents[i][b"title"]
                    seeders = tryInt(torrents[i][b"seeds"], 1)
                    leechers = tryInt(torrents[i][b"leechs"], 0)
                    if seeders < self.minseed or leechers < self.minleech:
                        if mode != 'RSS':
                            sickrage.LOGGER.debug("Discarding torrent because it doesn't meet the minimum seeders or leechers: %s" % title)
                        continue

                    t_hash = torrents[i][b"torrent_hash"]
                    size = int(torrents[i][b"torrent_size"])

                    try:
                        assert seeders < 10
                        assert mode != 'RSS'
                        sickrage.LOGGER.debug("Torrent has fewer than 10 seeds, fetching dynamic trackers: " + title)
                        trackerUrl = self.urls['api'] + t_hash + "/trackers_json"
                        jdata = self.getURL(trackerUrl, json=True)
                        assert jdata != "maintenance"
                        download_url = "magnet:?xt=urn:btih:" + t_hash + "&dn=" + title + "".join(
                                ["&tr=" + s for s in jdata])
                    except (Exception, AssertionError):
                        download_url = "magnet:?xt=urn:btih:" + t_hash + "&dn=" + title + "&tr=udp://tracker.openbittorrent.com:80&tr=udp://tracker.coppersurfer.tk:6969&tr=udp://open.demonii.com:1337&tr=udp://tracker.leechers-paradise.org:6969&tr=udp://exodus.desync.com:6969"

                    if not all([title, download_url]):
                        continue

                    item = title, download_url, size, seeders, leechers

                    if mode != 'RSS':
                        sickrage.LOGGER.debug("Found result: %s" % title)

                    items[mode].append(item)

            # For each search mode sort all the items by seeders
            items[mode].sort(key=lambda tup: tup[3], reverse=True)

            results += items[mode]

        return results
Example #12
    def search(self,
               search_strings,
               search_mode='eponly',
               epcount=0,
               age=0,
               epObj=None):

        results = []
        items = {'Season': [], 'Episode': [], 'RSS': []}

        if not self.login():
            return results

        for mode in search_strings.keys():
            sickrage.srCore.srLogger.debug("Search Mode: %s" % mode)
            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    searchURL = self.urls['search'] % (
                        urllib.quote_plus(search_string), self.categories)
                else:
                    searchURL = self.urls['rss'] % self.categories

                sickrage.srCore.srLogger.debug("Search URL: %s" % searchURL)
                if mode != 'RSS':
                    sickrage.srCore.srLogger.debug("Search string: %s" %
                                                   search_string)

                try:
                    data = sickrage.srCore.srWebSession.get(searchURL,
                                                            cache=False).text
                except Exception:
                    sickrage.srCore.srLogger.debug(
                        "No data returned from provider")
                    continue

                if data.find('No torrents here') != -1:
                    sickrage.srCore.srLogger.debug(
                        "Data returned from provider does not contain any torrents"
                    )
                    continue

                # Search result page contains some invalid html that prevents html parser from returning all data.
                # We cut everything before the table that contains the data we are interested in thus eliminating
                # the invalid html portions
                try:
                    index = data.lower().index(
                        '<table class="mainblockcontenttt"')
                except ValueError:
                    sickrage.srCore.srLogger.debug(
                        "Could not find table of torrents mainblockcontenttt")
                    continue

                with bs4_parser(data[index:]) as html:
                    if not html:
                        sickrage.srCore.srLogger.debug(
                            "No html data parsed from provider")
                        continue

                    torrent_rows = []
                    torrent_table = html.find('table',
                                              class_='mainblockcontenttt')
                    if torrent_table:
                        torrent_rows = torrent_table('tr')

                    if not torrent_rows:
                        sickrage.srCore.srLogger.debug(
                            "Could not find results in returned data")
                        continue

                    # Cat., Active, Filename, Dl, Wl, Added, Size, Uploader, S, L, C
                    labels = [
                        label.a.get_text(strip=True)
                        if label.a else label.get_text(strip=True)
                        for label in torrent_rows[0]('td')
                    ]

                    # Skip column headers
                    for result in torrent_rows[1:]:
                        try:
                            cells = result.findChildren('td')[:len(labels)]
                            if len(cells) < len(labels):
                                continue

                            title = cells[labels.index('Filename')].a.get_text(
                                strip=True)
                            seeders = tryInt(
                                cells[labels.index('S')].get_text(strip=True))
                            leechers = tryInt(
                                cells[labels.index('L')].get_text(strip=True))
                            torrent_size = cells[labels.index('Size')].get_text()

                            size = convert_size(torrent_size) or -1
                            download_url = self.urls['base_url'] + '/' + cells[labels.index('Dl')].a['href']
                        except (AttributeError, TypeError, KeyError,
                                ValueError, IndexError):
                            continue

                        if not all([title, download_url]):
                            continue

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                sickrage.srCore.srLogger.debug(
                                    "Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})"
                                    .format(title, seeders, leechers))
                            continue

                        item = title, download_url, size, seeders, leechers
                        if mode != 'RSS':
                            sickrage.srCore.srLogger.debug(
                                "Found result: %s " % title)

                        items[mode].append(item)

            # For each search mode sort all the items by seeders if available
            items[mode].sort(key=lambda tup: tup[3], reverse=True)

            results += items[mode]

        return results
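
The labels list built from the header row is what makes this parser survive column reordering: cell positions are resolved by header text instead of being hard-coded. Reduced to its core, with made-up row data:

labels = ['Cat.', 'Filename', 'Dl', 'Size', 'S', 'L']               # header row text
cells = ['TV', 'Some.Show.S01E01', '/dl/1234', '1.4 GB', '12', '3']  # one data row

def col(name):
    return cells[labels.index(name)]  # ValueError if the site renames a column

assert col('Filename') == 'Some.Show.S01E01'
assert int(col('S')) == 12  # seeders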
Example #13
    def search(self,
               search_strings,
               search_mode='eponly',
               epcount=0,
               age=0,
               epObj=None):
        results = []

        for mode in search_strings:
            items = []
            sickrage.srCore.srLogger.debug('Search Mode: {}'.format(mode))
            for search_string in search_strings[mode]:
                if mode != 'RSS':
                    sickrage.srCore.srLogger.debug(
                        'Search string: {}'.format(search_string))

                if mode == 'RSS':
                    search_url = self.urls['series']
                else:
                    search_url = urljoin(self.urls['base_url'], search_string)

                try:
                    data = sickrage.srCore.srWebSession.get(search_url,
                                                            cache=False).text
                except Exception:
                    sickrage.srCore.srLogger.debug(
                        'No data returned from provider')
                    continue

                with bs4_parser(data) as html:
                    torrent_rows = html.find_all('tr')
                    for row in torrent_rows:
                        for torrent in row.find_all('td'):
                            for link in torrent.find_all('a'):
                                fileType = ''.join(
                                    link.find_previous('i')["class"])
                                fileType = unicodedata.normalize('NFKD', fileType). \
                                    encode(sickrage.srCore.SYS_ENCODING, 'ignore')

                                if fileType == "Series":
                                    title = link.get_text(strip=True)
                                    download_url = self.get_download_url(
                                        link.get('href'))

                                    if not all([title, download_url]):
                                        continue

                                    # size
                                    size = convert_size(
                                        link.findNext('td').text) or -1

                                    # Filter unseeded torrent
                                    seeders = tryInt(
                                        link.find_next(
                                            'img', alt='seeders').parent.text,
                                        0)
                                    leechers = tryInt(
                                        link.find_next(
                                            'img', alt='leechers').parent.text,
                                        0)

                                    if seeders < self.minseed or leechers < self.minleech:
                                        if mode != 'RSS':
                                            sickrage.srCore.srLogger.debug(
                                                "Discarding torrent because it doesn't meet the minimum seeders or leechers: {} (S:{} L:{})"
                                                .format(
                                                    title, seeders, leechers))
                                        continue

                                    items += [{
                                        'title': title,
                                        'link': download_url,
                                        'size': size,
                                        'seeders': seeders,
                                        'leechers': leechers,
                                    }]

                                    if mode != 'RSS':
                                        sickrage.srCore.srLogger.debug(
                                            "Found result: {}".format(title))

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: int(d.get('seeders', 0)), reverse=True)
            results += items

        return results
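
This parser leans on BeautifulSoup's document-order navigation: find_previous('i') fetches the file-type icon that precedes the link, and find_next('img', alt='seeders').parent reaches the seeder count that follows it. A self-contained sketch over made-up markup (assumes beautifulsoup4):

from bs4 import BeautifulSoup

html = ('<tr><td><i class="Series"></i><a href="/t/1">Show S01E01</a></td>'
        '<td><span><img alt="seeders"/>42</span></td></tr>')
link = BeautifulSoup(html, "html.parser").find("a")

file_type = "".join(link.find_previous("i")["class"])            # 'Series'
seeders = int(link.find_next("img", alt="seeders").parent.text)  # 42
print(file_type, seeders)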
Example #14
    def search(self, search_strings, search_mode='eponly', epcount=0, age=0, epObj=None):

        results = []
        items = {'Season': [], 'Episode': [], 'RSS': []}

        for mode in search_strings.keys():
            sickrage.srCore.srLogger.debug("Search Mode: %s" % mode)
            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    sickrage.srCore.srLogger.debug("Search string: %s " % search_string)

                try:
                    self.search_params.update({'type': ('search', 'rss')[mode == 'RSS'], 'search': search_string})

                    try:
                        data = sickrage.srCore.srWebSession.get(self.urls['rss'], params=self.search_params).text
                    except Exception:
                        sickrage.srCore.srLogger.debug("No data returned from provider")
                        continue

                    if not data.startswith('<?xml'):
                        sickrage.srCore.srLogger.info('Expected xml but got something else, is your mirror failing?')
                        continue

                    try:
                        data = xmltodict.parse(data)
                    except ExpatError:
                        sickrage.srCore.srLogger.error(
                            "Failed parsing provider. Traceback: %r\n%r" % (traceback.format_exc(), data))
                        continue

                    if not (data and 'rss' in data and 'channel' in data['rss'] and 'item' in data['rss']['channel']):
                        sickrage.srCore.srLogger.debug("Malformed rss returned, skipping")
                        continue

                    # https://github.com/martinblech/xmltodict/issues/111
                    entries = data['rss']['channel']['item']
                    entries = entries if isinstance(entries, list) else [entries]

                    for item in entries:
                        title = item['title'].decode('utf-8')
                        # info_hash = item['info_hash']
                        size = int(item['size'])
                        seeders = tryInt(item['seeders'], 0)
                        leechers = tryInt(item['leechers'], 0)
                        download_url = item['enclosure']['@url'] if 'enclosure' in item else self._magnet_from_details(
                            item['link'])

                        if not all([title, download_url]):
                            continue

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                sickrage.srCore.srLogger.debug(
                                    "Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(
                                        title, seeders, leechers))
                            continue

                        item = title, download_url, size, seeders, leechers
                        if mode != 'RSS':
                            sickrage.srCore.srLogger.debug("Found result: %s " % title)

                        items[mode].append(item)

                except (AttributeError, TypeError, KeyError, ValueError):
                    sickrage.srCore.srLogger.error("Failed parsing provider. Traceback: %r" % traceback.format_exc())

            # For each search mode sort all the items by seeders if available
            items[mode].sort(key=lambda tup: tup[3], reverse=True)

            results += items[mode]

        return results
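
The entries normalisation exists because xmltodict collapses a single <item> to a plain dict but yields a list when there are several (see the linked issue); iterating a bare dict would walk its keys instead of the items. The guard in isolation:

def as_list(entries):
    # xmltodict gives a dict for one <item>, a list for many
    return entries if isinstance(entries, list) else [entries]

assert as_list({'title': 'only one'}) == [{'title': 'only one'}]
assert as_list([{'title': 'a'}, {'title': 'b'}]) == [{'title': 'a'}, {'title': 'b'}]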
Example #15
    def search(self, search_params, search_mode='eponly', epcount=0, age=0, epObj=None):

        results = []
        items = {'Season': [], 'Episode': [], 'RSS': []}

        if not self.login():
            return results

        for mode in search_params.keys():
            sickrage.srCore.srLogger.debug("Search Mode: %s" % mode)
            for search_string in search_params[mode]:

                if mode == 'RSS':
                    searchURL = self.urls['index'] % self.categories
                else:
                    searchURL = self.urls['search'] % (
                        urllib.quote_plus(search_string.encode('utf-8')), self.categories)
                    sickrage.srCore.srLogger.debug("Search string: %s " % search_string)

                sickrage.srCore.srLogger.debug("Search URL: %s" % searchURL)

                try:
                    data = sickrage.srCore.srWebSession.get(searchURL, cache=False).text
                except Exception:
                    sickrage.srCore.srLogger.debug("No data returned from provider")
                    continue

                try:
                    with bs4_parser(data) as html:
                        torrent_table = html.find('table', attrs={'id': 'torrenttable'})
                        torrent_rows = torrent_table.find_all('tr') if torrent_table else []

                        # Continue only if at least one release is found
                        if len(torrent_rows) < 2:
                            sickrage.srCore.srLogger.debug("Data returned from provider does not contain any torrents")
                            continue

                        for result in torrent_table.find_all('tr')[1:]:

                            try:
                                link = result.find('td', attrs={'class': 'name'}).find('a')
                                url = result.find('td', attrs={'class': 'quickdownload'}).find('a')
                                title = link.string
                                download_url = url['href']
                                seeders = tryInt(result.find('td', attrs={'class': 'seeders'}).text, 0)
                                leechers = tryInt(result.find('td', attrs={'class': 'leechers'}).text, 0)

                                size = -1
                                if re.match(r'\d+([,\.]\d+)?\s*[KkMmGgTt]?[Bb]',
                                            result('td', class_="listcolumn")[1].text):
                                    size = convert_size(result('td', class_="listcolumn")[1].text.strip())
                            except (AttributeError, TypeError):
                                continue

                            if not all([title, download_url]):
                                continue

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    sickrage.srCore.srLogger.debug(
                                        "Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(
                                            title, seeders, leechers))
                                continue

                            item = title, download_url, size, seeders, leechers
                            if mode != 'RSS':
                                sickrage.srCore.srLogger.debug("Found result: %s " % title)

                            items[mode].append(item)

                except Exception:
                    sickrage.srCore.srLogger.error(
                        "Failed parsing provider. Traceback: {}".format(traceback.format_exc()))

            # For each search mode sort all the items by seeders if available
            items[mode].sort(key=lambda tup: tup[3], reverse=True)

            results += items[mode]

        return results
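
Several of these providers sanity-check the size cell against a pattern like \d+([,\.]\d+)?\s*[KkMmGgTt]?[Bb] and then hand it to convert_size. The real helper lives elsewhere in the codebase; a plausible self-contained stand-in, assuming binary (1024-based) units:

import re

_UNITS = {'B': 1, 'KB': 1024, 'MB': 1024 ** 2, 'GB': 1024 ** 3, 'TB': 1024 ** 4}

def convert_size(text):
    match = re.match(r'([\d.,]+)\s*([KMGT]?B)', text.strip(), re.I)
    if not match:
        return None  # callers use `convert_size(...) or -1`
    number = float(match.group(1).replace(',', '.'))
    return int(number * _UNITS[match.group(2).upper()])

assert convert_size('1.4 GB') == int(1.4 * 1024 ** 3)
assert convert_size('junk') is None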
Example #16
    def sendNZB(nzb, proper=False):
        """
        Sends NZB to NZBGet client

        :param nzb: nzb object
        :param proper: True if this is a Proper download, False if not. Defaults to False
        """
        addToTop = False
        nzbgetprio = 0
        category = sickrage.srCore.srConfig.NZBGET_CATEGORY
        if nzb.show.is_anime:
            category = sickrage.srCore.srConfig.NZBGET_CATEGORY_ANIME

        if sickrage.srCore.srConfig.NZBGET_USE_HTTPS:
            nzbgetXMLrpc = "https://%(username)s:%(password)s@%(host)s/xmlrpc"
        else:
            nzbgetXMLrpc = "http://%(username)s:%(password)s@%(host)s/xmlrpc"

        if sickrage.srCore.srConfig.NZBGET_HOST is None:
            sickrage.srCore.srLogger.error(
                "No NZBget host found in configuration. Please configure it.")
            return False

        url = nzbgetXMLrpc % {
            "host": sickrage.srCore.srConfig.NZBGET_HOST,
            "username": sickrage.srCore.srConfig.NZBGET_USERNAME,
            "password": sickrage.srCore.srConfig.NZBGET_PASSWORD
        }

        nzbGetRPC = xmlrpclib.ServerProxy(url)
        try:
            if nzbGetRPC.writelog("INFO", "SiCKRAGE connected to drop of %s any moment now." % (nzb.name + ".nzb")):
                sickrage.srCore.srLogger.debug("Successfully connected to NZBget")
            else:
                sickrage.srCore.srLogger.error("Successfully connected to NZBget, but unable to send a message")

        except httplib.socket.error:
            sickrage.srCore.srLogger.error(
                "Please check your NZBget host and port (if it is running). NZBget is not responding to this combination"
            )
            return False

        except xmlrpclib.ProtocolError as e:
            if e.errmsg == "Unauthorized":
                sickrage.srCore.srLogger.error(
                    "NZBget username or password is incorrect.")
            else:
                sickrage.srCore.srLogger.error("Protocol Error: " + e.errmsg)
            return False

        dupekey = ""
        dupescore = 0
        # if it aired recently make it high priority and generate DupeKey/Score
        for curEp in nzb.episodes:
            if dupekey == "":
                if curEp.show.indexer == 1:
                    dupekey = "SiCKRAGE-" + str(curEp.show.indexerid)
                elif curEp.show.indexer == 2:
                    dupekey = "SiCKRAGE-tvr" + str(curEp.show.indexerid)
            dupekey += "-" + str(curEp.season) + "." + str(curEp.episode)
            if date.today() - curEp.airdate <= timedelta(days=7):
                addToTop = True
                nzbgetprio = sickrage.srCore.srConfig.NZBGET_PRIORITY
            else:
                category = sickrage.srCore.srConfig.NZBGET_CATEGORY_BACKLOG
                if nzb.show.is_anime:
                    category = sickrage.srCore.srConfig.NZBGET_CATEGORY_ANIME_BACKLOG

        if nzb.quality != Quality.UNKNOWN:
            dupescore = nzb.quality * 100
        if proper:
            dupescore += 10

        nzbcontent64 = None
        if nzb.resultType == "nzbdata":
            data = nzb.extraInfo[0]
            nzbcontent64 = standard_b64encode(data)

        sickrage.srCore.srLogger.info("Sending NZB to NZBget")
        sickrage.srCore.srLogger.debug("URL: " + url)

        try:
            # Find out if nzbget supports priority (Version 9.0+), old versions beginning with a 0.x will use the old command
            nzbget_version_str = nzbGetRPC.version()
            nzbget_version = tryInt(
                nzbget_version_str[:nzbget_version_str.find(".")])
            if nzbget_version == 0:
                if nzbcontent64 is not None:
                    nzbget_result = nzbGetRPC.append(nzb.name + ".nzb",
                                                     category, addToTop,
                                                     nzbcontent64)
                else:
                    if nzb.resultType == "nzb":
                        try:
                            nzbcontent64 = standard_b64encode(
                                sickrage.srCore.srWebSession.get(nzb.url).text)
                        except Exception:
                            return False
                    nzbget_result = nzbGetRPC.append(nzb.name + ".nzb",
                                                     category, addToTop,
                                                     nzbcontent64)
            elif nzbget_version == 12:
                if nzbcontent64 is not None:
                    nzbget_result = nzbGetRPC.append(nzb.name + ".nzb",
                                                     category, nzbgetprio,
                                                     False, nzbcontent64,
                                                     False, dupekey, dupescore,
                                                     "score")
                else:
                    nzbget_result = nzbGetRPC.appendurl(
                        nzb.name + ".nzb", category, nzbgetprio, False,
                        nzb.url, False, dupekey, dupescore, "score")
            # v13+ has a new combined append method that accepts both (url and content)
            # also the return value has changed from boolean to integer
            # (Positive number representing NZBID of the queue item. 0 and negative numbers represent error codes.)
            elif nzbget_version >= 13:
                nzbget_result = True if nzbGetRPC.append(
                    nzb.name + ".nzb", nzbcontent64 if nzbcontent64 is not None
                    else nzb.url, category, nzbgetprio, False, False, dupekey,
                    dupescore, "score") > 0 else False
            else:
                if nzbcontent64 is not None:
                    nzbget_result = nzbGetRPC.append(nzb.name + ".nzb",
                                                     category, nzbgetprio,
                                                     False, nzbcontent64)
                else:
                    nzbget_result = nzbGetRPC.appendurl(
                        nzb.name + ".nzb", category, nzbgetprio, False,
                        nzb.url)

            if nzbget_result:
                sickrage.srCore.srLogger.debug(
                    "NZB sent to NZBget successfully")
                return True
            else:
                sickrage.srCore.srLogger.error(
                    "NZBget could not add %s to the queue" %
                    (nzb.name + ".nzb"))
                return False
        except Exception:
            sickrage.srCore.srLogger.error(
                "Connect Error to NZBget: could not add %s to the queue" %
                (nzb.name + ".nzb"))
            return False
Example #17
    def search(self,
               search_params,
               search_mode='eponly',
               epcount=0,
               age=0,
               epObj=None):
        results = []
        if not self.login():
            return results

        items = {'Season': [], 'Episode': [], 'RSS': []}

        for mode in search_params.keys():
            sickrage.srCore.srLogger.debug("Search Mode: %s" % mode)
            for search_string in search_params[mode]:

                if mode != 'RSS':
                    sickrage.srCore.srLogger.debug("Search string: %s " %
                                                   search_string)

                searchURL = self.urls['search'] % search_string.replace('(', '').replace(')', '')
                sickrage.srCore.srLogger.debug("Search URL: %s" % searchURL)

                # returns top 15 results by default, expandable in user profile to 100
                try:
                    data = sickrage.srCore.srWebSession.get(searchURL,
                                                            cache=False).text
                except Exception:
                    sickrage.srCore.srLogger.debug(
                        "No data returned from provider")
                    continue

                with bs4_parser(data) as html:
                    torrent_rows = html.find_all('tr', class_='torrent')
                    if len(torrent_rows) < 1:
                        sickrage.srCore.srLogger.debug(
                            "Data returned from provider does not contain any torrents"
                        )
                        continue

                    for result in torrent_rows:
                        try:
                            # skip if torrent has been nuked due to poor quality
                            if result.find('img', alt='Nuked'):
                                continue

                            download_url = urljoin(
                                self.urls['base_url'] + '/',
                                result.find('span',
                                            title='Download').parent['href'])
                            title = result.find(
                                'a', title='View torrent').get_text(strip=True)

                            if not all([title, download_url]):
                                continue

                            seeders = tryInt(
                                result('td', class_="number_column")[1].text,
                                0)
                            leechers = tryInt(
                                result('td', class_="number_column")[2].text,
                                0)

                            size = -1
                            if re.match(
                                    r'\d+([,\.]\d+)?\s*[KkMmGgTt]?[Bb]',
                                    result('td',
                                           class_="number_column")[0].text):
                                size = convert_size(
                                    result('td', class_="number_column")
                                    [0].text.strip())

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    sickrage.srCore.srLogger.debug(
                                        "Discarding torrent because it doesn't meet the minimum seeders or leechers: {} (S:{} L:{})"
                                        .format(title, seeders, leechers))
                                continue

                            item = title, download_url, size, seeders, leechers
                            if mode != 'RSS':
                                sickrage.srCore.srLogger.debug(
                                    "Found result: {}".format(title))

                            items[mode].append(item)
                        except Exception:
                            continue

            # For each search mode sort all the items by seeders if available
            items[mode].sort(key=lambda tup: tup[3], reverse=True)

            results += items[mode]

        return results
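
Each hit is collected as a plain (title, download_url, size, seeders, leechers) tuple, and the tuples for each mode are sorted by seeder count. A minimal, self-contained sketch of that sort step, with made-up result tuples:

# Made-up result tuples in the same (title, url, size, seeders, leechers) shape
items = [
    ("Show.S01E02.720p", "magnet:?xt=aaa", 734003200, 12, 3),
    ("Show.S01E02.1080p", "magnet:?xt=bbb", 1572864000, 48, 9),
]

# Same ordering as items[mode].sort(key=lambda tup: tup[3], reverse=True):
# highest seeder count first
items.sort(key=lambda tup: tup[3], reverse=True)
print([t[0] for t in items])  # 1080p release first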
Example #18
    def search(self, search_params, search_mode='eponly', epcount=0, age=0, epObj=None):

        results = []
        items = {'Season': [], 'Episode': [], 'RSS': []}

        freeleech = '3' if self.freeleech else '0'

        if not self._doLogin():
            return results

        for mode in search_params.keys():
            sickrage.srCore.srLogger.debug("Search Mode: %s" % mode)
            for search_string in search_params[mode]:

                if mode != 'RSS':
                    sickrage.srCore.srLogger.debug("Search string: %s " % search_string)

                searchURL = self.urls['search'] % (freeleech, search_string)
                sickrage.srCore.srLogger.debug("Search URL: %s" % searchURL)
                max_page_number = 0

                try:
                    data = sickrage.srCore.srWebSession.get(searchURL).text
                except Exception:
                    sickrage.srCore.srLogger.debug("No data returned from provider")
                    continue

                try:
                    with bs4_parser(data) as html:

                        # Check to see if there is more than 1 page of results
                        pager = html.find('div', {'class': 'pager'})
                        if pager:
                            page_links = pager.find_all('a', href=True)
                        else:
                            page_links = []

                        for lnk in page_links:
                            link_text = lnk.text.strip()
                            if link_text.isdigit():
                                max_page_number = max(max_page_number, int(link_text))

                        # limit page number to 15 just in case something goes wrong
                        if max_page_number > 15:
                            max_page_number = 15
                        # limit RSS search
                        if max_page_number > 3 and mode == 'RSS':
                            max_page_number = 3
                except Exception:
                    sickrage.srCore.srLogger.error("Failed parsing provider. Traceback: %s" % traceback.format_exc())
                    continue

                data_response_list = [data]

                # Freshon starts counting pages from zero, even though it displays numbers from 1
                if max_page_number > 1:
                    for i in range(1, max_page_number):
                        time.sleep(1)
                        page_searchURL = searchURL + '&page=' + str(i)

                        try:
                            page_html = sickrage.srCore.srWebSession.get(page_searchURL).text
                        except Exception:
                            continue

                        data_response_list.append(page_html)

                try:

                    for data in data_response_list:

                        with bs4_parser(data) as html:

                            torrent_rows = html.find_all("tr", {"class": re.compile('torrent_[0-9]*')})

                            # Continue only if a Release is found
                            if len(torrent_rows) == 0:
                                sickrage.srCore.srLogger.debug("Data returned from provider does not contain any torrents")
                                continue

                            for individual_torrent in torrent_rows:

                                # skip if torrent has been nuked due to poor quality
                                if individual_torrent.find('img', alt='Nuked') is not None:
                                    continue

                                try:
                                    title = individual_torrent.find('a', {'class': 'torrent_name_link'})['title']
                                except Exception:
                                    sickrage.srCore.srLogger.warning(
                                        "Unable to parse torrent title. Traceback: %s " % traceback.format_exc())
                                    continue

                                try:
                                    details_url = individual_torrent.find('a', {'class': 'torrent_name_link'})['href']
                                    torrent_id = int((re.match('.*?([0-9]+)$', details_url).group(1)).strip())
                                    download_url = self.urls['download'] % (str(torrent_id))
                                    seeders = tryInt(individual_torrent.find('td', {'class': 'table_seeders'}).find(
                                        'span').text.strip(), 1)
                                    leechers = tryInt(individual_torrent.find('td', {'class': 'table_leechers'}).find(
                                        'a').text.strip(), 0)
                                    # FIXME
                                    size = -1
                                except Exception:
                                    continue

                                if not all([title, download_url]):
                                    continue

                                # Filter unseeded torrent
                                if seeders < self.minseed or leechers < self.minleech:
                                    if mode != 'RSS':
                                        sickrage.srCore.srLogger.debug(
                                            "Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(
                                                title, seeders, leechers))
                                    continue

                                item = title, download_url, size, seeders, leechers
                                if mode != 'RSS':
                                    sickrage.srCore.srLogger.debug("Found result: %s " % title)

                                items[mode].append(item)

                except Exception:
                    sickrage.srCore.srLogger.error("Failed parsing provider. Traceback: %s" % traceback.format_exc())

            # For each search mode sort all the items by seeders if available
            items[mode].sort(key=lambda tup: tup[3], reverse=True)

            results += items[mode]

        return results
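
The pagination above is easy to misread: the first request (no &page parameter) already returns page 0, and range(1, max_page_number) then fetches pages 1 through max_page_number - 1, so exactly max_page_number pages are retrieved in total. A small self-contained sketch of the same zero-based scheme (the example URL is a placeholder):

def build_page_urls(search_url, max_page_number):
    # Page 0 is implied by the bare search URL; explicit &page=N starts at 1,
    # mirroring the zero-based pagination handled in the provider above
    urls = [search_url]
    for i in range(1, max_page_number):
        urls.append(search_url + '&page=' + str(i))
    return urls


# 3 pages of results -> [base, base + '&page=1', base + '&page=2']
print(build_page_urls('http://example.com/browse.php?search=foo', 3))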
Example #19
    def loadFromDB(self, season, episode):
        sickrage.LOGGER.debug("%s: Loading episode details from DB for %s S%02dE%02d" % (
            self.show.indexerid, self.show.name, season or 0, episode or 0))

        sqlResults = main_db.MainDB().select(
                "SELECT * FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?",
                [self.show.indexerid, season, episode])

        if len(sqlResults) > 1:
            raise MultipleEpisodesInDatabaseException("Your DB has two records for the same episode somehow.")
        elif len(sqlResults) == 0:
            sickrage.LOGGER.debug("%s: Episode S%02dE%02d not found in the database" % (
                self.show.indexerid, season or 0, episode or 0))
            return False
        else:
            if sqlResults[0][b"name"]:
                self.name = sqlResults[0][b"name"]

            self.season = season
            self.episode = episode
            self.absolute_number = sqlResults[0][b"absolute_number"]
            self.description = sqlResults[0][b"description"]
            if not self.description:
                self.description = ""
            if sqlResults[0][b"subtitles"] and sqlResults[0][b"subtitles"]:
                self.subtitles = sqlResults[0][b"subtitles"].split(",")
            self.subtitles_searchcount = sqlResults[0][b"subtitles_searchcount"]
            self.subtitles_lastsearch = sqlResults[0][b"subtitles_lastsearch"]
            self.airdate = datetime.date.fromordinal(int(sqlResults[0][b"airdate"]))
            self.status = int(sqlResults[0][b"status"] or -1)

            # don't overwrite my location
            if sqlResults[0][b"location"] and sqlResults[0][b"location"]:
                self.location = os.path.normpath(sqlResults[0][b"location"])
            if sqlResults[0][b"file_size"]:
                self.file_size = int(sqlResults[0][b"file_size"])
            else:
                self.file_size = 0

            self.indexerid = int(sqlResults[0][b"indexerid"])
            self.indexer = int(sqlResults[0][b"indexer"])

            xem_refresh(self.show.indexerid, self.show.indexer)

            self.scene_season = tryInt(sqlResults[0][b"scene_season"], 0)
            self.scene_episode = tryInt(sqlResults[0][b"scene_episode"], 0)
            self.scene_absolute_number = tryInt(sqlResults[0][b"scene_absolute_number"], 0)

            if self.scene_absolute_number == 0:
                self.scene_absolute_number = get_scene_absolute_numbering(
                        self.show.indexerid,
                        self.show.indexer,
                        self.absolute_number
                )

            if self.scene_season == 0 or self.scene_episode == 0:
                self.scene_season, self.scene_episode = get_scene_numbering(
                        self.show.indexerid,
                        self.show.indexer,
                        self.season, self.episode
                )

            if sqlResults[0][b"release_name"] is not None:
                self.release_name = sqlResults[0][b"release_name"]

            if sqlResults[0][b"is_proper"]:
                self.is_proper = int(sqlResults[0][b"is_proper"])

            if sqlResults[0][b"version"]:
                self.version = int(sqlResults[0][b"version"])

            if sqlResults[0][b"release_group"] is not None:
                self.release_group = sqlResults[0][b"release_group"]

            self.dirty = False
            return True
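
loadFromDB relies on a parameterized query, so the show, season, and episode values are bound by the SQLite driver rather than spliced into the SQL string. A minimal standalone illustration of the same pattern with the stdlib sqlite3 module (table contents are made up for the example):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE tv_episodes (showid INTEGER, season INTEGER, episode INTEGER, name TEXT)")
conn.execute("INSERT INTO tv_episodes VALUES (?, ?, ?, ?)", (1234, 1, 2, "Pilot"))

# Same parameterized form as the main_db select above: "?" placeholders
# keep the values out of the SQL text itself
rows = conn.execute(
    "SELECT * FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?",
    (1234, 1, 2)).fetchall()
print(rows)  # one matching row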