Example #1
    def parse_download(self, series_url, title, language_id):
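        # Look the release up in the DJ media API; if one of its hosters is allowed
        # (or hoster_fallback is set), hand the package to send_package.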
        if not check_valid_release(title, False, False, self.dbfile):
            self.log_debug(title + u" - Release ignoriert (Gleiche oder bessere Quelle bereits vorhanden)")
            return
        try:
            series_info = get_url(series_url, self.configfile, self.dbfile)
            series_id = re.findall(r'data-mediaid="(.*?)"', series_info)[0]
            api_url = 'https://' + self.dj + '/api/media/' + series_id + '/releases'

            response = get_url(api_url, self.configfile, self.dbfile, self.scraper)
            seasons = json.loads(response)
            for season in seasons:
                season = seasons[season]
                for item in season['items']:
                    if item['name'] == title:
                        valid = False
                        for hoster in item['hoster']:
                            if check_hoster(hoster, self.configfile):
                                valid = True
                        if not valid and not self.hoster_fallback:
                            storage = self.db.retrieve_all(title)
                            if 'added' not in storage and 'notdl' not in storage:
                                wrong_hoster = '[DJ/Hoster fehlt] - ' + title
                                if 'wrong_hoster' not in storage:
                                    print(wrong_hoster)
                                    self.db.store(title, 'wrong_hoster')
                                    notify([wrong_hoster], self.configfile)
                                else:
                                    self.log_debug(wrong_hoster)
                        else:
                            return self.send_package(title, series_url, language_id)
        except:
            print(u"DJ hat die Doku-API angepasst. Breche Download-Prüfung ab!")
Example #2
File: dj.py Project: rix1337/RSScrawler
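    # Build the "[Doku...]" label for the log entry, then push the release to
    # JDownloader via myjd_download and store it as 'added'.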
    def send_package(self, title, links, englisch_info, genre):
        if genre == "Doku":
            genre = ""
        else:
            genre = "/" + genre
        englisch = ""
        if englisch_info:
            englisch = "Englisch - "
        if self.filename == 'DJ_Dokus_Regex':
            link_placeholder = '[Doku' + genre + '/RegEx] - ' + englisch
        else:
            link_placeholder = '[Doku' + genre + '] - ' + englisch
        try:
            storage = self.db.retrieve(title)
        except Exception as e:
            self.log_debug(
                "Fehler bei Datenbankzugriff: %s, Grund: %s" % (e, title))
            return
        if storage == 'added':
            self.log_debug(title + " - Release ignoriert (bereits gefunden)")
        else:
            self.device = myjd_download(self.configfile, self.device, title, "RSScrawler", links,
                                        decode_base64("ZG9rdWp1bmtpZXMub3Jn"))
            if self.device:
                self.db.store(title, 'added')
                log_entry = link_placeholder + title
                self.log_info(log_entry)
                notify([log_entry], self.configfile)
                return log_entry
Example #3
    def send_package(self, title, series_url, language_id):
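        # Build the "[Episode/Staffel]" label from the list type, then queue the
        # title for Click'n'Load decryption via add_decrypt.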
        englisch = ""
        if language_id == 2:
            englisch = "/Englisch"
        if self.filename == 'SJ_Serien_Regex':
            link_placeholder = '[Episode/RegEx' + englisch + '] - '
        elif self.filename == 'SJ_Serien':
            link_placeholder = '[Episode' + englisch + '] - '
        elif self.filename == 'SJ_Staffeln_Regex':
            link_placeholder = '[Staffel/RegEx' + englisch + '] - '
        else:
            link_placeholder = '[Staffel' + englisch + '] - '
        try:
            storage = self.db.retrieve_all(title)
        except Exception as e:
            self.log_debug("Fehler bei Datenbankzugriff: %s, Grund: %s" %
                           (e, title))
            return

        if 'added' in storage or 'notdl' in storage:
            self.log_debug(title + " - Release ignoriert (bereits gefunden)")
        else:
            download = add_decrypt(title, series_url, self.sj, self.dbfile)
            if download:
                self.db.store(title, 'added')
                log_entry = link_placeholder + title + ' - [SJ]'
                self.log_info(log_entry)
                notify(["[Click'n'Load notwendig] - " + log_entry],
                       self.configfile)
                return log_entry
Example #4
File: dd.py Project: evilmon/RSScrawler
    def periodical_task(self):
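        # Parse every configured RSS feed and queue posts that were published more
        # than 30 minutes ago; newer posts are skipped until the next run.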
        feeds = self.config.get("feeds")
        if feeds:
            added_items = []
            feeds = feeds.replace(" ", "").split(',')
            for feed in feeds:
                feed = feedparser.parse(
                    get_url(feed, self.configfile, self.dbfile, self.scraper))
                for post in feed.entries:
                    key = post.title.replace(" ", ".")

                    epoch = datetime(1970, 1, 1)
                    current_epoch = int(time())
                    published_format = "%Y-%m-%d %H:%M:%S+00:00"
                    published_timestamp = str(parser.parse(post.published))
                    published_epoch = int((datetime.strptime(
                        published_timestamp, published_format) -
                                           epoch).total_seconds())
                    if (current_epoch - 1800) > published_epoch:
                        link_pool = post.summary
                        unicode_links = re.findall(r'(http.*)', link_pool)
                        links = []
                        for link in unicode_links:
                            if check_hoster(link, self.configfile):
                                links.append(str(link))
                        if self.config.get("hoster_fallback") and not links:
                            for link in unicode_links:
                                links.append(str(link))
                        storage = self.db.retrieve_all(key)
                        if not links:
                            if 'added' not in storage and 'notdl' not in storage:
                                wrong_hoster = '[DD/Hoster fehlt] - ' + key
                                if 'wrong_hoster' not in storage:
                                    self.log_info(wrong_hoster)
                                    self.db.store(key, 'wrong_hoster')
                                    notify([wrong_hoster], self.configfile)
                                else:
                                    self.log_debug(wrong_hoster)
                        elif 'added' in storage:
                            self.log_debug(
                                "%s - Release ignoriert (bereits gefunden)" %
                                key)
                        else:
                            self.device = myjd_download(
                                self.configfile, self.dbfile, self.device, key,
                                "RSScrawler", links, "")
                            if self.device:
                                self.db.store(key, 'added')
                                log_entry = '[Englisch] - ' + key + ' - [DD]'
                                self.log_info(log_entry)
                                notify([log_entry], self.configfile)
                                added_items.append(log_entry)
                    else:
                        self.log_debug(
                            "%s - Releasezeitpunkt weniger als 30 Minuten in der Vergangenheit - wird ignoriert."
                            % key)
        else:
            self.log_debug("Liste ist leer. Stoppe Suche für DD!")
        return self.device
Example #5
    def parse_download(self, series_url, title, language_id):
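        # SJ variant: validate the release (retail/HEVC/season-pack rules), then
        # match it against the SJ releases API just like the DJ crawler.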
        if not check_valid_release(title, self.retail_only, self.hevc_retail,
                                   self.dbfile):
            self.log_debug(
                title +
                u" - Release ignoriert (Gleiche oder bessere Quelle bereits vorhanden)"
            )
            return
        if self.filename == 'MB_Staffeln':
            if not self.config.get("seasonpacks"):
                staffelpack = re.search(r"s\d.*(-|\.).*s\d", title.lower())
                if staffelpack:
                    self.log_debug("%s - Release ignoriert (Staffelpaket)" %
                                   title)
                    return
            if not re.search(self.seasonssource, title.lower()):
                self.log_debug(title + " - Release hat falsche Quelle")
                return
        try:
            series_info = get_url(series_url, self.configfile, self.dbfile)
            series_id = re.findall(r'data-mediaid="(.*?)"', series_info)[0]
            api_url = 'https://' + self.sj + '/api/media/' + series_id + '/releases'

            response = get_url(api_url, self.configfile, self.dbfile,
                               self.scraper)
            seasons = json.loads(response)
            for season in seasons:
                season = seasons[season]
                for item in season['items']:
                    if item['name'] == title:
                        valid = False
                        for hoster in item['hoster']:
                            if hoster:
                                if check_hoster(hoster, self.configfile):
                                    valid = True
                        if not valid and not self.hoster_fallback:
                            storage = self.db.retrieve_all(title)
                            if 'added' not in storage and 'notdl' not in storage:
                                wrong_hoster = '[SJ/Hoster fehlt] - ' + title
                                if 'wrong_hoster' not in storage:
                                    print(wrong_hoster)
                                    self.db.store(title, 'wrong_hoster')
                                    notify([wrong_hoster], self.configfile)
                                else:
                                    self.log_debug(wrong_hoster)
                        else:
                            return self.send_package(title, series_url,
                                                     language_id)
        except:
            print(
                u"SJ hat die Serien-API angepasst. Breche Download-Prüfung ab!"
            )
Example #6
File: dd.py Project: rix1337/RSScrawler
    def periodical_task(self):
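        # Older DD variant: links are filtered with the "hoster" regex from the
        # config instead of check_hoster(), and there is no hoster fallback.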
        feeds = self.config.get("feeds")
        if feeds:
            added_items = []
            feeds = feeds.replace(" ", "").split(',')
            hoster = re.compile(self.config.get("hoster"))
            for feed in feeds:
                feed = feedparser.parse(get_url(feed, self.configfile, self.dbfile))
                for post in feed.entries:
                    key = post.title.replace(" ", ".")

                    epoch = datetime(1970, 1, 1)
                    current_epoch = int(time())
                    published_format = "%Y-%m-%d %H:%M:%S+00:00"
                    published_timestamp = str(parser.parse(post.published))
                    published_epoch = int((datetime.strptime(
                        published_timestamp, published_format) - epoch).total_seconds())
                    if (current_epoch - 1800) > published_epoch:
                        link_pool = post.summary
                        unicode_links = re.findall(r'(http.*)', link_pool)
                        links = []
                        for link in unicode_links:
                            if re.match(hoster, link):
                                links.append(str(link))
                        if not links:
                            self.log_debug(
                                "%s - Release ignoriert (kein passender Link gefunden)" % key)
                        elif self.db.retrieve(key) == 'added':
                            self.log_debug(
                                "%s - Release ignoriert (bereits gefunden)" % key)
                        else:
                            self.device = myjd_download(self.configfile, self.device, key, "RSScrawler", links, "")
                            if self.device:
                                self.db.store(
                                    key,
                                    'added'
                                )
                                log_entry = '[DD] - Englisch - ' + key
                                self.log_info(log_entry)
                                notify([log_entry], self.configfile)
                                added_items.append(log_entry)
                    else:
                        self.log_debug(
                            "%s - Releasezeitpunkt weniger als 30 Minuten in der Vergangenheit - wird ignoriert." % key)
        else:
            self.log_debug("Liste ist leer. Stoppe Suche für DD!")
        return self.device
Example #7
File: sf.py Project: evilmon/RSScrawler
    def parse_download(self, series_url, title, language_id):
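        # SF serves rendered HTML through its API; the wanted release is located by
        # matching a normalized season title against the <small> tags in the markup.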
        if not check_valid_release(title, self.retail_only, self.hevc_retail,
                                   self.dbfile):
            self.log_debug(
                title +
                u" - Release ignoriert (Gleiche oder bessere Quelle bereits vorhanden)"
            )
            return
        if self.filename == 'MB_Staffeln':
            if not self.config.get("seasonpacks"):
                staffelpack = re.search(r"s\d.*(-|\.).*s\d", title.lower())
                if staffelpack:
                    self.log_debug("%s - Release ignoriert (Staffelpaket)" %
                                   title)
                    return
            if not re.search(self.seasonssource, title.lower()):
                self.log_debug(title + " - Release hat falsche Quelle")
                return
        try:
            if language_id == 2:
                lang = 'EN'
            else:
                lang = 'DE'
            epoch = str(datetime.datetime.now().timestamp()).replace('.',
                                                                     '')[:-3]
            api_url = series_url + '?lang=' + lang + '&_=' + epoch
            response = get_url(api_url, self.configfile, self.dbfile,
                               self.scraper)
            info = json.loads(response)

            is_episode = re.findall(r'.*\.(s\d{1,3}e\d{1,3})\..*', title,
                                    re.IGNORECASE)
            if is_episode:
                episode_string = re.findall(r'.*S\d{1,3}(E\d{1,3}).*',
                                            is_episode[0])[0].lower()
                season_string = re.findall(r'.*(S\d{1,3})E\d{1,3}.*',
                                           is_episode[0])[0].lower()
                season_title = rreplace(
                    title.lower().replace(episode_string, ''), "-", ".*",
                    1).lower()
                season_title = season_title.replace(".untouched",
                                                    ".*").replace(
                                                        ".dd+51", ".dd.51")
                episode = str(int(episode_string.replace("e", "")))
                season = str(int(season_string.replace("s", "")))
                episode_name = re.findall(r'.*\.s\d{1,3}(\..*).german',
                                          season_title, re.IGNORECASE)
                if episode_name:
                    season_title = season_title.replace(episode_name[0], '')
                codec_tags = [".h264", ".x264"]
                for tag in codec_tags:
                    season_title = season_title.replace(tag, ".*264")
                web_tags = [".web-rip", ".webrip", ".webdl", ".web-dl"]
                for tag in web_tags:
                    season_title = season_title.replace(tag, ".web.*")
            else:
                season = False
                episode = False
                season_title = title
                multiple_episodes = re.findall(r'(e\d{1,3}-e*\d{1,3}\.)',
                                               season_title, re.IGNORECASE)
                if multiple_episodes:
                    season_title = season_title.replace(
                        multiple_episodes[0], '.*')

            content = BeautifulSoup(info['html'], 'lxml')
            releases = content.find(
                "small", text=re.compile(season_title,
                                         re.IGNORECASE)).parent.parent.parent
            links = releases.findAll("div", {'class': 'row'})[1].findAll('a')
            valid = False
            for link in links:
                download_link = link['href']
                if check_hoster(link.text.replace('\n', ''), self.configfile):
                    valid = True
                    break
            if not valid and not self.hoster_fallback:
                storage = self.db.retrieve_all(title)
                if 'added' not in storage and 'notdl' not in storage:
                    wrong_hoster = '[SF/Hoster fehlt] - ' + title
                    if 'wrong_hoster' not in storage:
                        self.log_info(wrong_hoster)
                        self.db.store(title, 'wrong_hoster')
                        notify([wrong_hoster], self.configfile)
                    else:
                        self.log_debug(wrong_hoster)
            else:
                return self.send_package(title, download_link, language_id,
                                         season, episode)
        except:
            print(
                u"SF hat die Serien-API angepasst. Breche Download-Prüfung ab!"
            )
Example #8
def download_sj(sj_id, special, device, configfile, dbfile):
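    # Scrape the series overview page for season links, then pick the best-rated
    # release per season from the page's regex-matched package/episode listings.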
    url = get_url(decode_base64("aHR0cDovL3Nlcmllbmp1bmtpZXMub3JnLz9jYXQ9") + str(sj_id), configfile, dbfile)
    season_pool = re.findall(r'<h2>Staffeln:(.*?)<h2>Feeds', url).pop()
    season_links = re.findall(
        r'href="(.{1,125})">.{1,90}(Staffel|Season).*?(\d{1,2}-?\d{1,2}|\d{1,2})', season_pool)
    title = html_to_str(re.findall(r'>(.{1,85}?) &#', season_pool).pop())

    rsscrawler = RssConfig('RSScrawler', configfile)

    listen = ["SJ_Serien", "MB_Staffeln"]
    for liste in listen:
        cont = ListDb(dbfile, liste).retrieve()
        list_title = sanitize(title)
        if not cont:
            cont = ""
        if list_title not in cont:
            ListDb(dbfile, liste).store(list_title)

    staffeln = []
    staffel_nr = []
    seasons = []

    for s in season_links:
        if "staffel" in s[1].lower():
            staffeln.append([s[2], s[0]])
            if "-" in s[2]:
                split = s[2].split("-")
                split = range(int(split[0]), int(split[1]) + 1)
                for nr in split:
                    staffel_nr.append(str(nr))
            else:
                staffel_nr.append(s[2])
        else:
            seasons.append([s[2], s[0]])

    if rsscrawler.get("english"):
        for se in seasons:
            if not se[0] in staffel_nr:
                staffeln.append(se)

    to_dl = []
    for s in staffeln:
        if "-" in s[0]:
            split = s[0].split("-")
            split = range(int(split[0]), int(split[1]) + 1)
            for i in split:
                to_dl.append([str(i), s[1]])
        else:
            to_dl.append([s[0], s[1]])

    found_seasons = {}
    for dl in to_dl:
        if len(dl[0]) == 1:
            sxx = "S0" + str(dl[0])
        else:
            sxx = "S" + str(dl[0])
        link = dl[1]
        if sxx not in found_seasons:
            found_seasons[sxx] = link

    something_found = False

    for sxx, link in found_seasons.items():
        config = RssConfig('SJ', configfile)
        quality = config.get('quality')
        url = get_url(link, configfile, dbfile)
        pakete = re.findall(re.compile(r'<p><strong>(.*?\.' + sxx + r'\..*?' + quality +
                                       r'.*?)<.*?\n.*?href="(.*?)".*? \| (.*)<(?:.*?\n.*?href="(.*?)".*? \| (.*)<|)'),
                            url)
        folgen = re.findall(re.compile(r'<p><strong>(.*?\.' + sxx +
                                       r'E\d{1,3}.*?' + quality + r'.*?)<.*?\n.*?href="(.*?)".*? \| (.*)<(?:.*?\n.*?href="(.*?)".*? \| (.*)<|)'),
                            url)
        lq_pakete = re.findall(re.compile(
            r'<p><strong>(.*?\.' + sxx + r'\..*?)<.*?\n.*?href="(.*?)".*? \| (.*)<(?:.*?\n.*?href="(.*?)".*? \| (.*)<|)'),
            url)
        lq_folgen = re.findall(re.compile(
            r'<p><strong>(.*?\.' + sxx + r'E\d{1,3}.*?)<.*?\n.*?href="(.*?)".*? \| (.*)<(?:.*?\n.*?href="(.*?)".*? \| (.*)<|)'),
            url)

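        # Nothing matched the zero-padded season tag (e.g. "S01") - retry with the
        # unpadded form ("S1").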
        if not pakete and not folgen and not lq_pakete and not lq_folgen:
            sxx = sxx.replace("S0", "S")
            pakete = re.findall(re.compile(r'<p><strong>(.*?\.' + sxx + r'\..*?' + quality +
                                           r'.*?)<.*?\n.*?href="(.*?)".*? \| (.*)<(?:.*?\n.*?href="(.*?)".*? \| (.*)<|)'),
                                url)
            folgen = re.findall(re.compile(
                r'<p><strong>(.*?\.' + sxx + r'E\d{1,3}.*?' + quality + r'.*?)<.*?\n.*?href="(.*?)".*? \| (.*)<(?:.*?\n.*?href="(.*?)".*? \| (.*)<|)'),
                url)
            lq_pakete = re.findall(re.compile(
                r'<p><strong>(.*?\.' + sxx + r'\..*?)<.*?\n.*?href="(.*?)".*? \| (.*)<(?:.*?\n.*?href="(.*?)".*? \| (.*)<|)'),
                url)
            lq_folgen = re.findall(re.compile(
                r'<p><strong>(.*?\.' + sxx + r'E\d{1,3}.*?)<.*?\n.*?href="(.*?)".*? \| (.*)<(?:.*?\n.*?href="(.*?)".*? \| (.*)<|)'),
                url)

        if special and "e" in special.lower():
            pakete = []
            lq_pakete = []

        best_matching_links = []

        if pakete:
            links = []
            for x in pakete:
                title = x[0]
                score = rate(title, configfile)
                hoster = [[x[2], x[1]], [x[4], x[3]]]
                if special:
                    if special.lower() in title.lower():
                        links.append([score, title, hoster])
                else:
                    links.append([score, title, hoster])
            if links:
                highest_score = sorted(links, reverse=True)[0][0]
                for l in links:
                    if l[0] == highest_score:
                        for hoster in l[2]:
                            best_matching_links.append(
                                [l[1], hoster[0], hoster[1]])
        elif folgen:
            links = []
            for x in folgen:
                title = x[0]
                score = rate(title, configfile)
                hoster = [[x[2], x[1]], [x[4], x[3]]]
                if special:
                    if special.lower() in title.lower():
                        links.append([score, title, hoster])
                else:
                    links.append([score, title, hoster])
            if links:
                highest_score = sorted(links, reverse=True)[0][0]
                for l in links:
                    if l[0] == highest_score:
                        for hoster in l[2]:
                            best_matching_links.append(
                                [l[1], hoster[0], hoster[1]])
        elif lq_pakete:
            links = []
            for x in lq_pakete:
                title = x[0]
                score = rate(title, configfile)
                hoster = [[x[2], x[1]], [x[4], x[3]]]
                if special:
                    if special.lower() in title.lower():
                        links.append([score, title, hoster])
                else:
                    links.append([score, title, hoster])
            if links:
                highest_score = sorted(links, reverse=True)[0][0]
                for l in links:
                    if l[0] == highest_score:
                        for hoster in l[2]:
                            best_matching_links.append(
                                [l[1], hoster[0], hoster[1]])
        elif lq_folgen:
            links = []
            for x in lq_folgen:
                title = x[0]
                score = rate(title, configfile)
                hoster = [[x[2], x[1]], [x[4], x[3]]]
                if special:
                    if special.lower() in title.lower():
                        links.append([score, title, hoster])
                else:
                    links.append([score, title, hoster])
            if links:
                highest_score = sorted(links, reverse=True)[0][0]
                for l in links:
                    if l[0] == highest_score:
                        for hoster in l[2]:
                            best_matching_links.append(
                                [l[1], hoster[0], hoster[1]])

        notify_array = []
        for best_link in best_matching_links:
            dl_title = best_link[0].replace(
                "Staffelpack ", "").replace("Staffelpack.", "")
            dl_hoster = best_link[1]
            dl_link = best_link[2]
            config = RssConfig('SJ', configfile)
            hoster = re.compile(config.get('hoster'))
            db = RssDb(dbfile, 'rsscrawler')

            if re.match(hoster, dl_hoster.lower()):
                if myjd_download(configfile, device, dl_title, "RSScrawler", dl_link,
                                 decode_base64("c2VyaWVuanVua2llcy5vcmc=")):
                    db.store(dl_title, 'added')
                    log_entry = '[Suche/Serie] - ' + dl_title
                    logging.info(log_entry)
                    notify_array.append(log_entry)
                else:
                    return False
        if len(best_matching_links) > 0:
            something_found = True
        notify(notify_array, configfile)
    if not something_found:
        return False
    return True
Example #9
def download_bl(payload, device, configfile, dbfile):
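    # Parse a blog detail page, keep only hoster links that match the configured
    # regex, and optionally enforce a dual-language (.dl.) release via IMDb lookup.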
    payload = decode_base64(payload).split(";")
    link = payload[0]
    password = payload[1]
    url = get_url(link, configfile, dbfile)
    config = RssConfig('MB', configfile)
    hoster = re.compile(config.get('hoster'))
    db = RssDb(dbfile, 'rsscrawler')

    soup = BeautifulSoup(url, 'lxml')
    download = soup.find("div", {"id": "content"})
    try:
        key = re.findall(r'Permanent Link: (.*?)"', str(download)).pop()
        url_hosters = re.findall(r'href="([^"\'>]*)".+?(.+?)<', str(download))
    except:
        items_head = soup.find("div", {"class": "topbox"})
        key = items_head.contents[1].a["title"]
        items_download = soup.find("div", {"class": "download"})
        url_hosters = []
        download = items_download.find_all("span", {"style": "display:inline;"}, text=True)
        for link in download:
            link = link.a
            text = link.text.strip()
            if text:
                url_hosters.append([str(link["href"]), str(text)])

    links = {}
    for url_hoster in reversed(url_hosters):
        if not decode_base64("bW92aWUtYmxvZy50by8=") in url_hoster[0] and "https://goo.gl/" not in url_hoster[0]:
            link_hoster = url_hoster[1].lower().replace('target="_blank">', '').replace(" ", "-")
            if re.match(hoster, link_hoster):
                links[link_hoster] = url_hoster[0]
    download_links = links.values() if six.PY2 else list(links.values())

    englisch = False
    if "*englisch*" in key.lower():
        key = key.replace('*ENGLISCH*', '').replace("*Englisch*", "")
        englisch = True

    staffel = re.search(r"s\d{1,2}(-s\d{1,2}|-\d{1,2}|\.)", key.lower())

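    # enforcedl: for releases without ".dl." in the name, determine the IMDb page
    # (from the page itself or via IMDb search) to rule out a German original
    # before falling back to a dual-language search.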
    if config.get('enforcedl') and '.dl.' not in key.lower():
        fail = False
        get_imdb_url = url
        key_regex = r'<title>' + \
                    re.escape(
                        key) + r'.*?<\/title>\n.*?<link>(?:(?:.*?\n){1,25}).*?[mM][kK][vV].*?(?:|href=.?http(?:|s):\/\/(?:|www\.)imdb\.com\/title\/(tt[0-9]{7,9}).*?)[iI][mM][dD][bB].*?(?!\d(?:\.|\,)\d)(?:.|.*?)<\/a>'
        imdb_id = re.findall(key_regex, get_imdb_url)
        if len(imdb_id) > 0:
            if not imdb_id[0]:
                fail = True
            else:
                imdb_id = imdb_id[0]
        else:
            fail = True
        if fail:
            search_title = re.findall(
                r"(.*?)(?:\.(?:(?:19|20)\d{2})|\.German|\.\d{3,4}p|\.S(?:\d{1,3})\.)", key)[0].replace(".", "+")
            search_url = "http://www.imdb.com/find?q=" + search_title
            search_page = get_url(search_url, configfile, dbfile)
            search_results = re.findall(
                r'<td class="result_text"> <a href="\/title\/(tt[0-9]{7,9})\/\?ref_=fn_al_tt_\d" >(.*?)<\/a>.*? \((\d{4})\)..(.{9})',
                search_page)
            total_results = len(search_results)
            if staffel:
                imdb_id = search_results[0][0]
            else:
                no_series = False
                while total_results > 0:
                    attempt = 0
                    for result in search_results:
                        if result[3] == "TV Series":
                            no_series = False
                            total_results -= 1
                            attempt += 1
                        else:
                            no_series = True
                            imdb_id = search_results[attempt][0]
                            total_results = 0
                            break
                if no_series is False:
                    logging.debug(
                        "%s - Keine passende Film-IMDB-Seite gefunden" % key)

        if staffel:
            filename = 'MB_Staffeln'
        else:
            filename = 'MB_Filme'

        bl = BL(configfile, dbfile, device, logging, filename=filename)

        if not imdb_id:
            if not bl.dual_download(key, password):
                logging.debug(
                    "%s - Kein zweisprachiges Release gefunden." % key)
        else:
            if isinstance(imdb_id, list):
                imdb_id = imdb_id.pop()
            imdb_url = "http://www.imdb.com/title/" + imdb_id
            details = get_url(imdb_url, configfile, dbfile)
            if not details:
                logging.debug("%s - Originalsprache nicht ermittelbar" % key)
            original_language = re.findall(
                r"Language:<\/h4>\n.*?\n.*?url'>(.*?)<\/a>", details)
            if original_language:
                original_language = original_language[0]
            if original_language == "German":
                logging.debug(
                    "%s - Originalsprache ist Deutsch. Breche Suche nach zweisprachigem Release ab!" % key)
            else:
                if not bl.dual_download(key, password) and not englisch:
                    logging.debug(
                        "%s - Kein zweisprachiges Release gefunden! Breche ab." % key)

    if download_links:
        if staffel:
            if myjd_download(configfile, device, key, "RSScrawler", download_links, password):
                db.store(
                    key.replace(".COMPLETE", "").replace(".Complete", ""),
                    'notdl' if config.get(
                        'enforcedl') and '.dl.' not in key.lower() else 'added'
                )
                log_entry = '[Staffel] - ' + key.replace(".COMPLETE", "").replace(".Complete", "")
                logging.info(log_entry)
                notify([log_entry], configfile)
                return True
        elif '.3d.' in key.lower():
            retail = False
            if config.get('cutoff') and '.COMPLETE.' not in key.lower():
                if config.get('enforcedl'):
                    if cutoff(key, '2', dbfile):
                        retail = True
            if myjd_download(configfile, device, key, "RSScrawler/3Dcrawler", download_links, password):
                db.store(
                    key,
                    'notdl' if config.get(
                        'enforcedl') and '.dl.' not in key.lower() else 'added'
                )
                log_entry = '[Suche/Film] - ' + (
                    'Retail/' if retail else "") + '3D - ' + key
                logging.info(log_entry)
                notify([log_entry], configfile)
                return True
        else:
            retail = False
            if config.get('cutoff') and '.COMPLETE.' not in key.lower():
                if config.get('enforcedl'):
                    if cutoff(key, '1', dbfile):
                        retail = True
                else:
                    if cutoff(key, '0', dbfile):
                        retail = True
            if myjd_download(configfile, device, key, "RSScrawler", download_links, password):
                db.store(
                    key,
                    'notdl' if config.get(
                        'enforcedl') and '.dl.' not in key.lower() else 'added'
                )
                log_entry = '[Suche/Film] - ' + ('Englisch - ' if englisch and not retail else "") + (
                    'Englisch/Retail - ' if englisch and retail else "") + (
                                'Retail - ' if not englisch and retail else "") + key
                logging.info(log_entry)
                notify([log_entry], configfile)
                return True
    else:
        return False
Example #10
File: yt.py Project: rix1337/RSScrawler
    def periodical_task(self):
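        # Resolve each configured channel or playlist, then queue up to "maxvideos"
        # recent videos per channel, honoring the ignore list.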
        if not self.config.get('youtube'):
            self.log_debug("Suche für YouTube deaktiviert!")
            return self.device
        added_items = []
        channels = []
        videos = []

        self.liste = self.read_input(self.youtube)

        if not self.liste:
            self.log_debug("Liste ist leer. Stoppe Suche für YouTube!")

        for item in self.liste:
            if len(item) > 0:
                if self.config.get("youtube") is False:
                    self.log_debug(
                        "Liste ist leer. Stoppe Suche für YouTube!")
                    return self.device
                channels.append(item)

        for channel in channels:
            if 'list=' in channel:
                id_cutter = channel.rfind('list=') + 5
                channel = channel[id_cutter:]
                url = 'https://www.youtube.com/playlist?list=' + channel
                response = get_url(url, self.configfile, self.dbfile)
            else:
                url = 'https://www.youtube.com/user/' + channel + '/videos'
                urlc = 'https://www.youtube.com/channel/' + channel + '/videos'
                cnotfound = False
                try:
                    response = get_url(url, self.configfile, self.dbfile)
                except HTTPError:
                    try:
                        response = get_url(urlc, self.configfile, self.dbfile)
                    except HTTPError:
                        cnotfound = True
                    if cnotfound:
                        self.log_debug("YouTube-Kanal: " +
                                       channel + " nicht gefunden!")
                        break

            links = re.findall(
                r'VideoRenderer":{"videoId":"(.*?)",".*?[Tt]ext":"(.*?)"}', response)

            maxvideos = int(self.config.get("maxvideos"))
            if maxvideos < 1:
                self.log_debug("Anzahl zu suchender YouTube-Videos (" +
                               str(maxvideos) + ") zu gering. Suche stattdessen 1 Video!")
                maxvideos = 1
            elif maxvideos > 50:
                self.log_debug("Anzahl zu suchender YouTube-Videos (" +
                               str(maxvideos) + ") zu hoch. Suche stattdessen maximal 50 Videos!")
                maxvideos = 50

            for link in links[:maxvideos]:
                if len(link[0]) > 10:
                    videos.append(
                        [link[0], link[1], channel])

        for video in videos:
            channel = video[2]
            title = video[1]
            if "[private" in title.lower() and "video]" in title.lower():
                self.log_debug(
                    "[%s] - YouTube-Video ignoriert (Privates Video)" % video)
                continue
            video_title = title.replace("&amp;", "&").replace("&gt;", ">").replace(
                "&lt;", "<").replace('&quot;', '"').replace("&#39;", "'").replace("\u0026", "&")
            video = video[0]
            download_link = 'https://www.youtube.com/watch?v=' + video
            if download_link:
                if self.db.retrieve(video) == 'added':
                    self.log_debug(
                        "[%s] - YouTube-Video ignoriert (bereits gefunden)" % video)
                else:
                    ignore = "|".join(["%s" % p for p in self.config.get("ignore").lower().split(
                        ',')]) if self.config.get("ignore") else r"^unmatchable$"
                    ignorevideo = re.search(ignore, video_title.lower())
                    if ignorevideo:
                        self.log_debug(video_title + " (" + channel + ") " +
                                       "[" + video + "] - YouTube-Video ignoriert (basierend auf ignore-Einstellung)")
                        continue
                    self.device = myjd_download(self.configfile, self.device, "YouTube/" + channel, "RSScrawler",
                                                download_link, "")
                    if self.device:
                        self.db.store(
                            video,
                            'added'
                        )
                        log_entry = '[YouTube] - ' + video_title + ' (' + channel + ')'
                        self.log_info(log_entry)
                        notify([log_entry], self.configfile)
                        added_items.append(log_entry)
        return self.device
Example #11
def crawldog(configfile, dbfile):
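    # Watchdog loop: every 30 seconds, reconcile the titles stored in the crawldog
    # database with the packages JDownloader currently reports.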
    disable_request_warnings(InsecureRequestWarning)
    crawljobs = RssConfig('Crawljobs', configfile)
    autostart = crawljobs.get("autostart")
    db = RssDb(dbfile, 'crawldog')

    grabber_was_collecting = False
    device = False

    while True:
        try:
            if not device or not is_device(device):
                device = get_device(configfile)

            myjd_packages = get_info(configfile, device)
            grabber_collecting = myjd_packages[2]

            if grabber_was_collecting or grabber_collecting:
                grabber_was_collecting = grabber_collecting
                time.sleep(5)
            else:
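                # myjd_packages[4] holds the decrypted downloader/linkgrabber,
                # offline and encrypted package lists, in that order.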
                packages_in_downloader_decrypted = myjd_packages[4][0]
                packages_in_linkgrabber_decrypted = myjd_packages[4][1]
                offline_packages = myjd_packages[4][2]
                encrypted_packages = myjd_packages[4][3]

                try:
                    watched_titles = db.retrieve_all_titles()
                except:
                    watched_titles = False

                notify_list = []

                if packages_in_downloader_decrypted or packages_in_linkgrabber_decrypted or offline_packages or encrypted_packages:

                    if watched_titles:
                        for title in watched_titles:
                            if packages_in_downloader_decrypted:
                                for package in packages_in_downloader_decrypted:
                                    if title[0] in package['name'] or title[0].replace(".", " ") in package['name']:
                                        check = hoster_check(configfile, device, [package], title[0], [0])
                                        device = check[0]
                                        if device:
                                            db.delete(title[0])

                            if packages_in_linkgrabber_decrypted:
                                for package in packages_in_linkgrabber_decrypted:
                                    if title[0] in package['name'] or title[0].replace(".", " ") in package['name']:
                                        check = hoster_check(configfile, device, [package], title[0], [0])
                                        device = check[0]
                                        episode = RssDb(dbfile, 'episode_remover').retrieve(title[0])
                                        if episode:
                                            filenames = package['filenames']
                                            if len(filenames) > 1:
                                                fname_episodes = []
                                                for fname in filenames:
                                                    try:
                                                        if re.match(r'.*S\d{1,3}E\d{1,3}.*', fname, flags=re.IGNORECASE):
                                                            fname = re.findall(r'S\d{1,3}E(\d{1,3})', fname, flags=re.IGNORECASE).pop()
                                                        else:
                                                            fname = fname.replace("hddl8", "").replace("dd51", "").replace("264", "").replace("265", "")
                                                    except:
                                                        fname = fname.replace("hddl8", "").replace("dd51", "").replace("264", "").replace("265", "")
                                                    fname_episode = "".join(re.findall(r'\d+', fname.split(".part")[0]))
                                                    try:
                                                        fname_episodes.append(str(int(fname_episode)))
                                                    except:
                                                        pass
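                                                # Strip the longest substring shared by all filenames until only
                                                # the per-file episode numbers remain.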
                                                replacer = longest_substr(fname_episodes)

                                                new_fname_episodes = []
                                                for new_ep_fname in fname_episodes:
                                                    try:
                                                        new_fname_episodes.append(str(int(new_ep_fname.replace(replacer, ""))))
                                                    except:
                                                        pass
                                                replacer = longest_substr(new_fname_episodes)

                                                newer_fname_episodes = []
                                                for new_ep_fname in new_fname_episodes:
                                                    try:
                                                        newer_fname_episodes.append(str(int(re.sub(replacer, "", new_ep_fname, 1))))
                                                    except:
                                                        pass

                                                replacer = longest_substr(newer_fname_episodes)

                                                even_newer_fname_episodes = []
                                                for newer_ep_fname in newer_fname_episodes:
                                                    try:
                                                        even_newer_fname_episodes.append(str(int(re.sub(replacer, "", newer_ep_fname, 1))))
                                                    except:
                                                        pass

                                                if even_newer_fname_episodes:
                                                    fname_episodes = even_newer_fname_episodes
                                                elif newer_fname_episodes:
                                                    fname_episodes = newer_fname_episodes
                                                elif new_fname_episodes:
                                                    fname_episodes = new_fname_episodes

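                                                # Drop every link whose episode number does not match the stored episode.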
                                                delete_linkids = []
                                                pos = 0
                                                for delete_id in package['linkids']:
                                                    if str(episode) != str(fname_episodes[pos]):
                                                        delete_linkids.append(delete_id)
                                                    pos += 1
                                                if delete_linkids:
                                                    delete_uuids = [package['uuid']]
                                                    RssDb(dbfile, 'episode_remover').delete(title[0])
                                                    device = remove_from_linkgrabber(configfile, device, delete_linkids, delete_uuids)
                                        if autostart:
                                            device = move_to_downloads(
                                                configfile, device,
                                                package['linkids'],
                                                [package['uuid']])
                                        if device:
                                            db.delete(title[0])

                            if offline_packages:
                                for package in offline_packages:
                                    if title[0] in package['name'] or title[0].replace(".", " ") in package['name']:
                                        notify_list.append("[Offline] - " + title[0])
                                        print(u"[Offline] - " + title[0])
                                        db.delete(title[0])

                            if encrypted_packages:
                                for package in encrypted_packages:
                                    if title[0] in package['name'] or title[0].replace(".", " ") in package['name']:
                                        if title[1] == 'added':
                                            if retry_decrypt(configfile, dbfile, device, package['linkids'],
                                                             [package['uuid']], package['urls']):
                                                db.delete(title[0])
                                                db.store(title[0], 'retried')
                                        else:
                                            add_decrypt(package['name'], package['url'], "", dbfile)
                                            device = remove_from_linkgrabber(configfile, device, package['linkids'],
                                                                             [package['uuid']])
                                            notify_list.append("[Click'n'Load notwendig] - " + title[0])
                                            print(u"[Click'n'Load notwendig] - " + title[0])
                                            db.delete(title[0])
                else:
                    if not grabber_collecting:
                        db.reset()

                if notify_list:
                    notify(notify_list, configfile)

                time.sleep(30)
        except Exception:
            traceback.print_exc()
            time.sleep(30)
Example #12
def download_sj(payload, configfile, dbfile):
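    # Web-search variant: query the SJ releases API and keep the best-rated release
    # per season (or per episode, if only single episodes are available).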
    hostnames = RssConfig('Hostnames', configfile)
    sj = hostnames.get('sj')

    payload = decode_base64(payload).split("|")
    href = payload[0]
    title = payload[1]
    special = payload[2].strip().replace("None", "")

    series_url = 'https://' + sj + href
    series_info = get_url(series_url, configfile, dbfile)
    series_id = re.findall(r'data-mediaid="(.*?)"', series_info)[0]

    api_url = 'https://' + sj + '/api/media/' + series_id + '/releases'
    releases = get_url(api_url, configfile, dbfile)

    seasons = json.loads(releases)

    listen = ["SJ_Serien", "MB_Staffeln"]
    for liste in listen:
        cont = ListDb(dbfile, liste).retrieve()
        list_title = sanitize(title)
        if not cont:
            cont = ""
        if list_title not in cont:
            ListDb(dbfile, liste).store(list_title)

    config = RssConfig('SJ', configfile)
    english_ok = RssConfig('RSScrawler', configfile).get("english")
    quality = config.get('quality')
    ignore = config.get('rejectlist')

    result_seasons = {}
    result_episodes = {}

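    # First pass: rate only releases that match the configured quality, the special
    # tag and the language filter, and that offer at least one usable hoster.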
    for season in seasons:
        releases = seasons[season]
        for release in releases['items']:
            name = release['name'].encode('ascii',
                                          errors='ignore').decode('utf-8')
            hosters = release['hoster']
            try:
                valid = bool(release['resolution'] == quality)
            except:
                valid = re.match(re.compile(r'.*' + quality + r'.*'), name)
            if valid and special:
                valid = bool("." + special.lower() + "." in name.lower())
            if valid and not english_ok:
                valid = bool(".german." in name.lower())
            if valid:
                valid = False
                for hoster in hosters:
                    if hoster and check_hoster(
                            hoster,
                            configfile) or config.get("hoster_fallback"):
                        valid = True
            if valid:
                try:
                    ep = release['episode']
                    if ep:
                        existing = result_episodes.get(season)
                        if existing:
                            for e in existing:
                                if e == ep:
                                    if rate(name, ignore) > rate(
                                            existing[e], ignore):
                                        existing.update({ep: name})
                        else:
                            existing = {ep: name}
                        result_episodes.update({season: existing})
                        continue
                except:
                    pass

                existing = result_seasons.get(season)
                dont = False
                if existing:
                    if rate(name, ignore) < rate(existing, ignore):
                        dont = True
                if not dont:
                    result_seasons.update({season: name})

        try:
            if result_seasons[season] and result_episodes[season]:
                del result_episodes[season]
        except:
            pass

        success = False
        try:
            if result_seasons[season]:
                success = True
        except:
            try:
                if result_episodes[season]:
                    success = True
            except:
                pass

        if success:
            logger.debug(u"Websuche erfolgreich für " + title + " - " + season)
        else:
            for release in releases['items']:
                name = release['name'].encode('ascii',
                                              errors='ignore').decode()
                hosters = release['hoster']
                valid = True
                if special:
                    valid = "." + special.lower() + "." in name.lower()
                if valid and not english_ok:
                    valid = ".german." in name.lower()
                if valid:
                    # Again require a usable hoster (or the fallback option)
                    valid = False
                    for hoster in hosters:
                        if (hoster and check_hoster(hoster, configfile)) or config.get("hoster_fallback"):
                            valid = True
                if valid:
                    try:
                        ep = release['episode']
                        if ep:
                            existing = result_episodes.get(season)
                            if existing:
                                # Keep only the better-rated release per episode
                                if ep not in existing or rate(
                                        name, ignore) > rate(
                                            existing[ep], ignore):
                                    existing[ep] = name
                            else:
                                existing = {ep: name}
                            result_episodes[season] = existing
                            continue
                    except KeyError:
                        pass

                    existing = result_seasons.get(season)
                    if not existing or rate(name, ignore) >= rate(
                            existing, ignore):
                        result_seasons[season] = name

            # A full season pack beats individual episode results
            if season in result_seasons and season in result_episodes:
                del result_episodes[season]
            logger.debug(u"Web search successful for " + title + " - " + season)

    # Flatten the per-season results into a single list of release names
    matches = list(result_seasons.values())
    for episodes in result_episodes.values():
        matches.extend(episodes.values())

    db = RssDb(dbfile, 'rsscrawler')
    notify_array = []
    for title in matches:
        if add_decrypt(title, series_url, sj, dbfile):
            db.store(title, 'added')
            log_entry = u'[Suche/Serie] - ' + title + ' - [SJ]'
            logger.info(log_entry)
            notify_array.append(log_entry)

    notify(notify_array, configfile)

    if not matches:
        return False
    return matches
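The per-episode bookkeeping above reduces to one idea: for every episode key, keep the release whose rate() score is highest. A minimal standalone sketch of that pattern, with a hypothetical scoring function standing in for RSScrawler's rate():

def keep_best(releases, score):
    # releases: iterable of (episode, name) pairs
    # score: callable rating a release name; higher is better
    best = {}
    for ep, name in releases:
        if ep not in best or score(name) > score(best[ep]):
            best[ep] = name
    return best

# Hypothetical usage: prefer 1080p over 720p for the same episode
print(keep_best([("E01", "Show.S01E01.German.720p.WEB"),
                 ("E01", "Show.S01E01.German.1080p.WEB")],
                score=lambda name: 1 if "1080p" in name else 0))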
Example #13
def download_bl(payload, device, configfile, dbfile):
    hostnames = RssConfig('Hostnames', configfile)
    mb = hostnames.get('mb')
    nk = hostnames.get('nk')
    fc = hostnames.get('fc').replace('www.', '').split('.')[0]

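    # The payload is base64-encoded as "link|password"
    # (FX payloads carry a third field: "link|key|password").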
    payload = decode_base64(payload).split("|")
    link = payload[0]
    password = payload[1]
    url = get_url(link, configfile, dbfile)
    if not url or "NinjaFirewall 429" in url:
        # Page unreachable or blocked by NinjaFirewall rate limiting
        return False

    config = RssConfig('MB', configfile)
    db = RssDb(dbfile, 'rsscrawler')
    soup = BeautifulSoup(url, 'lxml')

    site = check_is_site(link, configfile)
    if not site:
        return False
    else:
        if "MB" in site:
            if not fc:
                print(
                    u"FC hostname not set. MB cannot find any links!")
                return False
            key = soup.find("span", {"class": "fn"}).text
            hosters = soup.find_all("a", href=re.compile(fc))
            url_hosters = []
            for hoster in hosters:
                dl = hoster["href"]
                hoster = hoster.text
                url_hosters.append([dl, hoster])
        elif "HW" in site:
            if not fc:
                print(
                    u"FC hostname not set. HW cannot find any links!")
                return False
            key = re.findall(r'Permanent Link: (.*?)"', str(soup)).pop()
            hosters = soup.find_all("a", href=re.compile(fc))
            url_hosters = []
            for hoster in hosters:
                dl = hoster["href"]
                hoster = hoster.text
                url_hosters.append([dl, hoster])
        elif "HS" in site:
            download = soup.find("div", {"class": "entry-content"})
            key = soup.find("h2", {"class": "entry-title"}).text
            url_hosters = re.findall(r'href="([^"\'>]*)".+?(.+?)<',
                                     str(download))
        elif "NK" in site:
            key = soup.find("span", {"class": "subtitle"}).text
            url_hosters = []
            hosters = soup.find_all("a", href=re.compile("/go/"))
            for hoster in hosters:
                url_hosters.append(
                    ['https://' + nk + hoster["href"], hoster.text])
        elif "FX" in site:
            key = payload[1]
            password = payload[2]
        else:
            return False

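        # Collect download links per hoster, skipping mirrors that point back
        # to MB itself and goo.gl shortener links; only hosters accepted by
        # check_hoster are kept unless the hoster fallback is enabled.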
        links = {}
        if "MB" in site or "HW" in site or "HS" in site or "NK" in site:
            for url_hoster in reversed(url_hosters):
                try:
                    if mb.split('.')[0] not in url_hoster[0] \
                            and "https://goo.gl/" not in url_hoster[0]:
                        link_hoster = url_hoster[1].lower().replace(
                            'target="_blank">', '').replace(" ", "-")
                        if check_hoster(link_hoster, configfile):
                            links[link_hoster] = url_hoster[0]
                except Exception:
                    pass
            if config.get("hoster_fallback") and not links:
                for url_hoster in reversed(url_hosters):
                    if mb.split('.')[0] not in url_hoster[0] \
                            and "https://goo.gl/" not in url_hoster[0]:
                        link_hoster = url_hoster[1].lower().replace(
                            'target="_blank">', '').replace(" ", "-")
                        links[link_hoster] = url_hoster[0]
            download_links = list(links.values())
        elif "FX" in site:
            download_links = fx_download_links(url, key, configfile)

        englisch = False
        if "*englisch" in key.lower() or "*english" in key.lower():
            # Strip the language marker from the release name
            key = re.sub(r'\*(?:ENGLISCH|Englisch|ENGLISH|English)', '',
                         key).replace("*", "")
            englisch = True

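        # Season packs are recognised by patterns like "S01.", "S01-S03" or
        # "S01-03" in the lowercased release name.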
        staffel = re.search(r"s\d{1,2}(-s\d{1,2}|-\d{1,2}|\.)", key.lower())

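        # "enforcedl" enforces dual-language (.DL.) releases: look up the
        # movie's original language on IMDb to decide whether a German/English
        # dual release can exist at all.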
        if config.get('enforcedl') and '.dl.' not in key.lower():
            fail = False
            get_imdb_url = url
            key_regex = r'<title>' + \
                        re.escape(
                            key) + r'.*?<\/title>\n.*?<link>(?:(?:.*?\n){1,25}).*?[mM][kK][vV].*?(?:|href=.?http(?:|s):\/\/(?:|www\.)imdb\.com\/title\/(tt[0-9]{7,9}).*?)[iI][mM][dD][bB].*?(?!\d(?:\.|\,)\d)(?:.|.*?)<\/a>'
            imdb_id = re.findall(key_regex, get_imdb_url)
            if imdb_id and imdb_id[0]:
                imdb_id = imdb_id[0]
            else:
                fail = True
            if fail:
                try:
                    search_title = re.findall(
                        r"(.*?)(?:\.(?:(?:19|20)\d{2})|\.German|\.\d{3,4}p|\.S(?:\d{1,3})\.)",
                        key)[0].replace(".", "+")
                    search_url = "http://www.imdb.com/find?q=" + search_title
                    search_page = get_url(search_url, configfile, dbfile)
                    search_results = re.findall(
                        r'<td class="result_text"> <a href="\/title\/(tt[0-9]{7,9})\/\?ref_=fn_al_tt_\d" >(.*?)<\/a>.*? \((\d{4})\)..(.{9})',
                        search_page)
                    total_results = len(search_results)
                except Exception:
                    return False
                if staffel:
                    try:
                        imdb_id = search_results[0][0]
                    except IndexError:
                        imdb_id = False
                else:
                    # Take the first search result that is not a TV series
                    imdb_id = False
                    for result in search_results:
                        if result[3] != "TV Series":
                            imdb_id = result[0]
                            break
                    if not imdb_id:
                        logger.debug(
                            "%s - No matching movie IMDb page found" % key)

            if staffel:
                filename = 'MB_Staffeln'
            else:
                filename = 'MB_Filme'

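            # cloudscraper returns a requests-compatible session that solves
            # Cloudflare's anti-bot challenge before fetching pages.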
            scraper = cloudscraper.create_scraper()
            blog = BL(configfile,
                      dbfile,
                      device,
                      logging,
                      scraper,
                      filename=filename)

            if not imdb_id:
                if not blog.dual_download(key, password):
                    logger.debug("%s - No dual-language release found." %
                                 key)
            else:
                if isinstance(imdb_id, list):
                    imdb_id = imdb_id.pop()
                imdb_url = "http://www.imdb.com/title/" + imdb_id
                details = get_url(imdb_url, configfile, dbfile)
                original_language = False
                if not details:
                    logger.debug("%s - Original language could not be determined" %
                                 key)
                else:
                    original_language = re.findall(
                        r"Language:<\/h4>\n.*?\n.*?url'>(.*?)<\/a>", details)
                    if original_language:
                        original_language = original_language[0]
                if original_language == "German":
                    logger.debug(
                        "%s - Original language is German. Aborting search for a dual-language release!"
                        % key)
                else:
                    if not blog.dual_download(key, password) and not englisch:
                        logger.debug(
                            "%s - No dual-language release found!" % key)

        if download_links:
            if staffel:
                if myjd_download(configfile, dbfile, device, key, "RSScrawler",
                                 download_links, password):
                    db.store(
                        key.replace(".COMPLETE", "").replace(".Complete", ""),
                        'notdl' if config.get('enforcedl')
                        and '.dl.' not in key.lower() else 'added')
                    log_entry = '[Suche/Staffel] - ' + key.replace(
                        ".COMPLETE", "").replace(".Complete",
                                                 "") + ' - [' + site + ']'
                    logger.info(log_entry)
                    notify([log_entry], configfile)
                    return True
            elif '.3d.' in key.lower():
                retail = False
                if config.get('cutoff') and '.COMPLETE.' not in key.lower():
                    if config.get('enforcedl'):
                        if is_retail(key, '2', dbfile):
                            retail = True
                if myjd_download(configfile, dbfile, device, key,
                                 "RSScrawler/3Dcrawler", download_links,
                                 password):
                    db.store(
                        key, 'notdl' if config.get('enforcedl')
                        and '.dl.' not in key.lower() else 'added')
                    log_entry = '[Suche/Film' + (
                        '/Retail' if retail else
                        "") + '/3D] - ' + key + ' - [' + site + ']'
                    logger.info(log_entry)
                    notify([log_entry], configfile)
                    return True
            else:
                retail = False
                if config.get('cutoff') and '.COMPLETE.' not in key.lower():
                    if config.get('enforcedl'):
                        if is_retail(key, '1', dbfile):
                            retail = True
                    else:
                        if is_retail(key, '0', dbfile):
                            retail = True
                if myjd_download(configfile, dbfile, device, key, "RSScrawler",
                                 download_links, password):
                    db.store(
                        key, 'notdl' if config.get('enforcedl')
                        and '.dl.' not in key.lower() else 'added')
                    log_entry = '[Suche/Film' + (
                        '/Englisch' if englisch and not retail else
                        '') + ('/Englisch/Retail' if englisch and retail else
                               '') + ('/Retail' if not englisch and retail else
                                      '') + '] - ' + key + ' - [' + site + ']'
                    logger.info(log_entry)
                    notify([log_entry], configfile)
                    return [key]
        else:
            return False
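download_bl expects its payload pre-encoded by the caller. A minimal sketch of the expected encoding, assuming the standard-library base64 module mirrors the project's decode_base64 helper; device, configfile and dbfile stand in for the caller's real objects:

import base64

def encode_payload(link, password):
    # Produces the "link|password" payload that download_bl decodes
    return base64.b64encode(
        ("%s|%s" % (link, password)).encode("utf-8")).decode("utf-8")

payload = encode_payload("https://example.org/some-release", "secret")
# download_bl(payload, device, configfile, dbfile)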