Code Example #1
File: common.py  Project: sweatcher/RSScrawler
import logging
import os
import re
import sys


def entfernen(retailtitel, identifier):
    # Helper to capitalize list entries consistently (prerequisite for a uniform cutoff)
    def capitalize(line):
        return ' '.join(s[0].upper() + s[1:] for s in line.split(' '))
    log_info = logging.info
    log_debug = logging.debug
    # Trim the retail title to list format and strip extra tags
    simplified = retailtitel.replace(".", " ")
    # Elaborate regex string that identifies retail releases
    retail = re.sub(r'(|.UNRATED|Uncut|UNCUT)(|.Directors.Cut|.DC|.EXTENDED|.THEATRICAL)(|.3D|.3D.HSBS|.3D.HOU|.HSBS|.HOU).\d{4}(|.UNRATED|Uncut|UNCUT)(|.Directors.Cut|.DC|.EXTENDED|.THEATRICAL)(|.3D|.3D.HSBS|.3D.HOU|.HSBS|.HOU).(German|GERMAN)(|.AC3|.DTS)(|.DL)(|.AC3|.DTS).(1080|720)p.(HDDVD|BluRay)(|.AVC|.AVC.REMUX|.x264)(|.REPACK|.RERiP)-.*', "", simplified)
    # The regex above without the year removal, in case the list entry includes a year
    retailyear = re.sub(r'(|.UNRATED|Uncut|UNCUT)(|.Directors.Cut|.DC|.EXTENDED|.THEATRICAL)(|.3D|.3D.HSBS|.3D.HOU|.HSBS|.HOU).(German|GERMAN)(|.AC3|.DTS)(|.DL)(|.AC3|.DTS).(1080|720)p.(HDDVD|BluRay)(|.AVC|.AVC.REMUX|.x264)(|.REPACK|.RERiP)-.*', "", simplified)
    if identifier == '2':
        liste = "MB_3D"
    else:
        liste = "MB_Filme"
    with open(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Listen/' + liste + '.txt'), 'r') as l:
        content = l.read()
    # Write the list contents back with the retail title removed
    with open(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Listen/' + liste + '.txt'), 'w') as w:
        w.write(content.replace(retailyear, "")
                       .replace(retail, "")
                       .replace(retailyear.lower(), "")
                       .replace(retail.lower(), "")
                       .replace(retailyear.upper(), "")
                       .replace(retail.upper(), "")
                       .replace(capitalize(retailyear), "")
                       .replace(capitalize(retail), ""))
    # Remove blank lines and input errors (files is a project-internal RSScrawler module)
    files.check()
    log_debug(retail + " durch Cutoff aus " + liste + " entfernt.")
    log_info(retail + (" [3D]" if identifier == "2" else "") + " [Retail]")
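
The long regex is easier to understand when applied to a concrete input. Below is a minimal, self-contained sketch of what it strips from a typical release name; the sample title and release group are hypothetical. Note that the unescaped dots in the pattern act as wildcards, which is why they also match the spaces introduced by replace():

import re

# Hypothetical release name; entfernen() first turns dots into spaces.
simplified = "Some.Movie.2018.German.DL.1080p.BluRay.x264-GROUP".replace(".", " ")

# The retail pattern from the example above, split purely for readability.
pattern = (r'(|.UNRATED|Uncut|UNCUT)(|.Directors.Cut|.DC|.EXTENDED|.THEATRICAL)'
           r'(|.3D|.3D.HSBS|.3D.HOU|.HSBS|.HOU).\d{4}'
           r'(|.UNRATED|Uncut|UNCUT)(|.Directors.Cut|.DC|.EXTENDED|.THEATRICAL)'
           r'(|.3D|.3D.HSBS|.3D.HOU|.HSBS|.HOU).(German|GERMAN)'
           r'(|.AC3|.DTS)(|.DL)(|.AC3|.DTS).(1080|720)p.(HDDVD|BluRay)'
           r'(|.AVC|.AVC.REMUX|.x264)(|.REPACK|.RERiP)-.*')

print(re.sub(pattern, "", simplified))  # -> Some Movie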
Code Example #2
def entfernen(retailtitel, identifier):
    # Requires the same imports as example #1 (logging, os, re, sys);
    # log_debug is bound locally here as well, since it is used below.
    log_debug = logging.debug

    def capitalize(line):
        line = line.rstrip()
        return ' '.join(s[0].upper() + s[1:] for s in line.split(' '))
    simplified = retailtitel.replace(".", " ")
    retail = re.sub(r'(|.UNRATED|.Unrated|.Uncut|.UNCUT)(|.Directors.Cut|.DC|.EXTENDED|.Extended|.Theatrical|.THEATRICAL)(|.3D|.3D.HSBS|.3D.HOU|.HSBS|.HOU)(|.)\d{4}(|.)(|.UNRATED|.Unrated|.Uncut|.UNCUT)(|.Directors.Cut|.DC|.EXTENDED|.Extended|.Theatrical|.THEATRICAL)(|.3D|.3D.HSBS|.3D.HOU|.HSBS|.HOU).(German|GERMAN)(|.AC3|.DTS|.DTS-HD)(|.DL)(|.AC3|.DTS).(2160|1080|720)p.(UHD.|Ultra.HD.|)(HDDVD|BluRay)(|.HDR)(|.AVC|.AVC.REMUX|.x264|.x265)(|.REPACK|.RERiP)-.*', "", simplified)
    retailyear = re.sub(r'(|.UNRATED|.Unrated|.Uncut|.UNCUT)(|.Directors.Cut|.DC|.EXTENDED|.Extended|.Theatrical|.THEATRICAL)(|.3D|.3D.HSBS|.3D.HOU|.HSBS|.HOU).(German|GERMAN)(|.AC3|.DTS|.DTS-HD)(|.DL)(|.AC3|.DTS|.DTS-HD).(2160|1080|720)p.(UHD.|Ultra.HD.|)(HDDVD|BluRay)(|.HDR)(|.AVC|.AVC.REMUX|.x264|.x265)(|.REPACK|.RERiP)-.*', "", simplified)
    if identifier == '2':
        liste = "MB_3D"
    else:
        liste = "MB_Filme"
    with open(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Listen/' + liste + '.txt'), 'r') as l:
        variants = [retailyear, retail,
                    retailyear.lower(), retail.lower(),
                    retailyear.upper(), retail.upper(),
                    capitalize(retailyear), capitalize(retail)]
        # Strip any variant of the title from the start of each line.
        pattern = r'^(' + '|'.join(re.escape(v) for v in variants) + ')'
        content = []
        for line in l:
            content.append(re.sub(pattern, '', line))
    with open(os.path.join(os.path.dirname(sys.argv[0]), 'Einstellungen/Listen/' + liste + '.txt'), 'w') as w:
        w.write(''.join(content))
    files.check()
    log_debug(retail + " durch Cutoff aus " + liste + " entfernt.")
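
Compared with example #1, this version anchors the removal at the start of each line and passes every title variant through re.escape, so regex metacharacters in a title cannot corrupt the pattern. A minimal sketch of why the escaping matters; the title is hypothetical:

import re

title = "Movie (Director's Cut)"  # hypothetical list entry
line = title + "\n"

# Without escaping, the parentheses form a capture group rather than matching
# literal brackets, so the substitution silently does nothing:
print(repr(re.sub(r'^' + title, '', line)))             # "Movie (Director's Cut)\n"

# With re.escape, every metacharacter is matched literally:
print(repr(re.sub(r'^' + re.escape(title), '', line)))  # "\n"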
Code Example #3
def get_post_lists():
    # Flask view; the request/jsonify imports and the route registration
    # live elsewhere in the project.
    if request.method == 'GET':
        return jsonify({
            "lists": {
                "mb": {
                    "filme": getListe('MB_Filme'),
                    "filme3d": getListe('MB_3D'),
                    "regex": getListe('MB_Regex'),
                },
                "sj": {
                    "serien": getListe('SJ_Serien'),
                    "regex": getListe('SJ_Serien_Regex'),
                    "staffeln_regex": getListe('SJ_Staffeln_Regex'),
                },
                "mbsj": {
                    "staffeln": getListe('MB_Staffeln'),
                },
                "yt": {
                    "kanaele_playlisten": getListe('YT_Channels'),
                },
            },
        })
    if request.method == 'POST':
        data = request.json
        # Write each submitted list back to its file under Einstellungen/Listen/.
        listen = {
            'MB_Filme': data['mb']['filme'],
            'MB_3D': data['mb']['filme3d'],
            'MB_Staffeln': data['mbsj']['staffeln'],
            'MB_Regex': data['mb']['regex'],
            'SJ_Serien': data['sj']['serien'],
            'SJ_Serien_Regex': data['sj']['regex'],
            'SJ_Staffeln_Regex': data['sj']['staffeln_regex'],
            'YT_Channels': data['yt']['kanaele_playlisten'],
        }
        for liste, inhalt in listen.items():
            with open(
                    os.path.join(os.path.dirname(sys.argv[0]),
                                 'Einstellungen/Listen/' + liste + '.txt'),
                    'wb') as f:
                f.write(inhalt.encode('utf-8'))
        files.check()
        return "Success", 201
    else:
        return "Failed", 405
Code Example #4
def get_post_settings():
    if request.method == 'GET':
        general = RssConfig('RSScrawler')
        alerts = RssConfig('Notifications')
        crawljobs = RssConfig('Crawljobs')
        mb = RssConfig('MB')
        sj = RssConfig('SJ')
        yt = RssConfig('YT')
        return jsonify({
            "settings": {
                "general": {
                    "pfad": general.get("jdownloader"),
                    "port": to_int(general.get("port")),
                    "prefix": general.get("prefix"),
                    "interval": to_int(general.get("interval")),
                    "english": bool(general.get("english")),
                    "hoster": general.get("hoster"),
                },
                "alerts": {
                    "homeassistant": alerts.get("homeassistant"),
                    "pushbullet": alerts.get("pushbullet"),
                    "pushover": alerts.get("pushover"),
                },
                "crawljobs": {
                    "autostart": bool(crawljobs.get("autostart")),
                    "subdir": bool(crawljobs.get("subdir")),
                },
                "mb": {
                    "quality": mb.get("quality"),
                    "ignore": mb.get("ignore"),
                    "regex": bool(mb.get("regex")),
                    "imdb_score": to_float(mb.get("imdb")),
                    "imdb_year": to_int(mb.get("imdbyear")),
                    "historical": bool(mb.get("historical")),
                    "force_dl": bool(mb.get("enforcedl")),
                    "cutoff": bool(mb.get("cutoff")),
                    "crawl_3d": bool(mb.get("crawl3d")),
                },
                "sj": {
                    "quality": sj.get("quality"),
                    "ignore": sj.get("rejectlist"),
                    "regex": bool(sj.get("regex")),
                },
                "mbsj": {
                    "enabled": bool(mb.get("crawlseasons")),
                    "quality": mb.get("seasonsquality"),
                    "packs": bool(mb.get("seasonpacks")),
                    "source": mb.get("seasonssource"),
                },
                "yt": {
                    "enabled": bool(yt.get("youtube")),
                    "max": to_int(yt.get("maxvideos")),
                    "ignore": yt.get("ignore"),
                }
            }
        })
    if request.method == 'POST':
        data = request.json
        # Python 2 style: the file is opened in binary mode and plain str is
        # concatenated with encode('utf-8') results; under Python 3 this
        # str/bytes mixing would raise a TypeError.
        with open(
                os.path.join(os.path.dirname(sys.argv[0]),
                             'Einstellungen/RSScrawler.ini'), 'wb') as f:
            f.write('# RSScrawler.ini (Stand: RSScrawler ' +
                    version.getVersion() + ')\n')
            f.write("\n[RSScrawler]\n")
            f.write("jdownloader = " +
                    to_str(data['general']['pfad']).encode('utf-8') + "\n")
            f.write("port = " +
                    to_str(data['general']['port']).encode('utf-8') + "\n")
            f.write("prefix = " +
                    to_str(data['general']['prefix']).encode('utf-8').lower() +
                    "\n")
            interval = to_str(data['general']['interval']).encode('utf-8')
            if to_int(interval) < 3:
                interval = '3'
            f.write("interval = " + interval + "\n")
            f.write("english = " +
                    to_str(data['general']['english']).encode('utf-8') + "\n")
            f.write("hoster = " +
                    to_str(data['general']['hoster']).encode('utf-8') + "\n")
            f.write("\n[MB]\n")
            f.write("quality = " +
                    to_str(data['mb']['quality']).encode('utf-8') + "\n")
            f.write("ignore = " +
                    to_str(data['mb']['ignore']).encode('utf-8').lower() +
                    "\n")
            f.write("historical = " +
                    to_str(data['mb']['historical']).encode('utf-8') + "\n")
            f.write("regex = " + to_str(data['mb']['regex']).encode('utf-8') +
                    "\n")
            f.write("cutoff = " +
                    to_str(data['mb']['cutoff']).encode('utf-8') + "\n")
            f.write("crawl3d = " +
                    to_str(data['mb']['crawl_3d']).encode('utf-8') + "\n")
            f.write("enforcedl = " +
                    to_str(data['mb']['force_dl']).encode('utf-8') + "\n")
            f.write("crawlseasons = " +
                    to_str(data['mbsj']['enabled']).encode('utf-8') + "\n")
            f.write("seasonsquality = " +
                    to_str(data['mbsj']['quality']).encode('utf-8') + "\n")
            f.write("seasonpacks = " +
                    to_str(data['mbsj']['packs']).encode('utf-8') + "\n")
            f.write("seasonssource = " +
                    to_str(data['mbsj']['source']).encode('utf-8').lower() +
                    "\n")
            f.write("imdbyear = " +
                    to_str(data['mb']['imdb_year']).encode('utf-8') + "\n")
            imdb = to_str(data['mb']['imdb_score']).encode('utf-8')
            # Parse the score leniently: accept a comma as decimal separator and
            # fall back to 0.0 for empty or non-numeric input (the original
            # first-character check let input like "7.5.5" reach float() and crash).
            try:
                imdb = round(float(imdb.replace(",", ".")), 1)
            except ValueError:
                imdb = 0.0
            if imdb > 10:
                imdb = 10.0
            f.write("imdb = " + to_str(imdb) + "\n")
            f.write("\n[SJ]\n")
            f.write("quality = " +
                    to_str(data['sj']['quality']).encode('utf-8') + "\n")
            f.write("rejectlist = " +
                    to_str(data['sj']['ignore']).encode('utf-8').lower() +
                    "\n")
            f.write("regex = " + to_str(data['sj']['regex']).encode('utf-8') +
                    "\n")
            f.write("\n[YT]\n")
            f.write("youtube = " +
                    to_str(data['yt']['enabled']).encode('utf-8') + "\n")
            maxvideos = to_str(data['yt']['max']).encode('utf-8')
            if maxvideos == "":
                maxvideos = "10"
            if to_int(maxvideos) < 1:
                f.write("maxvideos = 1\n")
            elif to_int(maxvideos) > 50:
                f.write("maxvideos = 50\n")
            else:
                f.write("maxvideos = " + to_str(maxvideos) + "\n")
            f.write("ignore = " +
                    to_str(data['yt']['ignore']).encode('utf-8') + "\n")
            f.write("\n[Notifications]\n")
            f.write("homeassistant = " +
                    to_str(data['alerts']['homeassistant']).encode('utf-8') +
                    "\n")
            f.write("pushbullet = " +
                    to_str(data['alerts']['pushbullet']).encode('utf-8') +
                    "\n")
            f.write("pushover = " +
                    to_str(data['alerts']['pushover']).encode('utf-8') + "\n")
            f.write("\n[Crawljobs]\n")
            f.write("autostart = " +
                    to_str(data['crawljobs']['autostart']).encode('utf-8') +
                    "\n")
            f.write("subdir = " +
                    to_str(data['crawljobs']['subdir']).encode('utf-8') + "\n")
        files.check()
        return "Success", 201
    else:
        return "Failed", 405