Example #1
async def chapter(request):
    """
    返回小说章节目录页
    : content_url   这决定当前U页面url的生成方式
    : url           章节目录页源url
    : novels_name   小说名称
    :return: 小说章节内容页
    """
    url = request.args.get('url', None)
    novels_name = request.args.get('novels_name', None)
    netloc = get_netloc(url)
    # No parsing rule registered for this site: just redirect to the source page.
    if netloc not in RULES.keys():
        return redirect(url)
    # Some sites need their chapter-index url rewritten before it can be fetched.
    if netloc in REPLACE_RULES.keys():
        url = url.replace(REPLACE_RULES[netloc]['old'],
                          REPLACE_RULES[netloc]['new'])
    content_url = RULES[netloc].content_url
    # Fetch the parsed chapter list, served from cache when available.
    content = await cache_owllook_novels_chapter(url=url, netloc=netloc)
    if content:
        # Strip list brackets and leftover js/onerror fragments, and drop the
        # '加入书架' ("add to bookshelf") label from the scraped chapter markup.
        content = str(content).strip('[],, Jjs').replace(', ', '').replace(
            'onerror', '').replace('js', '').replace('加入书架', '')
        return template('chapter.html',
                        novels_name=novels_name,
                        url=url,
                        content_url=content_url,
                        soup=content)
    else:
        # Parsing failed: ask the user to report the page, refresh, or visit the source url.
        return text('解析失败,请将失败页面反馈给本站,请重新刷新一次,或者访问源网页:{url}'.format(url=url))
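
Both examples lean on a handful of project-level helpers that the excerpt does not show: RULES and REPLACE_RULES (per-site parsing/rewrite tables keyed by host), get_netloc, the cached fetcher cache_owllook_novels_chapter, plus Sanic's redirect/text response helpers and a template renderer. The sketch below illustrates one plausible shape for the rule tables and the netloc helper; the names, fields, and example host are assumptions for illustration, not the project's actual definitions.

# Illustrative sketch only: plausible shapes for the rule tables the handler reads.
# Field names, hosts and values here are assumptions, not owllook's real config.
from collections import namedtuple
from urllib.parse import urlparse

Rule = namedtuple('Rule', ['content_url'])

RULES = {
    'www.example-novels.com': Rule(content_url='0'),   # rule table keyed by site host
}
REPLACE_RULES = {
    'www.example-novels.com': {'old': '/book/', 'new': '/chapter/'},
}

def get_netloc(url):
    # Host part of the source url, e.g. 'www.example-novels.com'.
    return urlparse(url).netloc if url else ''

In a Sanic app the handler would typically be registered with something like app.add_route(chapter, '/chapter'), with url and novels_name passed as query parameters.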
Example #2
async def chapter(request):
    """
    返回小说章节目录页
    : content_url   这决定当前U页面url的生成方式
    : url           章节目录页源url
    : novels_name   小说名称
    :return: 小说章节内容页
    """
    url = request.args.get('url', None)
    novels_name = request.args.get('novels_name', None)
    netloc = get_netloc(url)
    if netloc not in RULES.keys():
        return redirect(url)
    if netloc in REPLACE_RULES.keys():
        url = url.replace(REPLACE_RULES[netloc]['old'],
                          REPLACE_RULES[netloc]['new'])
    content_url = RULES[netloc].content_url
    content = await cache_owllook_novels_chapter(url=url, netloc=netloc)
    if content:
        content = str(content).strip('[],, Jjs').replace(', ', '').replace(
            'onerror', '').replace('js', '').replace('加入书架', '')
        # Optional branch: hand the chapter list off to a kindle-delivery helper.
        if request.args.get('add_kindle', None):
            h = areader()   # presumably an HTML parser that collects (title, url) chapter pairs
            h.feed(content)
            # Map the per-site content_url flag to the prefix used when building chapter links.
            if content_url == '1':
                content_url = ''
            elif content_url == '0':
                content_url = url
            elif content_url == '-1':
                content_url = url
            (a, b) = h.chapters[0]   # peek at the first chapter (values are otherwise unused)
            link = "http://127.0.0.1:8001/owllook_content?url=" + content_url + "%s&name=%s&chapter_url=" + url + "&novels_name=%s"
            # Notify a local delivery daemon over a raw TCP socket.
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect(('127.0.0.1', 31419))
            # Dump the per-chapter reader links to a temp file as JSON
            # (jjj is presumably an alias for a json-compatible module).
            f = open('/tmp/ow_links', 'w')
            print(len(h.chapters))   # debug output
            jjj.dump([{
                'title': title,
                'url': link % (curl, urllib.parse.quote(title),
                               urllib.parse.quote(novels_name))
            } for (title, curl) in h.chapters], f)
            f.close()
            # Send the novel name, chapter count and (redacted) target e-mail to the daemon.
            s.send(
                pickle.dumps(
                    (novels_name, len(h.chapters), "*****@*****.**")))
            return redirect("https://fss.cjwddtc.win")
        return template('chapter.html',
                        novels_name=novels_name,
                        url=url,
                        content_url=content_url,
                        soup=content)
    else:
        return text('解析失败,请将失败页面反馈给本站,请重新刷新一次,或者访问源网页:{url}'.format(url=url))
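
The add_kindle branch in Example #2 talks to an out-of-process helper through two ad-hoc channels: a JSON file of per-chapter reader links written to /tmp/ow_links, and a pickled (novels_name, chapter_count, email) tuple sent to a TCP listener on 127.0.0.1:31419. That listener is not part of the excerpt; the following is only a minimal sketch of what a compatible consumer might look like, assuming the small payload arrives in a single recv and that both ends are local and trusted (unpickling data from untrusted peers is unsafe).

# Hypothetical consumer for the add_kindle hand-off above; the real daemon
# behind 127.0.0.1:31419 is not part of the excerpt, this is only a sketch.
import json
import pickle
import socket

def serve_once(host='127.0.0.1', port=31419):
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    srv.bind((host, port))
    srv.listen(1)
    conn, _ = srv.accept()
    try:
        # The handler sends one small pickled (novels_name, chapter_count, email) tuple;
        # assume it fits in a single recv since both ends are on localhost.
        novels_name, chapter_count, email = pickle.loads(conn.recv(65536))
        # The per-chapter reader links were written separately to /tmp/ow_links as JSON.
        with open('/tmp/ow_links') as f:
            links = json.load(f)
        print(novels_name, chapter_count, email, len(links))
    finally:
        conn.close()
        srv.close()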