Code example #1
File: fansub.py Project: zetamini/YYeTsBot
    def search_result(self, resource_url) -> dict:
        # yyets offline
        # https://yyets.dmesg.app/resource.html?id=37089
        rid = resource_url.split("id=")[1]
        data: dict = self.collection.find_one({"data.info.id": int(rid)}, {'_id': False})
        name = data["data"]["info"]["cnname"]
        return {"all": json.dumps(data, ensure_ascii=False), "share": WORKERS.format(id=rid), "cnname": name}
Code example #2
    def search_result(self, resource_url) -> dict:
        # yyets offline

        # resource: https://yyets.dmesg.app/resource.html?id=37089
        # comment: 'https://yyets.dmesg.app/resource.html?id=233#61893ae51e9152e43fa24124'
        if "#" in resource_url:
            cid = resource_url.split("#")[1]
            data: dict = self.db["comment"].find_one({"_id": ObjectId(cid)}, {
                '_id': False,
                "ip": False,
                "type": False,
                "children": False,
                "browser": False
            })
            share = resource_url
            name = f"{data['username']} 的分享"
            t = "comment"
        else:
            rid = resource_url.split("id=")[1]
            data: dict = self.collection.find_one({"data.info.id": int(rid)},
                                                  {'_id': False})
            name = data["data"]["info"]["cnname"]
            share = WORKERS.format(id=rid)
            t = "resource"

        return {
            "all": json.dumps(data, ensure_ascii=False, indent=4),
            "share": share,
            "cnname": name,
            "type": t
        }
Code example #3
File: fansub.py Project: Linux-Doc/YYeTsBot
    def search_result(self, resource_url) -> dict:
        self.url = resource_url
        query_url = WORKERS.format(id=self.id)
        api_res = requests.get(query_url).json()
        cnname = api_res["data"]["info"]["cnname"]
        # for universal purpose, we return the same structure.
        self.data = {"all": api_res, "share": query_url, "cnname": cnname}
        return self.data
Code example #4
def create_workers(sess, Agent_class, envname, dataset, scorekeeper):
    '''
    Initialises one instance of the Worker class per parallel thread and
    appends it to the global WORKERS list.

    * arguments:

    Agent_class
        the class of the agent to be used as a worker on the different threads
    dataset
        where to store the work conducted by the workers
    '''
    global WORKERS
    for thread in range(NUM_THREADS):
        print("Creating thread {}...".format(thread), end='\r')
        WORKERS.append(
            Worker(sess, Agent_class, thread, dataset, scorekeeper, envname))
    print()
Code example #5
File: fansub.py Project: zephyr-y/YYeTsBot
    def search_result(self, resource_url) -> dict:
        # yyets offline
        self.url = resource_url

        data: dict = self.collection.find_one({"url": self.url})
        rid = data["id"]
        name = data["data"]["data"]["info"]["cnname"]
        data.pop("_id")
        self.data = {"all": data, "share": WORKERS.format(id=rid), "cnname": name}
        return self.data
Code example #6
    def search_result(self, resource_url) -> dict:
        # yyets offline
        self.url = resource_url
        # http://www.rrys2020.com/resource/10017
        rid = self.url.split("/resource/")[1]
        data: dict = self.collection.find_one({"data.info.id": int(rid)},
                                              {'_id': False})
        name = data["data"]["info"]["cnname"]
        self.data = {
            "all": data,
            "share": WORKERS.format(id=rid),
            "cnname": name
        }
        return self.data
Code example #7
def offline_search(search_content):
    # from cloudflare workers
    # no redis cache for now
    logging.info("Loading data from cfkv...")
    index = WORKERS.format(id="index")
    data: dict = requests.get(index).json()
    logging.info("Loading complete, searching now...")

    results = {}
    for name, rid in data.items():
        if search_content in name:
            fake_url = f"http://www.rrys2020.com/resource/{rid}"
            results[fake_url] = name.replace("\n", " ")
    logging.info("Search complete")
    return results
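
Code examples #7 and #8 fetch WORKERS.format(id="index") and loop over data.items() as name / resource-id pairs, so the index served from Cloudflare KV storage is assumed to be one flat JSON object; the .replace("\n", " ") call suggests the names may contain embedded newlines. A sketch of that assumed shape (the title/id pairings below are illustrative only):

# Assumed shape of the "index" payload used by offline_search / search_preview
# (title/id pairings are illustrative only).
sample_index = {
    "海贼王\n(One Piece)": 10017,
    "西部世界\n(Westworld)": 37089,
}
# offline_search("One Piece") would then return
# {"http://www.rrys2020.com/resource/10017": "海贼王 (One Piece)"}
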
Code example #8
File: fansub.py Project: Linux-Doc/YYeTsBot
    def search_preview(self, search_text: str) -> dict:
        # from cloudflare workers
        # no redis cache for now - why? because we may update cloudflare
        logging.info("[%s] Loading offline data from cloudflare KV storage...", self.label)
        index = WORKERS.format(id="index")
        data: dict = requests.get(index).json()

        results = {}
        for name, rid in data.items():
            # make them both lower
            if search_text.lower() in name.lower():
                fake_url = f"http://www.rrys2020.com/resource/{rid}"
                results[fake_url] = name.replace("\n", " ")
        logging.info("[%s] Offline search complete", self.label)
        results["source"] = self.label
        return results
Code example #9
def offline_link(resource_url) -> str:
    rid = resource_url.split("/")[-1]
    query_url = WORKERS.format(id=rid)
    # TODO: too lazy to optimize cloudflare worker page.
    return query_url
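
Taken together, the offline helpers compose: offline_search (code example #7) returns a dict that maps fake rrys2020.com resource URLs to the matched names, and offline_link (code example #9) maps such a URL back to the Workers query URL. A hypothetical usage sketch, assuming the module-level context noted after code example #1:

# Hypothetical chaining of the offline helpers shown above.
if __name__ == "__main__":
    hits = offline_search("Westworld")          # {fake_url: display name, ...}
    for fake_url, title in hits.items():
        # offline_link recovers the Workers query URL from the fake resource URL
        print(title, "->", offline_link(fake_url))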