async def from_url(cls, url: str) -> Optional["ScanOne"]:
    """Build a ScanOne manga instance from its chapter-list URL.

    Scrapes the page's ``ul.chapters`` list and fills ``instance.chapters``
    oldest-first. Returns None when the URL does not match the site pattern.
    """
    # Fix: raw string and escaped dots — the original pattern's bare '.'
    # matched any character, so unrelated hosts could slip through.
    # NOTE(review): host is 'wwv.scan-1.com' in the original — kept as-is;
    # confirm it is not a typo for 'www'.
    rule = re.compile(r'https://wwv\.scan-1\.com/([\w-]+)')
    match = rule.match(url)
    if not match:
        return None
    instance = cls(name=match.group(1))
    page = await instance.parse_url(url)
    chapters_lis = page.find('ul', class_='chapters').find_all('li')

    def unpack_li(index: int, li) -> ScanOneChapter:
        # The link text embeds the manga name; strip it to keep only the
        # chapter title. key_name is the last URL path segment.
        link = li.find('a')
        return ScanOneChapter(
            name=link.text.replace(instance.name, '').strip(),
            episode=index,
            key_name=link['href'].split('/')[-1],
        )

    # The page lists chapters newest-first; reverse so episode numbering
    # starts at 1 for the oldest chapter. (Dropped the redundant list()
    # wrapper around the comprehension.)
    instance.chapters = [
        unpack_li(index, li)
        for index, li in enumerate(chapters_lis[::-1], start=1)
    ]
    mark_parents(instance)
    return instance
async def from_url(cls, url: str) -> Optional["WebToon"]:
    """Resolve a WebToon document from a viewer URL.

    Returns the already-persisted document when one exists for the same
    name/lang pair; otherwise builds a new instance seeded with the chapter
    from the URL and expands it to the full chapter list. Returns None for
    URLs that do not match the viewer pattern.
    """
    pattern = re.compile(
        r'^https://www\.webtoons\.com/(\w+)/([\w-]+)/([\w-]+)/([\w-]+)/viewer\?title_no=(\d+)&episode_no=(\d+)'
    )
    m = pattern.match(url)
    if m is None:
        return None
    lang, gender, name, chapter_name, titleno, episode_number = m.groups()

    # Prefer a document already stored for this title and language.
    try:
        return await cls.objects.get(name=name, lang=lang)
    except cls.DocumentNotFound:
        pass

    seed = WebToonChapter(name=chapter_name, episode=episode_number)
    instance = cls(
        name=name,
        lang=lang,
        gender=gender,
        titleno=titleno,
        chapters=[seed],
    )
    mark_parents(instance)
    # Expand from the seed chapter to every sibling chapter.
    instance.chapters = await instance.chapters[0].others()
    return instance
async def from_scratch(cls, driver: Optional[Chrome] = None):
    """Create a fresh instance and populate its chapters.

    The optional Selenium *driver* is attached to the instance before
    ``get_chapters`` runs, so scraping can reuse it.
    """
    obj = cls()
    obj._driver = driver
    obj.chapters = await obj.get_chapters()
    mark_parents(obj)
    return obj
async def from_name(cls, name: str):
    """Instantiate a manga by *name* and discover its chapter list."""
    created = cls(name=name)
    created.chapters = await created.discover_chapters()
    mark_parents(created)
    return created
async def nexts(self) -> List["LelScanChapter"]:
    """Return the chapters published after this one.

    Re-fetches the parent manga so the result reflects the site's current
    state, then keeps only chapters that compare greater than *self*.
    """
    refreshed = await self._parent.from_name(self._parent.name)
    # Comprehension instead of list(filter(lambda ...)): same result,
    # clearer and avoids the lambda indirection.
    chapters = [chapter for chapter in refreshed.chapters if chapter > self]
    # NOTE(review): presumably re-links the refreshed tree back onto
    # *self*'s parent chain — confirm mark_parents' two-argument contract.
    mark_parents(refreshed, self)
    return chapters