def series_data(request, series_slug):
    """Return chapter metadata for a series as a JSON response.

    :param request: The current ``HttpRequest`` (unused beyond the view
        signature Django requires).
    :param series_slug: Slug identifying the ``Series``.
    :return: A ``JsonResponse`` with the series slug, title and a mapping
        of cleaned chapter numbers to volume/title/folder/groups data.
    """
    series = Series.objects.get(slug=series_slug)
    chapters = Chapter.objects.filter(series=series)
    chapters_dict = {}
    for chapter in chapters:
        chapter_media_path = os.path.join(settings.MEDIA_ROOT, "manga",
                                          series_slug, "chapters",
                                          chapter.folder)
        ch_clean = Chapter.clean_chapter_number(chapter)
        # Pages are the sorted file names inside the chapter's media folder.
        pages = sorted(os.listdir(chapter_media_path))
        if ch_clean in chapters_dict:
            # Same chapter number released by another group: merge.
            chapters_dict[ch_clean]["groups"][str(chapter.group.id)] = pages
        else:
            chapters_dict[ch_clean] = {
                "volume": str(chapter.volume),
                "title": chapter.title,
                "folder": chapter.folder,
                "groups": {
                    str(chapter.group.id): pages
                }
            }
    data = {
        "slug": series_slug,
        "title": series.name,
        "chapters": chapters_dict
    }
    # BUG FIX: JsonResponse is already an HttpResponse subclass. Wrapping it
    # in HttpResponse(...) would coerce the response OBJECT to a string and
    # send its repr instead of the JSON body. Return it directly.
    return JsonResponse(data)
def series_data(series_slug):
    """Build the API payload for a series: chapters, groups and cover.

    :param series_slug: Slug identifying the ``Series``.
    :return: A dict with series metadata, a group-id -> group-name map,
        the newest available volume cover URL and per-chapter page lists.
    :raises Series.DoesNotExist: If no series matches ``series_slug``.
    """
    series = Series.objects.get(slug=series_slug)
    # select_related avoids one query per chapter for chapter.group below.
    chapters = Chapter.objects.filter(series=series).select_related('group')
    chapters_dict = {}
    groups_dict = {}
    for chapter in chapters:
        chapter_media_path = os.path.join(settings.MEDIA_ROOT, "manga",
                                          series_slug, "chapters",
                                          chapter.folder)
        ch_clean = Chapter.clean_chapter_number(chapter)
        groups_dict[str(chapter.group.id)] = chapter.group.name
        # Cache-busting query string for re-uploaded (versioned) chapters.
        query_string = "" if not chapter.version else f"?v{chapter.version}"
        # Page URLs: sorted file names in the group's subfolder, each with
        # the version query string appended.
        pages = sorted([
            u + query_string for u in os.listdir(
                os.path.join(chapter_media_path, str(chapter.group.id)))
        ])
        if ch_clean in chapters_dict:
            # Same chapter number released by another group: merge.
            chapters_dict[ch_clean]["groups"][str(chapter.group.id)] = pages
        else:
            chapters_dict[ch_clean] = {
                "volume": str(chapter.volume),
                "title": chapter.title,
                "folder": chapter.folder,
                "groups": {
                    str(chapter.group.id): pages
                }
            }
        if chapter.preferred_sort:
            # BUG FIX: the bare ``except:`` here also swallowed SystemExit
            # and KeyboardInterrupt. json.loads on a bad value raises
            # ValueError (JSONDecodeError) or TypeError — catch only those
            # and keep the best-effort behavior.
            try:
                chapters_dict[ch_clean]["preferred_sort"] = json.loads(
                    chapter.preferred_sort)
            except (TypeError, ValueError):
                pass
    # Use the newest volume that actually has a cover image.
    vols = Volume.objects.filter(series=series).order_by('-volume_number')
    cover_vol_url = ""
    for vol in vols:
        if vol.volume_cover:
            cover_vol_url = f"/media/{vol.volume_cover}"
            break
    return {
        "slug": series_slug,
        "title": series.name,
        "description": series.synopsis,
        "author": series.author.name,
        "artist": series.artist.name,
        "groups": groups_dict,
        "cover": cover_vol_url,
        "preferred_sort": settings.PREFERRED_SORT,
        "chapters": chapters_dict
    }
def filter_queryset(self, request: Request, queryset: QuerySet, view: ViewSet) -> QuerySet:
    """Restrict a page queryset to one chapter chosen via query params.

    Only applies to the ``list`` action; ``series``, ``volume`` and
    ``number`` query parameters are mandatory there. When ``track=true``
    is also passed, a view is recorded for the chapter.

    :raises ValidationError: If any required parameter is missing.
    """
    if view.action != 'list':
        return queryset

    qp = request.query_params
    params = {'series', 'volume', 'number'}
    if not params <= qp.keys():
        raise ValidationError(
            detail={'error': f'{params} are required parameters.'})

    series, volume, number = qp['series'], qp['volume'], qp['number']
    if qp.get('track') == 'true':
        Chapter.track_view(series__slug=series, volume=volume, number=number)

    return queryset.filter(
        chapter__series__slug=series,
        chapter__volume=volume,
        chapter__number=number,
    ).order_by('number')
def home(request):
    """ Homepage """
    # Published chapters and the five most recent published blog posts,
    # with relations prefetched to avoid N+1 queries in the template.
    published_chapters = Chapter.only_published().prefetch_related(
        'team', 'comic')
    recent_posts = Post.objects.filter(
        published=True).prefetch_related('author')[:5]
    context = {'chapters': published_chapters, 'posts': recent_posts}
    rendered = render(request, 'blog/home.html', context)
    return cacheatron(request, rendered, (zxpost, zxchapter))
def _chapter_response(request: 'HttpRequest', _chapter: Chapter) -> Dict:
    """Serialize a chapter into the dict structure the reader API emits."""
    chapter_url = request.build_absolute_uri(_chapter.get_absolute_url())
    # Media root mirrors the reader URL, swapping the /reader/ prefix.
    media_root = chapter_url.replace('/reader/',
                                     f'{settings.MEDIA_URL}series/')
    page_files = [page._file_name for page in _chapter.pages.iterator()]
    return {
        'url': chapter_url,
        'title': _chapter.title,
        'full_title': str(_chapter),
        'pages_root': media_root,
        'pages_list': page_files,
        'date': http_date(_chapter.published.timestamp()),
        'final': _chapter.final,
        'groups': list(_chapter.groups.values('id', 'name')),
    }
def item_description(self, item: Chapter) -> str:
    """
    Get the description of the item.

    :param item: A ``Chapter`` object.

    :return: The ``Chapter`` object as a string, optionally wrapped in a
        download link when downloads are enabled.
    """
    desc = str(item)
    if settings.CONFIG['ALLOW_DLS']:
        domain = settings.CONFIG["DOMAIN"]
        # BUG FIX: the scheme was hard-coded to "http", which yields
        # broken / mixed-content download links on HTTPS deployments.
        # Use the configured protocol, consistent with the other feed's
        # item_description.
        scheme = settings.ACCOUNT_DEFAULT_HTTP_PROTOCOL
        url = item.get_absolute_url()[:-1] + '.cbz'
        desc = f'<a href="{scheme}://{domain}{url}">{desc}</a>'
    return desc
def item_description(self, item: Chapter) -> str:
    """
    Get the description of the item.

    :param item: A ``Chapter`` object.

    :return: The ``Chapter`` object as a string.
    """
    desc = str(item)
    # Without downloads enabled the plain title is the description.
    if not settings.CONFIG['ALLOW_DLS']:
        return desc
    scheme = settings.ACCOUNT_DEFAULT_HTTP_PROTOCOL
    domain = settings.CONFIG['DOMAIN']
    # The .cbz download URL is the chapter URL minus its trailing slash.
    url = item.get_absolute_url()[:-1] + '.cbz'
    return f'<a href="{scheme}://{domain}{url}">{desc}</a>'
def series_data(series_slug):
    """Assemble the public API payload (chapters, groups, cover) for a
    series identified by ``series_slug``.

    :raises Series.DoesNotExist: If no series matches the slug.
    """
    series = Series.objects.get(slug=series_slug)
    # select_related keeps chapter.group from costing a query per chapter.
    chapters = Chapter.objects.filter(series=series).select_related('group')

    chapters_dict = {}
    groups_dict = {}
    for chapter in chapters:
        group_id = str(chapter.group.id)
        groups_dict[group_id] = chapter.group.name
        # Page list: sorted file names from the group's chapter subfolder.
        pages_dir = os.path.join(settings.MEDIA_ROOT, "manga", series_slug,
                                 "chapters", chapter.folder, group_id)
        page_list = sorted(os.listdir(pages_dir))
        ch_clean = Chapter.clean_chapter_number(chapter)
        if ch_clean not in chapters_dict:
            chapters_dict[ch_clean] = {
                "volume": str(chapter.volume),
                "title": chapter.title,
                "folder": chapter.folder,
                "groups": {},
            }
        chapters_dict[ch_clean]["groups"][group_id] = page_list

    # Cover: newest volume that actually has a cover image.
    cover_vol_url = ""
    for vol in Volume.objects.filter(
            series=series).order_by('-volume_number'):
        if vol.volume_cover:
            cover_vol_url = f"/media/{vol.volume_cover}"
            break

    return {
        "slug": series_slug,
        "title": series.name,
        "description": series.synopsis,
        "author": series.author.name,
        "artist": series.artist.name,
        "groups": groups_dict,
        "cover": cover_vol_url,
        "chapters": chapters_dict
    }
def get_queryset(self):
    """Return published chapters with serializer-defined eager loading
    applied (avoids N+1 selects on related objects)."""
    serializer_cls = self.get_serializer_class()
    return serializer_cls.setup_eager_loading(Chapter.only_published())
def series_data(series_slug):
    """Build the full API payload for a series, including per-group
    release timestamps and next-release information.

    :param series_slug: Slug identifying the ``Series``.
    :return: A dict with series metadata, group map, cover URL, chapter
        data (pages, release dates, optional ``wo``/``preferred_sort``)
        and next-release fields.
    :raises Http404: If no series matches ``series_slug``.
    """
    series = Series.objects.filter(slug=series_slug).first()
    if not series:
        raise Http404("Page not found.")
    # select_related avoids one query per chapter for chapter.group below.
    chapters = Chapter.objects.filter(series=series).select_related("group")
    chapters_dict = {}
    groups_dict = {}
    for chapter in chapters:
        chapter_media_path = os.path.join(settings.MEDIA_ROOT, "manga",
                                          series_slug, "chapters",
                                          chapter.folder)
        ch_clean = Chapter.clean_chapter_number(chapter)
        groups_dict[str(chapter.group.id)] = chapter.group.name
        # Cache-busting query string for re-uploaded (versioned) chapters.
        query_string = "" if not chapter.version else f"?v{chapter.version}"
        if ch_clean in chapters_dict:
            # Same chapter number released by another group: merge.
            chapters_dict[ch_clean]["groups"][str(chapter.group.id)] = sorted([
                u + query_string for u in os.listdir(
                    os.path.join(chapter_media_path, str(chapter.group.id)))
            ])
            chapters_dict[ch_clean]["release_date"][str(
                chapter.group.id)] = int(chapter.uploaded_on.timestamp())
        else:
            chapters_dict[ch_clean] = {
                "volume": str(chapter.volume),
                "title": chapter.title,
                "folder": chapter.folder,
                "groups": {
                    str(chapter.group.id): sorted([
                        u + query_string for u in os.listdir(
                            os.path.join(chapter_media_path,
                                         str(chapter.group.id)))
                    ])
                },
                "release_date": {
                    str(chapter.group.id): int(chapter.uploaded_on.timestamp())
                },
            }
        # `chapter.wo and chapter.wo != 0` was redundant: any truthy value
        # is already != 0, and 0/None are falsy.
        if chapter.wo:
            chapters_dict[ch_clean]["wo"] = chapter.wo
        if chapter.preferred_sort:
            # BUG FIX: the bare ``except:`` also swallowed SystemExit and
            # KeyboardInterrupt. literal_eval on malformed input raises
            # ValueError, SyntaxError or TypeError — catch only those and
            # keep the best-effort behavior.
            try:
                chapters_dict[ch_clean]["preferred_sort"] = literal_eval(
                    chapter.preferred_sort)
            except (TypeError, ValueError, SyntaxError):
                pass
    # Cover: newest volume that actually has a cover image.
    vols = Volume.objects.filter(series=series).order_by("-volume_number")
    cover_vol_url = ""
    for vol in vols:
        if vol.volume_cover:
            cover_vol_url = f"/media/{vol.volume_cover}"
            break
    return {
        "slug": series_slug,
        "title": series.name,
        "description": series.synopsis,
        "author": series.author.name,
        "artist": series.artist.name,
        "groups": groups_dict,
        "cover": cover_vol_url,
        "preferred_sort": settings.PREFERRED_SORT,
        "chapters": chapters_dict,
        "next_release_page": series.next_release_page,
        "next_release_time": series.next_release_time.timestamp()
        if series.next_release_time else None,
        "next_release_html": series.next_release_html,
    }
def handle(self, *args: str, **options: str):
    """
    Execute the command.

    Imports a FoolSlide2 installation — groups, series, chapters and
    pages — from an XML database dump into the local models, copying
    cover and page images from the FS2 content directory.

    :param args: The arguments of the command.
    :param options: The options of the command.
    """
    call_command('migrate', stdout=StringIO())  # Set up database
    # Resolve the FS2 installation root and the XML dump path.
    root = abspath(options['root'])
    data = abspath(options['data'])
    tables = ET.parse(data).findall('database/table')
    # FS2 keeps all media under <root>/content/comics.
    content = join(root, 'content', 'comics')
    # (fs2_id, directory) pairs, filled while importing series/chapters
    # and looked up later to locate covers and page images on disk.
    directories = {'series': [], 'chapters': []}
    elements = {
        'series': self._get_element(tables, 'comics'),
        'chapters': self._get_element(tables, 'chapters'),
        'pages': self._get_element(tables, 'pages'),
        'groups': self._get_element(tables, 'teams')
    }
    if not options['noinput']:  # pragma: no cover
        self._print_warning(
            'Importing FoolSlide2 data requires an empty database.\n'
            'This command will wipe any existing data in the database.\n'
            'Are you sure you want to proceed?\n')
        answer = input(" Type 'yes' to continue, or 'no' to cancel: ")
        if answer != 'yes':
            self._print('Import cancelled.')
            return
    # Wipe existing rows so bulk inserts with explicit ids can't clash.
    call_command('flush', '--no-input')
    self._print(f'Importing {self._sql_name("Groups")}...')
    all_groups = []
    for g in elements['groups']:
        group = Group(id=self._get_column(g, 'id'),
                      name=self._get_column(g, 'name'),
                      website=self._get_column(g, 'url'),
                      twitter=self._get_column(g, 'twitter'),
                      irc=self._get_column(g, 'irc'))
        self._print(f'- Found {self._sql_name("Group")}: {group}')
        all_groups.append(group)
    try:
        Group.objects.bulk_create(all_groups)
        self._print_success('Groups successfully imported.')
    except IntegrityError as e:  # pragma: no cover
        raise CommandError('Failed to insert groups') from e
    self._print(f'Importing {self._sql_name("Series")}...')
    all_series = []
    for s in elements['series']:
        slug = self._get_column(s, 'stub')
        series = Series(
            id=self._get_column(s, 'id'),
            slug=slug,
            title=self._get_column(s, 'name'),
            description=self._get_column(s, 'description'),
        )
        self._print(f'- Found {self._sql_name("Series")}: {series}')
        thumb = self._get_column(s, 'thumbnail')
        # FS2 series directories are named "<stub>_<uniqid>".
        series_dir = join(content,
                          f'{slug}_{self._get_column(s, "uniqid")}')
        cover = join(series_dir, thumb)
        with open(cover, 'rb') as f:
            # save=False: defer the DB write to the bulk_create below.
            series.cover.save(thumb, File(f), save=False)
        all_series.append(series)
        directories['series'].append((self._get_column(s, 'id'),
                                      series_dir))
    try:
        Series.objects.bulk_create(all_series)
        self._print_success('Series successfully imported.')
    except IntegrityError as e:  # pragma: no cover
        raise CommandError('Failed to insert series') from e
    self._print(f'Importing {self._sql_name("Chapters")}...')
    all_chapters = []
    chapter_groups = []
    # The chapter<->group M2M rows are created via the through model.
    groups_through = Chapter.groups.through
    for c in elements['chapters']:
        cid = self._get_column(c, 'id')
        sid = self._get_column(c, 'comic_id')
        # FS2 chapter + subchapter combine into one decimal, e.g. 12.5.
        number = float('{chapter}.{subchapter}'.format(
            chapter=self._get_column(c, 'chapter') or '0',
            subchapter=self._get_column(c, 'subchapter') or '0'))
        volume = int(self._get_column(c, 'volume') or '0')
        chapter = Chapter(id=cid, series_id=sid,
                          title=self._get_column(c, 'name'),
                          volume=volume, number=number)
        self._print(
            f'- Found {self._sql_name("Chapter")}: {chapter.series} '
            f'- {chapter.volume}/{chapter.number:g} - {chapter.title}')
        gid = self._get_column(c, 'team_id')
        if gid:
            chapter_groups.append(
                groups_through(chapter_id=cid, group_id=gid))
        # Chapter directory lives inside its series directory.
        _dir = next(d[1] for d in directories['series'] if d[0] == sid)
        directories['chapters'].append(
            (cid, join(
                _dir,
                '{stub}_{uniqid}'.format(stub=self._get_column(c, 'stub'),
                                         uniqid=self._get_column(
                                             c, 'uniqid')))))
        all_chapters.append(chapter)
    try:
        Chapter.objects.bulk_create(all_chapters)
        groups_through.objects.bulk_create(chapter_groups)
        self._print_success('Chapters successfully imported.')
    except IntegrityError as e:  # pragma: no cover
        raise CommandError('Failed to insert chapters') from e
    self._print(f'Importing {self._sql_name("Pages")}...')
    all_pages = []
    # Per-chapter running counter used to number pages sequentially.
    page_numbers = {}
    for p in self._sort_children(elements['pages'], 'filename'):
        pid = self._get_column(p, 'id')
        cid = self._get_column(p, 'chapter_id')
        page_numbers[cid] = page_numbers.get(cid, 0) + 1
        page = Page(id=pid, chapter_id=cid, number=page_numbers[cid])
        self._print(f'- Found {self._sql_name("Page")}: {page}')
        _dir = next(d[1] for d in directories['chapters'] if d[0] == cid)
        fname = self._get_column(p, 'filename')
        with open(join(_dir, fname), 'rb') as f:
            # save=False: defer the DB write to the bulk_create below.
            page.image.save(fname, File(f), save=False)
        all_pages.append(page)
    try:
        Page.objects.bulk_create(all_pages)
        self._print_success('Chapter pages successfully imported.')
    except IntegrityError as e:  # pragma: no cover
        raise CommandError('Failed to insert pages') from e
    self._print_success('Successfully imported FoolSlide2 data.')
from django.views.i18n import JavaScriptCatalog from . import settings from reader.models import Chapter, Comic, Team, Person from blog.models import Post # Admin site settings admin.site.site_title = settings.APP_NAME admin.site.site_header = settings.SITE_TITLE admin.site.index_title = settings.APP_NAME + ' Management System' # https://docs.djangoproject.com/en/2.0/ref/contrib/sitemaps/ sitemaps = { 'chapters': GenericSitemap( { 'queryset': Chapter.only_published(), 'date_field: ': 'modified_at' }, priority=0.6, changefreq='daily'), 'comics': GenericSitemap( { 'queryset': Comic.objects.filter(published=True), 'date_field: ': 'modified_at' }, priority=0.4, changefreq='weekly'), 'teams': GenericSitemap({'queryset': Team.objects.all()}, priority=0.4,
def handle(self, *args, **options):
    """
    Import a FoolSlide2 XML database dump into the local models.

    Creates groups, series, chapters and pages in bulk, copying cover
    and page images from the FS2 content directory on disk.

    :param args: The arguments of the command.
    :param options: The options of the command.
    """
    # Resolve the FS2 installation root and the XML dump path.
    root = abspath(options['root'])
    data = abspath(options['data'])
    tables = et.parse(data).findall('database/table')
    # FS2 keeps all media under <root>/content/comics.
    content = join(root, 'content', 'comics')
    # (fs2_id, directory) pairs, filled while importing series/chapters
    # and looked up later to locate covers and page images on disk.
    directories = {'series': [], 'chapters': []}
    elements = {
        'series': _get_element(tables, 'comics'),
        'chapters': _get_element(tables, 'chapters'),
        'pages': _get_element(tables, 'pages'),
        'groups': _get_element(tables, 'teams')
    }
    all_groups = []
    for g in elements['groups']:
        group = Group(id=_get_column(g, 'id'),
                      name=_get_column(g, 'name'),
                      website=_get_column(g, 'url'),
                      twitter=_get_column(g, 'twitter'),
                      irc=_get_column(g, 'irc'))
        all_groups.append(group)
    Group.objects.bulk_create(all_groups)
    all_series = []
    for s in elements['series']:
        slug = _get_column(s, 'stub')
        series = Series(
            id=_get_column(s, 'id'),
            slug=slug,
            title=_get_column(s, 'name'),
            description=_get_column(s, 'description'),
        )
        thumb = _get_column(s, 'thumbnail')
        # FS2 series directories are named "<stub>_<uniqid>"; the cover
        # file on disk carries a "thumb_" prefix.
        series_dir = join(content,
                          '%s_%s' % (slug, _get_column(s, 'uniqid')))
        cover = join(series_dir, 'thumb_%s' % thumb)
        with open(cover, 'rb') as f:
            # save=False: defer the DB write to the bulk_create below.
            series.cover.save(thumb, File(f), save=False)
        all_series.append(series)
        directories['series'].append((_get_column(s, 'id'), series_dir))
    Series.objects.bulk_create(all_series)
    all_chapters = []
    chapter_groups = []
    # The chapter<->group M2M rows are created via the through model.
    groups_through = Chapter.groups.through
    for c in elements['chapters']:
        cid = _get_column(c, 'id')
        sid = _get_column(c, 'comic_id')
        # FS2 chapter + subchapter combine into one decimal, e.g. 12.5.
        number = float('%s.%s' % (_get_column(c, 'chapter') or '0',
                                  _get_column(c, 'subchapter') or '0'))
        volume = int(_get_column(c, 'volume') or '0')
        chapter = Chapter(id=cid, series_id=sid,
                          title=_get_column(c, 'name'),
                          volume=volume, number=number)
        gid = _get_column(c, 'team_id')
        if gid:
            chapter_groups.append(
                groups_through(chapter_id=cid, group_id=gid))
        # Chapter directory lives inside its series directory.
        _dir = next(d[1] for d in directories['series'] if d[0] == sid)
        directories['chapters'].append(
            (cid, join(
                _dir, '%s_%s' % (_get_column(c, 'stub'),
                                 _get_column(c, 'uniqid')))))
        all_chapters.append(chapter)
    Chapter.objects.bulk_create(all_chapters)
    groups_through.objects.bulk_create(chapter_groups)
    all_pages = []
    # Per-chapter running counter used to number pages sequentially.
    page_numbers = {}
    for p in _sort_children(elements['pages'], 'filename'):
        pid = _get_column(p, 'id')
        cid = _get_column(p, 'chapter_id')
        page_numbers[cid] = page_numbers.get(cid, 0) + 1
        page = Page(id=pid, chapter_id=cid, number=page_numbers[cid])
        _dir = next(d[1] for d in directories['chapters'] if d[0] == cid)
        fname = _get_column(p, 'filename')
        with open(join(_dir, fname), 'rb') as f:
            # save=False: defer the DB write to the bulk_create below.
            page.image.save(fname, File(f), save=False)
        all_pages.append(page)
    Page.objects.bulk_create(all_pages)