Exemplo n.º 1
0
    def get_object(self, request, searchterms, searchtype, page=1):
        """Search series and return one page of results for the OPDS feed.

        searchtype: 'm' = substring match, 'b' = prefix match,
        'e' = exact match, 'a' = series containing books by author id.
        Returns a dict with the items, echoed search params and paginator data.
        """
        self.author_id = None
        if not isinstance(page, int):
            page = int(page)
        page_num = page if page > 0 else 1

        if searchtype == 'm':
            series = Series.objects.filter(search_ser__contains=searchterms.upper())
        elif searchtype == 'b':
            series = Series.objects.filter(search_ser__startswith=searchterms.upper())
        elif searchtype == 'e':
            series = Series.objects.filter(search_ser=searchterms.upper())
        elif searchtype == 'a':
            try:
                self.author_id = int(searchterms)
            except (TypeError, ValueError):
                self.author_id = None
            series = Series.objects.filter(book__authors=self.author_id)
        else:
            # An unknown searchtype used to leave 'series' unbound and crash
            # with UnboundLocalError on the annotate() call below.
            series = Series.objects.none()

        # Attach the per-series book count and a stable display order.
        series = series.annotate(count_book=Count('book')).distinct().order_by('search_ser')

        # Build the result page
        series_count = series.count()
        op = OPDS_Paginator(series_count, 0, page_num, config.SOPDS_MAXITEMS)
        items = []

        for row in series[op.d1_first_pos:op.d1_last_pos + 1]:
            p = {'id': row.id, 'ser': row.ser, 'lang_code': row.lang_code, 'book_count': row.count_book}
            items.append(p)

        return {"series": items, "searchterms": searchterms, "searchtype": searchtype, "paginator": op.get_data_dict()}
Exemplo n.º 2
0
def CatalogsView(request):
    """Render one page of the catalog browser: sub-catalogs first, then books.

    GET params: 'cat' (catalog id; None means the root catalog),
    'page' (1-based page number, malformed/non-positive values fall back to 1).
    """
    args = {}

    if request.GET:
        cat_id = request.GET.get('cat', None)
        try:
            page_num = int(request.GET.get('page', '1'))
        except ValueError:
            # A malformed 'page' parameter used to raise a 500; fall back to 1.
            page_num = 1
    else:
        cat_id = None
        page_num = 1
    # Clamp to a valid page number (consistent with the search views).
    page_num = page_num if page_num > 0 else 1

    try:
        if cat_id is not None:
            cat = Catalog.objects.get(id=cat_id)
        else:
            cat = Catalog.objects.get(parent__id=cat_id)
    except Catalog.DoesNotExist:
        cat = None

    catalogs_list = Catalog.objects.filter(parent=cat).order_by("cat_name")
    catalogs_count = catalogs_list.count()
    # prefetch_related on sqlite on items >999 throws error "too many SQL variables"
    books_list = Book.objects.filter(catalog=cat).order_by("search_title")
    books_count = books_list.count()

    # Build the combined (catalogs first, then books) result page.
    op = OPDS_Paginator(catalogs_count, books_count, page_num, config.SOPDS_MAXITEMS, HALF_PAGES_LINKS)
    items = []

    for row in catalogs_list[op.d1_first_pos:op.d1_last_pos + 1]:
        p = {'is_catalog': 1, 'title': row.cat_name, 'id': row.id, 'cat_type': row.cat_type, 'parent_id': row.parent_id}
        items.append(p)

    for row in books_list[op.d2_first_pos:op.d2_last_pos + 1]:
        p = {'is_catalog': 0, 'lang_code': row.lang_code, 'filename': row.filename, 'path': row.path,
             'registerdate': row.registerdate, 'id': row.id, 'annotation': strip_tags(row.annotation),
             'docdate': row.docdate, 'format': row.format, 'title': row.title, 'filesize': row.filesize//1000,
             'authors': row.authors.values(), 'genres': row.genres.values(), 'series': row.series.values(),
             'ser_no': row.bseries_set.values('ser_no')}
        items.append(p)

    args['paginator'] = op.get_data_dict()
    args['items'] = items
    args['cat_id'] = cat_id
    args['current'] = 'catalog'

    # Breadcrumb trail from the root down to the current catalog.
    breadcrumbs_list = []
    if cat:
        while cat.parent:
            breadcrumbs_list.insert(0, (cat.cat_name, cat.id))
            cat = cat.parent
        breadcrumbs_list.insert(0, (_('ROOT'), 0))
    args['breadcrumbs_cat'] = breadcrumbs_list
    args['breadcrumbs'] = [_('Catalogs')]

    return render(request, 'sopds_catalogs.html', args)
Exemplo n.º 3
0
def SearchSeriesView(request):
    """Search series by name and render one page of results.

    GET params: 'searchtype' ('m' substring, 'b' prefix, 'e' exact),
    'searchterms' (query string), 'page' (1-based page number).
    """
    args = {}
    args.update(csrf(request))

    if request.GET:
        searchtype = request.GET.get('searchtype', 'm')
        searchterms = request.GET.get('searchterms', '')
        try:
            page_num = int(request.GET.get('page', '1'))
        except ValueError:
            # A malformed 'page' parameter used to raise a 500; fall back to 1.
            page_num = 1
        page_num = page_num if page_num > 0 else 1

        if searchtype == 'm':
            series = Series.objects.filter(
                search_ser__contains=searchterms.upper())
        elif searchtype == 'b':
            series = Series.objects.filter(
                search_ser__startswith=searchterms.upper())
        elif searchtype == 'e':
            series = Series.objects.filter(search_ser=searchterms.upper())
        else:
            # An unknown searchtype used to leave 'series' unbound and crash
            # with UnboundLocalError on the annotate() call below.
            series = Series.objects.none()

        # Attach the per-series book count and a stable display order.
        series = series.annotate(
            count_book=Count('book')).distinct().order_by('search_ser')

        # Build the result page
        series_count = series.count()
        op = OPDS_Paginator(series_count, 0, page_num, config.SOPDS_MAXITEMS,
                            HALF_PAGES_LINKS)
        items = []
        for row in series[op.d1_first_pos:op.d1_last_pos + 1]:
            p = {
                'id': row.id,
                'ser': row.ser,
                'lang_code': row.lang_code,
                'book_count': row.count_book
            }
            items.append(p)

        args['paginator'] = op.get_data_dict()
        args['searchterms'] = searchterms
        args['searchtype'] = searchtype
        args['series'] = items
        args['searchobject'] = 'series'
        args['current'] = 'search'
        args['breadcrumbs'] = [_('Series'), _('Search'), searchterms]
        args['cache_id'] = '%s:%s:%s' % (searchterms, searchtype, op.page_num)
        args['cache_t'] = config.SOPDS_CACHE_TIME

    return render(request, 'sopds_series.html', args)
Exemplo n.º 4
0
def SearchAuthorsView(request):
    """Search authors by full name and render one page of results.

    GET params: 'searchtype' ('m' substring, 'b' prefix, 'e' exact),
    'searchterms' (query string), 'page' (1-based page number).
    """
    args = {}
    args.update(csrf(request))

    if request.GET:
        searchtype = request.GET.get('searchtype', 'm')
        searchterms = request.GET.get('searchterms', '')
        try:
            page_num = int(request.GET.get('page', '1'))
        except ValueError:
            # A malformed 'page' parameter used to raise a 500; fall back to 1.
            page_num = 1
        page_num = page_num if page_num > 0 else 1

        if searchtype == 'm':
            authors = Author.objects.filter(
                search_full_name__contains=searchterms.upper()).order_by(
                    'search_full_name')
        elif searchtype == 'b':
            authors = Author.objects.filter(
                search_full_name__startswith=searchterms.upper()).order_by(
                    'search_full_name')
        elif searchtype == 'e':
            authors = Author.objects.filter(search_full_name=searchterms.upper(
            )).order_by('search_full_name')
        else:
            # An unknown searchtype used to leave 'authors' unbound and crash
            # with UnboundLocalError on the count() call below.
            authors = Author.objects.none()

        # Build the result page
        authors_count = authors.count()
        op = OPDS_Paginator(authors_count, 0, page_num, config.SOPDS_MAXITEMS,
                            HALF_PAGES_LINKS)
        items = []

        for row in authors[op.d1_first_pos:op.d1_last_pos + 1]:
            # NOTE(review): one count query per displayed author; bounded by
            # SOPDS_MAXITEMS, but an annotate(Count('book')) could batch it.
            p = {
                'id': row.id,
                'full_name': row.full_name,
                'lang_code': row.lang_code,
                'book_count': Book.objects.filter(authors=row).count()
            }
            items.append(p)

        args['paginator'] = op.get_data_dict()
        args['searchterms'] = searchterms
        args['searchtype'] = searchtype
        args['authors'] = items
        args['searchobject'] = 'author'
        args['current'] = 'search'
        args['breadcrumbs'] = [_('Authors'), _('Search'), searchterms]
        args['cache_id'] = '%s:%s:%s' % (searchterms, searchtype, op.page_num)
        args['cache_t'] = config.SOPDS_CACHE_TIME

    return render(request, 'sopds_authors.html', args)
Exemplo n.º 5
0
    def get_object(self, request, searchterms, searchtype, page=1):
        """Search authors and return one page of results for the OPDS feed.

        searchtype: 'm' = substring match, 'b' = prefix match, 'e' = exact match.
        Returns a dict with the items, echoed search params and paginator data.
        """
        if not isinstance(page, int):
            page = int(page)
        page_num = page if page > 0 else 1

        if searchtype == 'm':
            authors = Author.objects.filter(search_full_name__contains=searchterms.upper()).order_by("search_full_name")
        elif searchtype == 'b':
            authors = Author.objects.filter(search_full_name__startswith=searchterms.upper()).order_by("search_full_name")
        elif searchtype == 'e':
            authors = Author.objects.filter(search_full_name=searchterms.upper()).order_by("search_full_name")
        else:
            # An unknown searchtype used to leave 'authors' unbound and crash
            # with UnboundLocalError on the count() call below.
            authors = Author.objects.none()

        # Build the result page
        authors_count = authors.count()
        op = OPDS_Paginator(authors_count, 0, page_num, config.SOPDS_MAXITEMS)
        items = []

        for row in authors[op.d1_first_pos:op.d1_last_pos + 1]:
            p = {'id': row.id, 'full_name': row.full_name, 'lang_code': row.lang_code, 'book_count': Book.objects.filter(authors=row).count()}
            items.append(p)

        return {"authors": items, "searchterms": searchterms, "searchtype": searchtype, "paginator": op.get_data_dict()}
Exemplo n.º 6
0
    def get_object(self, request, cat_id=None, page=1):
        """Return (items, cat, paginator_data) for one page of a catalog listing.

        Sub-catalogs are listed first, then the books of the catalog; a
        missing/unknown cat_id resolves to the root catalog (or None).
        """
        if not isinstance(page, int):
            page = int(page)
        current_page = page if page > 0 else 1

        # Resolve the requested catalog; cat_id of None selects the root.
        try:
            if cat_id is None:
                cat = Catalog.objects.get(parent__id=cat_id)
            else:
                cat = Catalog.objects.get(id=cat_id)
        except Catalog.DoesNotExist:
            cat = None

        subcatalogs = Catalog.objects.filter(parent=cat).order_by("cat_name")
        # prefetch_related on sqlite on items >999 throws error "too many SQL variables"
        catalog_books = Book.objects.filter(catalog=cat).order_by("search_title")

        paginator = OPDS_Paginator(subcatalogs.count(), catalog_books.count(),
                                   current_page, config.SOPDS_MAXITEMS)

        # Sub-catalog entries for this page.
        items = [
            {'is_catalog': 1, 'title': entry.cat_name, 'id': entry.id,
             'cat_type': entry.cat_type, 'parent_id': entry.parent_id}
            for entry in subcatalogs[paginator.d1_first_pos:paginator.d1_last_pos + 1]
        ]

        # Book entries for this page.
        for book in catalog_books[paginator.d2_first_pos:paginator.d2_last_pos + 1]:
            items.append({
                'is_catalog': 0, 'lang_code': book.lang_code,
                'filename': book.filename, 'path': book.path,
                'registerdate': book.registerdate, 'id': book.id,
                'annotation': strip_tags(book.annotation),
                'docdate': book.docdate, 'format': book.format,
                'title': book.title, 'filesize': book.filesize // 1000,
                'authors': book.authors.values(), 'genres': book.genres.values(),
                'series': book.series.values(),
                'ser_no': book.bseries_set.values('ser_no'),
            })

        return items, cat, paginator.get_data_dict()
Exemplo n.º 7
0
def SearchBooksView(request):
    """Search books and render one page of results.

    GET params:
      searchtype: 'm' title substring, 'b' title prefix, 'a' by author id,
                  's' by series id, 'g' by genre id, 'u' user bookshelf,
                  'd' doubles for a book id, 'i' single book by id.
      searchterms: query string or numeric id, depending on searchtype.
      page: 1-based page number.
    When SOPDS_DOUBLES_HIDE is enabled, consecutive rows with the same title
    and author set are collapsed into one item with a 'doubles' counter.
    """
    args = {}
    args.update(csrf(request))

    if request.GET:
        searchtype = request.GET.get('searchtype', 'm')
        searchterms = request.GET.get('searchterms', '')
        try:
            page_num = int(request.GET.get('page', '1'))
        except ValueError:
            # A malformed 'page' parameter used to raise a 500; fall back to 1.
            page_num = 1
        page_num = page_num if page_num > 0 else 1

        if searchtype == 'm':
            books = Book.objects.filter(
                search_title__contains=searchterms.upper()).order_by(
                    'search_title', '-docdate')
            args['breadcrumbs'] = [
                _('Books'), _('Search by title'), searchterms
            ]
            args['searchobject'] = 'title'

        # Was a bare 'if' that detached the 'b' test from the chain above.
        elif searchtype == 'b':
            books = Book.objects.filter(
                search_title__startswith=searchterms.upper()).order_by(
                    'search_title', '-docdate')
            args['breadcrumbs'] = [
                _('Books'), _('Search by title'), searchterms
            ]
            args['searchobject'] = 'title'

        elif searchtype == 'a':
            try:
                author_id = int(searchterms)
                author = Author.objects.get(id=author_id)
                aname = author.full_name
            except (TypeError, ValueError, Author.DoesNotExist):
                author_id = 0
                aname = ""
            books = Book.objects.filter(authors=author_id).order_by(
                'search_title', '-docdate')
            args['breadcrumbs'] = [_('Books'), _('Search by author'), aname]
            args['searchobject'] = 'author'

        # Books by series id
        elif searchtype == 's':
            try:
                ser_id = int(searchterms)
                ser = Series.objects.get(id=ser_id).ser
            except (TypeError, ValueError, Series.DoesNotExist):
                ser_id = 0
                ser = ""
            books = Book.objects.filter(series=ser_id).order_by(
                'search_title', '-docdate')
            args['breadcrumbs'] = [_('Books'), _('Search by series'), ser]
            args['searchobject'] = 'series'

        # Books by genre id
        elif searchtype == 'g':
            try:
                genre_id = int(searchterms)
                section = Genre.objects.get(id=genre_id).section
                subsection = Genre.objects.get(id=genre_id).subsection
                args['breadcrumbs'] = [
                    _('Books'),
                    _('Search by genre'), section, subsection
                ]
            except (TypeError, ValueError, Genre.DoesNotExist):
                genre_id = 0
                args['breadcrumbs'] = [_('Books'), _('Search by genre')]

            books = Book.objects.filter(genres=genre_id).order_by(
                'search_title', '-docdate')
            args['searchobject'] = 'genre'

        # Books on the user's bookshelf
        elif searchtype == 'u':
            if config.SOPDS_AUTH:
                books = Book.objects.filter(bookshelf__user=request.user
                                            ).order_by('-bookshelf__readtime')
                args['breadcrumbs'] = [
                    _('Books'),
                    _('Bookshelf'), request.user.username
                ]
            else:
                books = Book.objects.filter(id=0)
                args['breadcrumbs'] = [_('Books'), _('Bookshelf')]
            args['searchobject'] = 'title'
            args['isbookshelf'] = 1

        # Doubles (duplicates) for a given book id
        elif searchtype == 'd':
            try:
                book_id = int(searchterms)
                mbook = Book.objects.get(id=book_id)
                books = Book.objects.filter(
                    title=mbook.title, authors__in=mbook.authors.all()).exclude(
                        id=book_id).distinct().order_by('-docdate')
                args['breadcrumbs'] = [
                    _('Books'), _('Doubles for book'), mbook.title
                ]
            except (TypeError, ValueError, Book.DoesNotExist):
                # A malformed or unknown id used to raise a 500 here.
                books = Book.objects.filter(id=0)
                args['breadcrumbs'] = [_('Books'), _('Doubles for book')]
            args['searchobject'] = 'title'

        # Single book by id
        elif searchtype == 'i':
            try:
                book_id = int(searchterms)
            except (TypeError, ValueError):
                book_id = 0
            books = Book.objects.filter(id=book_id)
            # books[0].title used to raise IndexError for an unknown id.
            first = books.first()
            args['breadcrumbs'] = [_('Books'), first.title if first else '']
            args['searchobject'] = 'title'

        else:
            # An unknown searchtype used to leave 'books' unbound and crash
            # with UnboundLocalError on the count() call below.
            books = Book.objects.filter(id=0)
            args['breadcrumbs'] = [_('Books')]
            args['searchobject'] = 'title'

        # prefetch_related on sqlite on items >999 throws error "too many SQL variables"

        # Collapse duplicates and build the requested page
        books_count = books.count()
        op = OPDS_Paginator(books_count, 0, page_num, config.SOPDS_MAXITEMS,
                            HALF_PAGES_LINKS)
        items = []

        prev_title = ''
        prev_authors_set = set()

        # Start the scan at the last element of the previous page so that it
        # can absorb its own duplicates from this page, if any.
        summary_DOUBLES_HIDE = config.SOPDS_DOUBLES_HIDE and (searchtype !=
                                                              'd')
        start = op.d1_first_pos if (
            (op.d1_first_pos == 0) or
            (not summary_DOUBLES_HIDE)) else op.d1_first_pos - 1
        finish = op.d1_last_pos

        for row in books[start:finish + 1]:
            p = {'doubles': 0, 'lang_code': row.lang_code, 'filename': row.filename, 'path': row.path,
                 'registerdate': row.registerdate, 'id': row.id, 'annotation': strip_tags(row.annotation),
                 'docdate': row.docdate, 'format': row.format, 'title': row.title, 'filesize': row.filesize//1000,
                 'authors': row.authors.values(), 'genres': row.genres.values(), 'series': row.series.values()}
            if summary_DOUBLES_HIDE:
                title = p['title']
                authors_set = {a['id'] for a in p['authors']}
                if title.upper() == prev_title.upper(
                ) and authors_set == prev_authors_set:
                    items[-1]['doubles'] += 1
                else:
                    items.append(p)
                prev_title = title
                prev_authors_set = authors_set
            else:
                items.append(p)

        # Pull in duplicates from the next page, then drop the first element
        # if it belongs to the previous page (it only absorbed duplicates).
        if summary_DOUBLES_HIDE:
            double_flag = True
            while ((finish + 1) < books_count) and double_flag:
                finish += 1
                if books[finish].title.upper() == prev_title.upper() and {
                        a['id']
                        for a in books[finish].authors.values()
                } == prev_authors_set:
                    items[-1]['doubles'] += 1
                else:
                    double_flag = False

            if op.d1_first_pos != 0:
                items.pop(0)

        args['paginator'] = op.get_data_dict()
        args['searchterms'] = searchterms
        args['searchtype'] = searchtype
        args['books'] = items
        args['current'] = 'search'

    return render(request, 'sopds_books.html', args)
Exemplo n.º 8
0
    def BookPager(self, books, page_num, query):
        """Format one page of a book queryset as a Telegram message.

        books: a Book queryset to page through.
        page_num: 1-based page number.
        query: callback-data prefix used to build the pager buttons.
        Returns {'message': html_text, 'buttons': InlineKeyboardMarkup or None}.
        When SOPDS_DOUBLES_HIDE is on, consecutive rows with the same title
        and author set are collapsed into one item with a 'doubles' counter.
        """
        books_count = books.count()
        op = OPDS_Paginator(books_count, 0, page_num,
                            config.SOPDS_TELEBOT_MAXITEMS, HALF_PAGES_LINKS)
        items = []

        prev_title = ''
        prev_authors_set = set()

        # Start the scan at the last element of the previous page so that it
        # can absorb its own duplicates from this page, if any.
        summary_DOUBLES_HIDE = config.SOPDS_DOUBLES_HIDE
        start = op.d1_first_pos if (
            (op.d1_first_pos == 0) or
            (not summary_DOUBLES_HIDE)) else op.d1_first_pos - 1
        finish = op.d1_last_pos

        for row in books[start:finish + 1]:
            p = {
                'doubles': 0,
                'lang_code': row.lang_code,
                'filename': row.filename,
                'path': row.path,
                'registerdate': row.registerdate,
                'id': row.id,
                'annotation': strip_tags(row.annotation),
                'docdate': row.docdate,
                'format': row.format,
                'title': row.title,
                'filesize': row.filesize // 1000,
                'authors': row.authors.values(),
                'genres': row.genres.values(),
                'series': row.series.values(),
                'ser_no': row.bseries_set.values('ser_no'),
            }
            if summary_DOUBLES_HIDE:
                title = p['title']
                authors_set = {a['id'] for a in p['authors']}
                # Same title (case-insensitive) and same author set as the
                # previous row: count it as a duplicate instead of listing it.
                if title.upper() == prev_title.upper(
                ) and authors_set == prev_authors_set:
                    items[-1]['doubles'] += 1
                else:
                    items.append(p)
                prev_title = title
                prev_authors_set = authors_set
            else:
                items.append(p)

        # Pull in duplicates from the next page, then drop the first element
        # if it belongs to the previous page (it only absorbed duplicates).
        if summary_DOUBLES_HIDE:
            double_flag = True
            while ((finish + 1) < books_count) and double_flag:
                finish += 1
                if books[finish].title.upper() == prev_title.upper() and {
                        a['id']
                        for a in books[finish].authors.values()
                } == prev_authors_set:
                    items[-1]['doubles'] += 1
                else:
                    double_flag = False

            if op.d1_first_pos != 0:
                items.pop(0)

        # Render the page as Telegram HTML with /download<id> links.
        response = ''
        for b in items:
            authors = ', '.join([a['full_name'] for a in b['authors']])
            doubles = _("(doubles:%s) ") % b['doubles'] if b['doubles'] else ''
            response += '<b>%(title)s</b>\n%(author)s\n%(dbl)s/download%(link)s\n\n' % {
                'title': b['title'],
                'author': authors,
                'link': b['id'],
                'dbl': doubles
            }

        # Pager row: first / previous / current / next / last page.
        buttons = [
            InlineKeyboardButton('1 <<',
                                 callback_data='%s%s%s' %
                                 (query, query_delimiter, 1)),
            InlineKeyboardButton(
                '%s <' % op.previous_page_number,
                callback_data='%s%s%s' %
                (query, query_delimiter, op.previous_page_number)),
            InlineKeyboardButton('[ %s ]' % op.number,
                                 callback_data='%s%s%s' %
                                 (query, query_delimiter, 'current')),
            InlineKeyboardButton(
                '> %s' % op.next_page_number,
                callback_data='%s%s%s' %
                (query, query_delimiter, op.next_page_number)),
            InlineKeyboardButton('>> %s' % op.num_pages,
                                 callback_data='%s%s%s' %
                                 (query, query_delimiter, op.num_pages)),
        ]

        # Only show the pager when there is more than one page.
        markup = InlineKeyboardMarkup([buttons]) if op.num_pages > 1 else None

        return {'message': response, 'buttons': markup}
Exemplo n.º 9
0
 def get_object(self, request, searchtype="m", searchterms=None, searchterms0=None, page=1):
     """Search books and return one page of results for the OPDS feed.

     searchtype: 'm' title substring, 'b' title prefix, 'e' exact title,
     'a' by author id, 's' by series id, 'as' by author and series ids
     (searchterms = author id, searchterms0 = series id), 'g' by genre id,
     'u' user bookshelf, 'd' doubles for a book id.
     When SOPDS_DOUBLES_HIDE is on, consecutive rows with the same title and
     author set are collapsed into one item with a 'doubles' counter.
     """
     if not isinstance(page, int):
         page = int(page)
     page_num = page if page > 0 else 1

     # Books whose title contains the substring
     if searchtype == 'm':
         books = Book.objects.filter(search_title__contains=searchterms.upper()).order_by('search_title', '-docdate')
     # Books whose title starts with the substring
     elif searchtype == 'b':
         books = Book.objects.filter(search_title__startswith=searchterms.upper()).order_by('search_title', '-docdate')
     # Exact title match
     elif searchtype == 'e':
         books = Book.objects.filter(search_title=searchterms.upper()).order_by('search_title', '-docdate')
     # Books by author id
     elif searchtype == 'a':
         try:
             author_id = int(searchterms)
         except (TypeError, ValueError):
             author_id = 0
         books = Book.objects.filter(authors=author_id).order_by('search_title', '-docdate')
     # Books by series id
     elif searchtype == 's':
         try:
             ser_id = int(searchterms)
         except (TypeError, ValueError):
             ser_id = 0
         books = Book.objects.filter(series=ser_id).order_by('bseries__ser_no', 'search_title', '-docdate')
     # Books by author and series ids
     elif searchtype == "as":
         try:
             ser_id = int(searchterms0)
             author_id = int(searchterms)
         except (TypeError, ValueError):
             ser_id = 0
             author_id = 0
         books = Book.objects.filter(authors=author_id, series=ser_id if ser_id else None).order_by('bseries__ser_no', 'search_title', '-docdate')
     # Books by genre id
     elif searchtype == 'g':
         try:
             genre_id = int(searchterms)
         except (TypeError, ValueError):
             genre_id = 0
         books = Book.objects.filter(genres=genre_id).order_by('search_title', '-docdate')
     # Books on the user's bookshelf
     elif searchtype == 'u':
         if config.SOPDS_AUTH:
             books = Book.objects.filter(bookshelf__user=request.user).order_by('-bookshelf__readtime')
         else:
             books = Book.objects.filter(id=0)
     # Doubles (duplicates) for a given book id
     elif searchtype == 'd':
         try:
             book_id = int(searchterms)
             mbook = Book.objects.get(id=book_id)
             books = Book.objects.filter(title__iexact=mbook.title, authors__in=mbook.authors.all()).exclude(id=book_id).order_by('search_title', '-docdate')
         except (TypeError, ValueError, Book.DoesNotExist):
             # A malformed or unknown id used to raise a 500 here.
             books = Book.objects.filter(id=0)
     else:
         # An unknown searchtype used to leave 'books' unbound and crash
         # with UnboundLocalError on the count() call below.
         books = Book.objects.filter(id=0)

     # prefetch_related on sqlite on items >999 throws error "too many SQL variables"

     # Collapse duplicates and build the requested page
     books_count = books.count()
     op = OPDS_Paginator(books_count, 0, page_num, config.SOPDS_MAXITEMS)
     items = []

     prev_title = ''
     prev_authors_set = set()

     # Start the scan at the last element of the previous page so that it
     # can absorb its own duplicates from this page, if any.
     summary_DOUBLES_HIDE = config.SOPDS_DOUBLES_HIDE and (searchtype != 'd')
     start = op.d1_first_pos if ((op.d1_first_pos == 0) or (not summary_DOUBLES_HIDE)) else op.d1_first_pos - 1
     finish = op.d1_last_pos

     for row in books[start:finish + 1]:
         p = {'doubles': 0, 'lang_code': row.lang_code, 'filename': row.filename, 'path': row.path,
              'registerdate': row.registerdate, 'id': row.id, 'annotation': strip_tags(row.annotation),
              'docdate': row.docdate, 'format': row.format, 'title': row.title, 'filesize': row.filesize//1000,
              'authors': row.authors.values(), 'genres': row.genres.values(), 'series': row.series.values(), 'ser_no': row.bseries_set.values('ser_no')}
         if summary_DOUBLES_HIDE:
             title = p['title']
             authors_set = {a['id'] for a in p['authors']}
             if title.upper() == prev_title.upper() and authors_set == prev_authors_set:
                 items[-1]['doubles'] += 1
             else:
                 items.append(p)
             prev_title = title
             prev_authors_set = authors_set
         else:
             items.append(p)

     # Pull in duplicates from the next page, then drop the first element
     # if it belongs to the previous page (it only absorbed duplicates).
     if summary_DOUBLES_HIDE:
         double_flag = True
         while ((finish + 1) < books_count) and double_flag:
             finish += 1
             if books[finish].title.upper() == prev_title.upper() and {a['id'] for a in books[finish].authors.values()} == prev_authors_set:
                 items[-1]['doubles'] += 1
             else:
                 double_flag = False

         if op.d1_first_pos != 0:
             items.pop(0)

     return {"books": items, "searchterms": searchterms, "searchterms0": searchterms0, "searchtype": searchtype, "paginator": op.get_data_dict()}