def bookmarcs(request):
    """Render the current user's bookmarks together with their records.

    Loads the user's bookmarks (newest first), fetches the matching Record
    and Ebook rows from the 'records' database, renders a bibliographic HTML
    snippet for each via the XSLT transformer, and passes everything to the
    bookmarks template in bookmark order.
    """
    # BUG FIX: .values() yields dicts, so rows must be accessed by key —
    # the original used bookmarc.gen_id, which raises AttributeError.
    bookmarcs = Bookmarc.objects.values('id', 'gen_id').filter(user=request.user).order_by('-add_date')

    gen_ids = {}
    for bookmarc in bookmarcs:
        gen_ids[bookmarc['gen_id']] = {'bookmarc': bookmarc}

    for record in Record.objects.using('records').filter(gen_id__in=gen_ids.keys()):
        doc_tree = etree.XML(record.content)
        doc_tree = xslt_bib_draw_transformer(doc_tree)
        gen_ids[record.gen_id]['record'] = record
        # BUG FIX: the original assignment ended with a stray comma, which
        # silently stored a 1-tuple instead of the rendered string.
        gen_ids[record.gen_id]['bib'] = etree.tostring(doc_tree).replace(u'<b/>', u' ')

    for record in Ebook.objects.using('records').filter(gen_id__in=gen_ids):
        doc_tree = etree.XML(record.content)
        doc_tree = xslt_bib_draw_transformer(doc_tree)
        gen_ids[record.gen_id]['record'] = record
        # Same stray-comma fix as above.
        gen_ids[record.gen_id]['bib'] = etree.tostring(doc_tree).replace(u'<b/>', u' ')

    # Preserve the bookmark ordering in the rendered list.
    records = [gen_ids[bookmarc['gen_id']] for bookmarc in bookmarcs]

    return render(request, 'rbooks/frontend/bookmarcs.html', {
        'records': records
    })
def participant_income(request):
    """Show newly created records, optionally filtered by a holder sigla.

    Queries Solr (newest first, 20 per page) and attaches the rendered
    bibliographic description from the local records database to each doc.
    """
    sigla = request.GET.get('sigla', None)
    solr = sunburnt.SolrInterface(settings.SOLR['local_records_host'])

    if sigla:
        query = solr.Q(**{'holder-sigla_s': sigla})
    else:
        # No sigla given — match everything.
        query = solr.Q(**{'*': '*'})

    solr_searcher = solr.query(query)
    solr_searcher = solr_searcher.field_limit("id")
    solr_searcher = solr_searcher.sort_by('-record-create-date_dts')

    # FIX: the old comment claimed 25 per page; the actual page size is 20.
    paginator = Paginator(solr_searcher, 20)  # 20 records per page

    page = request.GET.get('page')
    try:
        results_page = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        results_page = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page of results.
        results_page = paginator.page(paginator.num_pages)

    docs = [replace_doc_attrs(row) for row in results_page.object_list]
    doc_ids = [doc['id'] for doc in docs]

    records_dict = {}
    for record in Record.objects.using('local_records').filter(gen_id__in=doc_ids):
        records_dict[record.gen_id] = etree.tostring(
            xslt_bib_draw_transformer(etree.XML(record.content), abstract='false()'),
            encoding='utf-8')

    # Attach the rendered record (or None when it was not found locally).
    for doc in docs:
        doc['record'] = records_dict.get(doc['id'])

    return render(request, 'ssearch/frontend/income.html', {
        'results_page': results_page,
        'docs': docs
    })
def participant_income(request):
    """List recently created records, optionally restricted to one holder sigla.

    Pages Solr results 20 at a time (newest first) and joins each hit with
    its rendered bibliographic description from the local records database.
    """
    sigla = request.GET.get("sigla", None)

    connection = httplib2.Http(disable_ssl_certificate_validation=True)
    solr = sunburnt.SolrInterface(settings.SOLR["local_records_host"], http_connection=connection)

    if sigla:
        query = solr.Q(**{"holder-sigla_s": sigla})
    else:
        query = solr.Q(**{"*": "*"})

    searcher = (
        solr.query(query)
        .field_limit("id")
        .sort_by("-record-create-date_dts")
    )

    paginator = Paginator(searcher, 20)
    page = request.GET.get("page")
    try:
        results_page = paginator.page(page)
    except PageNotAnInteger:
        results_page = paginator.page(1)  # non-integer page -> first page
    except EmptyPage:
        results_page = paginator.page(paginator.num_pages)  # out of range -> last page

    docs = [replace_doc_attrs(row) for row in results_page.object_list]
    doc_ids = [doc["id"] for doc in docs]

    matched = Record.objects.using("local_records").filter(gen_id__in=doc_ids)
    records_dict = {
        rec.gen_id: etree.tostring(
            xslt_bib_draw_transformer(etree.XML(rec.content), abstract="false()"),
            encoding="utf-8",
        )
        for rec in matched
    }

    for doc in docs:
        doc["record"] = records_dict.get(doc["id"])

    return render(request, "ssearch/frontend/income.html", {"results_page": results_page, "docs": docs})
def participant_income(sigla):
    """Return the 10 most recently created records for *sigla*.

    Unlike the request-based view variants, this helper takes the sigla
    directly and returns a template-context dict ({'docs', 'sigla'}) instead
    of an HttpResponse. An empty/None sigla matches all records.
    """
    solr_connection = httplib2.Http(disable_ssl_certificate_validation=True)
    solr = sunburnt.SolrInterface(settings.SOLR['local_records_host'], http_connection=solr_connection)

    if sigla:
        query = solr.Q(**{'holder-sigla_s': sigla})
    else:
        query = solr.Q(**{'*': '*'})

    solr_searcher = solr.query(query)
    solr_searcher = solr_searcher.field_limit(['id', 'record-create-date_dts'])
    solr_searcher = solr_searcher.sort_by('-record-create-date_dts')

    # FIX: the old comment claimed 25 per page; the actual page size is 10,
    # and only the first page is ever used.
    paginator = Paginator(solr_searcher, 10)
    results_page = paginator.page(1)

    docs = [replace_doc_attrs(row) for row in results_page.object_list]
    doc_ids = [doc['id'] for doc in docs]

    records_dict = {}
    for record in Record.objects.using('local_records').filter(gen_id__in=doc_ids):
        records_dict[record.gen_id] = rusmarc_template.beautify(etree.tostring(
            xslt_bib_draw_transformer(etree.XML(record.content), abstract='false()'),
            encoding='utf-8'))

    for doc in docs:
        doc['record'] = records_dict.get(doc['id'])

    return {
        'docs': docs,
        'sigla': sigla
    }
def index(request):
    """Render the current user's saved documents with bibliographic snippets.

    Fetches the user's SavedDocument rows, loads the matching records from
    the 'records' database, renders each as HTML via the XSLT transformer,
    and passes them to the template in saved-document order.
    """
    saved_docs = SavedDocument.objects.filter(user=request.user)

    gen_ids = {}
    for saved_doc in saved_docs:
        gen_ids[saved_doc.gen_id] = {'saved_doc': saved_doc}

    for record in Record.objects.using('records').filter(gen_id__in=gen_ids.keys()):
        doc_tree = etree.XML(record.content)
        doc_tree = xslt_bib_draw_transformer(doc_tree)
        gen_ids[record.gen_id]['record'] = record
        # BUG FIX: the original line ended with a stray trailing comma, which
        # silently stored a 1-tuple instead of the rendered string.
        gen_ids[record.gen_id]['bib'] = etree.tostring(doc_tree).replace(u'<b/>', u' ')

    # Preserve the saved-document ordering in the rendered list.
    records = [gen_ids[saved_doc.gen_id] for saved_doc in saved_docs]

    return render(request, 'mydocs/frontend/index.html', {
        'records': records
    })
def to_print(request, gen_id):
    """Render the printable view of a single record.

    Looks the record up in the 'records' database by gen_id (404 when
    missing), renders its bibliographic description and parsed document
    dict, and hands both to the print template.
    """
    try:
        record = Record.objects.using("records").get(gen_id=gen_id)
    except Record.DoesNotExist:
        raise Http404()

    doc_tree = etree.XML(record.content)
    bib_tree = xslt_bib_draw_transformer(doc_tree)
    bib_dump = etree.tostring(bib_tree, encoding="utf-8")

    doc_tree_t = xslt_transformer(doc_tree)
    doc = doc_tree_to_dict(doc_tree_t)

    # NOTE(review): the original carried large blocks of commented-out code
    # (access logging, MARC dump, linked-document lookup) plus the unused
    # locals `catalog` and `marct_tree` that only fed that dead code; all of
    # it was removed. Recover it from VCS history if the features return.
    return render(
        request,
        "ssearch/frontend/print.html",
        {
            "doc_dump": bib_dump.replace("<b/>", ""),
            "doc": doc,
            "gen_id": gen_id,
        },
    )
def detail(request, gen_id):
    """Render the detail page for a record, logging the access.

    Loads the record (404 when missing), records a DetailAccessLog hit,
    renders the bibliographic and MARC dumps, deduplicates holders, and —
    for analytic-level records (leader08 == '1') — resolves linked documents
    through Solr and the 'records' database.
    """
    catalog = None
    try:
        record = Record.objects.using("records").get(gen_id=gen_id)
        catalog = "records"
    except Record.DoesNotExist:
        raise Http404()

    DetailAccessLog.objects.create(catalog=catalog, gen_id=gen_id, date_time=datetime.datetime.now())

    doc_tree = etree.XML(record.content)

    leader8 = doc_tree.xpath("/record/leader/leader08")
    analitic_level = u"0"
    if len(leader8) == 1:
        analitic_level = leader8[0].text

    bib_tree = xslt_bib_draw_transformer(doc_tree)
    marct_tree = xslt_marc_dump_transformer(doc_tree)
    bib_dump = etree.tostring(bib_tree, encoding="utf-8")
    marc_dump = etree.tostring(marct_tree, encoding="utf-8")

    doc_tree_t = xslt_transformer(doc_tree)
    doc = doc_tree_to_dict(doc_tree_t)

    holders = doc.get("holders", list())
    if holders:
        # keep only unique holders
        doc["holders"] = list(set(holders))

    linked_docs = []
    if analitic_level == "1":
        doc["holders"] = []

        solr_connection = httplib2.Http(disable_ssl_certificate_validation=True)
        solr = sunburnt.SolrInterface(settings.SOLR["host"], http_connection=solr_connection)
        linked_query = solr.query(**{"linked-record-number_s": record.record_id.replace(u"\\", u"\\\\")})
        linked_query = linked_query.field_limit("id")
        linked_results = linked_query.execute()

        linked_doc_ids = [linked_doc["id"] for linked_doc in linked_results]

        # BUG FIX: the original loop reused the name `record`, clobbering the
        # record fetched above, so the access_count query below counted hits
        # for the LAST linked record instead of the requested one.
        for linked_record in Record.objects.using("records").filter(gen_id__in=linked_doc_ids):
            linked_docs.append({
                "record": xml_doc_to_dict(linked_record.content),
                "id": linked_record.gen_id,
            })

    access_count = DetailAccessLog.objects.filter(catalog=catalog, gen_id=record.gen_id).count()

    return render(
        request,
        "ssearch/frontend/detail.html",
        {
            "doc_dump": rusmarc_template.beautify(bib_dump.replace("<b/>", "")),
            "marc_dump": marc_dump,
            "doc": doc,
            "gen_id": gen_id,
            "linked_docs": linked_docs,
            "access_count": access_count,
        },
    )


def clean_holder_title(holder):
    """Normalize a holder's organization title for sorting.

    Lowercases the title and strips the Russian boilerplate tokens
    "цбс", "го" and "г." along with surrounding whitespace.
    """
    return (
        holder.get("org", {})
        .get("title", "")
        .lower()
        .replace(u"цбс", "")
        .replace(u"го", "")
        .replace(u"г.", "")
        .strip()
    )

# NOTE(review): the original source had stray top-level statements here
# (`holders.sort(key=clean_holder_title)` / `return holders`) — a
# module-level `return` is a SyntaxError, and `holders` is undefined at
# module scope. They look like remnants of a lost `order_holders(holders)`
# helper and were removed; restore the helper from VCS history if needed.
def detail(request, gen_id):
    """Render the detail page for a record, logging the access.

    Loads the record (404 when missing), records a DetailAccessLog hit,
    renders the bibliographic and MARC dumps, deduplicates holders, and —
    for analytic-level records (leader08 == '1') — resolves linked documents
    through Solr and the 'records' database.
    """
    catalog = None
    try:
        record = Record.objects.using('records').get(gen_id=gen_id)
        catalog = 'records'
    except Record.DoesNotExist:
        raise Http404()

    DetailAccessLog.objects.create(catalog=catalog, gen_id=gen_id, date_time=datetime.datetime.now())

    doc_tree = etree.XML(record.content)

    leader8 = doc_tree.xpath('/record/leader/leader08')
    analitic_level = u'0'
    if len(leader8) == 1:
        analitic_level = leader8[0].text

    bib_tree = xslt_bib_draw_transformer(doc_tree)
    marct_tree = xslt_marc_dump_transformer(doc_tree)
    bib_dump = etree.tostring(bib_tree, encoding='utf-8')
    marc_dump = etree.tostring(marct_tree, encoding='utf-8')

    doc_tree_t = xslt_transformer(doc_tree)
    doc = doc_tree_to_dict(doc_tree_t)

    holders = doc.get('holders', list())
    if holders:
        # keep only unique holders
        doc['holders'] = list(set(holders))

    linked_docs = []
    if analitic_level == '1':
        doc['holders'] = []

        solr = sunburnt.SolrInterface(settings.SOLR['host'])
        linked_query = solr.query(**{'linked-record-number_s': record.record_id.replace(u"\\", u'\\\\')})
        linked_query = linked_query.field_limit("id")
        linked_results = linked_query.execute()

        linked_doc_ids = [linked_doc['id'] for linked_doc in linked_results]

        # BUG FIX: the original loop reused the name `record`, clobbering the
        # record fetched above, so the access_count query below counted hits
        # for the LAST linked record instead of the requested one.
        for linked_record in Record.objects.using('records').filter(gen_id__in=linked_doc_ids):
            linked_docs.append({
                'record': xml_doc_to_dict(linked_record.content),
                'id': linked_record.gen_id,
            })

    access_count = DetailAccessLog.objects.filter(catalog=catalog, gen_id=record.gen_id).count()

    return render(request, 'ssearch/frontend/detail.html', {
        'doc_dump': rusmarc_template.beautify(bib_dump.replace('<b/>', '')),
        'marc_dump': marc_dump,
        'doc': doc,
        'gen_id': gen_id,
        'linked_docs': linked_docs,
        'access_count': access_count
    })