Example #1
def bookmarcs(request):
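    # Collect the current user's bookmarks (newest first), fetch the matching
    # Record/Ebook rows from the 'records' database, render each one to a
    # bibliographic description via XSLT, and hand them to the template in
    # bookmark order.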
    #print list(Bookmarc.objects.raw("SELECT id, gen_id FROM rbooks_bookmarc WHERE user_id=%s GROUP BY gen_id ORDER BY add_date DESC", params=[request.user.id]))
    bookmarcs = Bookmarc.objects.values('id', 'gen_id').filter(user=request.user).order_by('-add_date')
    gen_ids = {}
    for bookmarc in bookmarcs:
        # .values() yields dicts, so fields are accessed by key
        gen_ids[bookmarc['gen_id']] = {'bookmarc': bookmarc}


    for record in Record.objects.using('records').filter(gen_id__in=gen_ids.keys()):
        doc_tree = etree.XML(record.content)
        doc_tree = xslt_bib_draw_transformer(doc_tree)
        gen_ids[record.gen_id]['record']= record
        gen_ids[record.gen_id]['bib'] = etree.tostring(doc_tree).replace(u'<b/>', u' ')

    for record in Ebook.objects.using('records').filter(gen_id__in=gen_ids):
        doc_tree = etree.XML(record.content)
        doc_tree = xslt_bib_draw_transformer(doc_tree)
        gen_ids[record.gen_id]['record'] = record
        gen_ids[record.gen_id]['bib'] = etree.tostring(doc_tree).replace(u'<b/>', u' ')

    records = []
    for bookmarc in bookmarcs:
        records.append(gen_ids[bookmarc['gen_id']])

    return render(request, 'rbooks/frontend/bookmarcs.html', {
        'records': records
    })
Example #2
def bookmarcs(request):
    #print list(Bookmarc.objects.raw("SELECT id, gen_id FROM rbooks_bookmarc WHERE user_id=%s GROUP BY gen_id ORDER BY add_date DESC", params=[request.user.id]))
    bookmarcs = Bookmarc.objects.values(
        'id', 'gen_id').filter(user=request.user).order_by('-add_date')
    gen_ids = {}
    for bookmarc in bookmarcs:
        # .values() yields dicts, so fields are accessed by key
        gen_ids[bookmarc['gen_id']] = {'bookmarc': bookmarc}

    for record in Record.objects.using('records').filter(
            gen_id__in=gen_ids.keys()):
        doc_tree = etree.XML(record.content)
        doc_tree = xslt_bib_draw_transformer(doc_tree)
        gen_ids[record.gen_id]['record'] = record
        gen_ids[record.gen_id]['bib'] = etree.tostring(doc_tree).replace(
            u'<b/>', u' ')

    for record in Ebook.objects.using('records').filter(gen_id__in=gen_ids):
        doc_tree = etree.XML(record.content)
        doc_tree = xslt_bib_draw_transformer(doc_tree)
        gen_ids[record.gen_id]['record'] = record
        gen_ids[record.gen_id]['bib'] = etree.tostring(doc_tree).replace(
            u'<b/>', u' ')

    records = []
    for bookmarc in bookmarcs:
        records.append(gen_ids[bookmarc['gen_id']])

    return render(request, 'rbooks/frontend/bookmarcs.html',
                  {'records': records})
Example #3
def index(request):
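    # Saved-documents page: show the current user's SavedDocument entries,
    # optionally narrowed to one List chosen via ?list=<id>, and render the
    # bibliographic description of each underlying record via XSLT.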
    q = Q(user=request.user)
    list_id = request.GET.get('list', '')
    list = None
    if list_id:
        try:
            list = List.objects.get(id=list_id)
            q &= Q(list=list)
        except List.DoesNotExist:
            pass
    saved_docs = SavedDocument.objects.filter(q)
    lists = List.objects.filter(user=request.user)

    gen_ids = {}
    for saved_doc in saved_docs:
        gen_ids[saved_doc.gen_id] = {'saved_doc': saved_doc}

    for record in Record.objects.using('records').filter(gen_id__in=gen_ids.keys()):
        doc_tree = etree.XML(record.content)
        doc_tree = xslt_bib_draw_transformer(doc_tree)
        gen_ids[record.gen_id]['record'] = record
        gen_ids[record.gen_id]['bib'] = etree.tostring(doc_tree).replace(u'<b/>', u' ')

    records = []
    for saved_doc in saved_docs:
        records.append(gen_ids[saved_doc.gen_id])

    return render(request, 'mydocs/frontend/index.html', {
        'records': records,
        'lists': lists,
        'list': list,
    })
Example #4
def index(request):
    q = Q(user=request.user)
    list_id = request.GET.get('list', '')
    list = None
    if list_id:
        try:
            list = List.objects.get(id=list_id)
            q &= Q(list=list)

        except List.DoesNotExist:
            pass
    saved_docs = SavedDocument.objects.filter(q)
    lists = List.objects.all()

    gen_ids = {}
    for saved_doc in saved_docs:
        gen_ids[saved_doc.gen_id] = {'saved_doc': saved_doc}

    for record in Record.objects.using('records').filter(
            gen_id__in=gen_ids.keys()):
        doc_tree = etree.XML(record.content)
        doc_tree = xslt_bib_draw_transformer(doc_tree)
        gen_ids[record.gen_id]['record'] = record
        gen_ids[record.gen_id]['bib'] = etree.tostring(doc_tree).replace(
            u'<b/>', u' ')

    records = []
    for saved_doc in saved_docs:
        records.append(gen_ids[saved_doc.gen_id])

    return render(request, 'mydocs/frontend/index.html', {
        'records': records,
        'lists': lists,
        'list': list,
    })
Example #5
def _get_books(xml):
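    # Parse an XML batch of records: every child of <records> is rendered to a
    # bibliographic description via XSLT (a deep copy is transformed, not the
    # original node), and field 999 subfield "z", when present, supplies a
    # free-text description of the item.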
    #    url='http://www.unilib.neva.ru/cgi-bin/zurlcirc?z39.50r://%s:%[email protected]/circ?8003330' % (lib_login, lib_password)
    #    opener = urllib2.build_opener()
    #    result = opener.open(url)
    #    results = result.read()
    xml = xml.replace('<record syntax="">', '<record syntax="RUSmarc">')
    try:
        records_root = etree.XML(xml)
    except etree.XMLSyntaxError:
        return []
    books = []
    record_trees = records_root.xpath('/records/*')
    for record_tree in record_trees:
        record_root = copy.deepcopy(record_tree)  # otherwise the transformation raises an error
        book = {}
        bib_record = xslt_bib_draw_transformer(record_root, abstract='false()')
        book['record'] = etree.tostring(bib_record, encoding='utf-8')

        description_tree = record_root.xpath('field[@id="999"]/subfield[@id="z"]')
        if description_tree:
            book['description'] = description_tree[0].text
        else:
            book['description'] = u''
        books.append(book)
    return books
Example #6
def print_to_pdf(request):
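    # Despite the name, this builds a Word document (Document() is presumably
    # python-docx) from the records selected in the POST data: each record is
    # rendered to a bibliographic card, tags are stripped, a space is enforced
    # after every period, and the .docx is streamed back as an attachment.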
    # print 1,2
    doc_ids = request.POST.getlist('selected')

    document = Document()

    # document.add_heading('Document Title', 0)

    records = get_records(doc_ids)
    for i, record in enumerate(records):
        doc_tree = etree.XML(record.content)
        bib_tree = xslt_bib_draw_transformer(doc_tree)
        bib_dump = etree.tostring(bib_tree, encoding='utf-8')

        libcard = strip_tags(rusmarc_template.beautify(bib_dump.replace('<b/>', '')).decode('utf-8'))
        # insert a space after any period that is immediately followed by a word character
        while True:
            res = re.search(r'\.\w', libcard, flags=re.IGNORECASE | re.MULTILINE | re.UNICODE)
            if res is None:
                break
            span = res.span()
            libcard = libcard[0: span[0] + 1] + ' ' + libcard[span[1] - 1: ]
        # libcard = u'. '.join(re.split('\.\w', libcard, flags=re.IGNORECASE | re.UNICODE))
        document.add_paragraph(unicode(i + 1) + u'. ' + libcard)

    response = HttpResponse(content_type='application/vnd.openxmlformats-officedocument.wordprocessingml.document')
    response['Content-Disposition'] = 'attachment; filename=print.docx'
    document.save(response)
    return response
Example #7
def _get_orders(xml):
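    # Parse an XML list of ILL task packages: for each order, extract the
    # bibliographic record (rendered via XSLT), the status or error report,
    # the target reference, a human-readable task status and the creation
    # timestamp; anything missing is reported as 'undefined'.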
    try:
        orders_root = etree.XML(xml)
    except etree.XMLSyntaxError:
        return []

    order_trees = orders_root.xpath('/result/eSTaskPackage')
    orders = []
    for order_tree in order_trees:
        order = {}
        record_tree = order_tree.xpath('taskSpecificParameters/targetPart/itemRequest/record')
        if record_tree:
            record_root = copy.deepcopy(record_tree[0])  # otherwise the transformation raises an error
            try:
                bib_record = xslt_bib_draw_transformer(record_root, abstract='false()')
                order['record'] = etree.tostring(bib_record, encoding='utf-8').replace('<b/>', '')
            except etree.XSLTApplyError as e:
                order['record'] = e.message

        status_or_error_report = order_tree.xpath('taskSpecificParameters/targetPart/statusOrErrorReport')
        if status_or_error_report:
            order['status_or_error_report'] = status_or_error_report[0].text
        else:
            order['status_or_error_report'] = u'undefined'

        target_reference = order_tree.xpath('targetReference')
        if target_reference:
            order['target_reference'] = target_reference[0].text
        else:
            order['target_reference'] = u'undefined'

        task_status = order_tree.xpath('taskStatus')
        if task_status:
            status_titles = {
                '0': u'Не выполнен',
                '3': u'Отказ',
                '1': u'Выполнен',
                '2': u'Выдан'
            }
            order['task_status'] = status_titles.get(task_status[0].text, task_status[0].text)
        else:
            order['task_status'] = u'undefined'

        creation_date_time = order_tree.xpath('creationDateTime')
        if creation_date_time:
            try:
                date = datetime.datetime.strptime(creation_date_time[0].text, '%Y%m%d%H%M%S')
            except ValueError:
                date = u'value error'
            order['creation_date_time'] = date
        else:
            order['creation_date_time'] = u'undefined'

        orders.append(order)
    return orders
Example #8
def participant_income(request):
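    # Latest acquisitions for one participant: query Solr (via sunburnt) for
    # record ids, optionally restricted to a holder sigla, paginate the result,
    # then load the full MARC records from the 'local_records' database and
    # render them to bibliographic descriptions via XSLT.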
    sigla = request.GET.get('sigla', None)
    solr_connection = httplib2.Http(disable_ssl_certificate_validation=True)
    solr = sunburnt.SolrInterface(settings.SOLR['local_records_host'],
                                  http_connection=solr_connection)

    if sigla:
        query = solr.Q(**{'holder-sigla_s': sigla})
    else:
        query = solr.Q(**{'*': '*'})

    solr_searcher = solr.query(query)
    solr_searcher = solr_searcher.field_limit("id")

    solr_searcher = solr_searcher.sort_by('-record-create-date_dts')

    paginator = Paginator(solr_searcher, 20)  # Show 20 records per page

    page = request.GET.get('page')
    try:
        results_page = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        results_page = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page of results.
        results_page = paginator.page(paginator.num_pages)

    docs = []

    for row in results_page.object_list:
        docs.append(replace_doc_attrs(row))

    doc_ids = []
    for doc in docs:
        doc_ids.append(doc['id'])

    records_dict = {}
    records = list(
        Record.objects.using('local_records').filter(gen_id__in=doc_ids))
    for record in records:
        records_dict[record.gen_id] = etree.tostring(xslt_bib_draw_transformer(
            etree.XML(record.content), abstract='false()'),
                                                     encoding='utf-8')

    for doc in docs:
        doc['record'] = records_dict.get(doc['id'])

    return render(request, 'ssearch/frontend/income.html', {
        'results_page': results_page,
        'docs': docs
    })
Example #9
def get_bib_records(bib_ids):
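    # Render each record's MARC XML to a formatted bibliographic card; the
    # '<b/>' markers produced by the XSLT are dropped before beautifying.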
    bib_records = []
    if bib_ids:
        records = get_records(bib_ids)
        for record in records:
            doc_tree = etree.XML(record.content)
            bib_tree = xslt_bib_draw_transformer(doc_tree)
            bib_dump = etree.tostring(bib_tree, encoding='utf-8')
            bib_records.append({
                'record': record,
                'card': beautify(bib_dump.replace('<b/>', '')),
            })

    return bib_records
Example #10
def participant_income(request):
    sigla = request.GET.get('sigla', None)
    solr_connection = httplib2.Http(disable_ssl_certificate_validation=True)
    solr = sunburnt.SolrInterface(settings.SOLR['local_records_host'], http_connection=solr_connection)

    if sigla:
        query = solr.Q(**{'holder-sigla_s': sigla})
    else:
        query = solr.Q(**{'*': '*'})

    solr_searcher = solr.query(query)
    solr_searcher = solr_searcher.field_limit("id")

    solr_searcher = solr_searcher.sort_by('-record-create-date_dts')

    paginator = Paginator(solr_searcher, 20)  # Show 20 records per page

    page = request.GET.get('page')
    try:
        results_page = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        results_page = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page of results.
        results_page = paginator.page(paginator.num_pages)

    docs = []

    for row in results_page.object_list:
        docs.append(replace_doc_attrs(row))

    doc_ids = []
    for doc in docs:
        doc_ids.append(doc['id'])

    records_dict = {}
    records = list(Record.objects.using('local_records').filter(gen_id__in=doc_ids))
    for record in records:
        records_dict[record.gen_id] = etree.tostring(
            xslt_bib_draw_transformer(etree.XML(record.content), abstract='false()'), encoding='utf-8')

    for doc in docs:
        doc['record'] = records_dict.get(doc['id'])

    return render(request, 'ssearch/frontend/income.html', {
        'results_page': results_page,
        'docs': docs
    })
Example #11
def save_document(request):
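    # Save a document found through a Z39.50 gateway: the record is re-fetched
    # from the gateway with a 'present' request built from the posted session
    # and offset, then rendered to both a full and a short description; the
    # owner is the username or, for anonymous users, the session key.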
    if request.method != 'POST':
        return HttpResponse('Only post requests')

    expiry_date = None
    if request.user.is_authenticated():
        owner_id = request.user.username
    elif request.session.session_key:
        owner_id = request.session.session_key
        expiry_date = request.session.get_expiry_date()
    else:
        return HttpResponse(
            json_error(
                u'Документ не может быть сохранен, возможно в Вашем браузере отключены cookies.'
            ))

    catalog = get_object_or_404(ZCatalog,
                                latin_title=request.POST['catalog_id'])
    zgate_url = catalog.url

    zstate = 'present+' + request.POST['zsession'] +\
             '+default+' + request.POST['zoffset'] +\
             '+1+X+1.2.840.10003.5.28+'+catalog.default_lang

    (xml_record, cookies) = zworker.request(zgate_url + '?' + zstate)

    try:
        tree = ET.XML(xml_record)
    except SyntaxError as e:
        return HttpResponse(
            json_error(u'Заказ не выполнен. Возможно, время сессии истекло'))

    comments = None
    if 'comments' in request.POST and request.POST['comments']:
        comments = request.POST['comments']

    try:
        doc = etree.XML(xml_record)
        result_tree = xslt_bib_draw_transformer(doc)
        full_document = unicode(result_tree)

        result_tree = short_transform(doc)
        short_document = unicode(result_tree)
    except Exception, e:
        raise e
Example #12
def participant_income(sigla):
    solr_connection = httplib2.Http(disable_ssl_certificate_validation=True)
    solr = sunburnt.SolrInterface(settings.SOLR['local_records_host'],
                                  http_connection=solr_connection)
    if sigla:
        query = solr.Q(**{'holder-sigla_s': sigla})
    else:
        query = solr.Q(**{'*': '*'})

    solr_searcher = solr.query(query)
    solr_searcher = solr_searcher.field_limit(['id', 'record-create-date_dts'])

    solr_searcher = solr_searcher.sort_by('-record-create-date_dts')

    paginator = Paginator(solr_searcher, 10)  # Show 10 records per page

    # No page parameter here; always deliver the first page of results.
    results_page = paginator.page(1)

    docs = []

    for row in results_page.object_list:
        docs.append(replace_doc_attrs(row))

    doc_ids = []
    for doc in docs:
        doc_ids.append(doc['id'])

    records_dict = {}
    records = list(
        Record.objects.using('local_records').filter(gen_id__in=doc_ids))
    for record in records:
        records_dict[record.gen_id] = rusmarc_template.beautify(
            etree.tostring(xslt_bib_draw_transformer(etree.XML(record.content),
                                                     abstract='false()'),
                           encoding='utf-8'))

    for doc in docs:
        doc['record'] = records_dict.get(doc['id'])

    return {
        # 'results_page': results_page,
        'docs': docs,
        'sigla': sigla
    }
Example #13
def participant_income(sigla):
    solr_connection = httplib2.Http(disable_ssl_certificate_validation=True)
    solr = sunburnt.SolrInterface(settings.SOLR['local_records_host'], http_connection=solr_connection)
    if sigla:
        query = solr.Q(**{'holder-sigla_s': sigla})
    else:
        query = solr.Q(**{'*': '*'})

    solr_searcher = solr.query(query)
    solr_searcher = solr_searcher.field_limit(['id', 'record-create-date_dts'])

    solr_searcher = solr_searcher.sort_by('-record-create-date_dts')

    paginator = Paginator(solr_searcher, 10)  # Show 10 records per page

    # No page parameter here; always deliver the first page of results.
    results_page = paginator.page(1)

    docs = []

    for row in results_page.object_list:
        docs.append(replace_doc_attrs(row))

    doc_ids = []
    for doc in docs:
        doc_ids.append(doc['id'])

    records_dict = {}
    records = list(Record.objects.using('local_records').filter(gen_id__in=doc_ids))
    for record in records:
        records_dict[record.gen_id] = rusmarc_template.beautify(etree.tostring(
            xslt_bib_draw_transformer(etree.XML(record.content), abstract='false()'), encoding='utf-8'))

    for doc in docs:
        doc['record'] = records_dict.get(doc['id'])

    return {
        # 'results_page': results_page,
        'docs': docs,
        'sigla': sigla
    }
Example #14
def save_document(request):
    if request.method != 'POST':
        return HttpResponse('Only post requests')


    expiry_date = None
    if request.user.is_authenticated():
        owner_id = request.user.username
    elif request.session.session_key:
        owner_id = request.session.session_key
        expiry_date = request.session.get_expiry_date()
    else:
        return HttpResponse(json_error(u'Документ не может быть сохранен, возможно в Вашем браузере отключены cookies.'))

    catalog = get_object_or_404(ZCatalog, latin_title=request.POST['catalog_id'])
    zgate_url = catalog.url

    zstate = 'present+' + request.POST['zsession'] +\
             '+default+' + request.POST['zoffset'] +\
             '+1+X+1.2.840.10003.5.28+'+catalog.default_lang

    (xml_record, cookies) = zworker.request(zgate_url + '?' + zstate)

    try:
        tree = ET.XML(xml_record)
    except SyntaxError as e:
        return HttpResponse(json_error(u'Заказ не выполнен. Возможно, время сессии истекло'))

    comments = None
    if 'comments' in request.POST and request.POST['comments']:
        comments = request.POST['comments']

    try:
        doc = etree.XML(xml_record)
        result_tree = xslt_bib_draw_transformer(doc)
        full_document = unicode(result_tree)

        result_tree = short_transform(doc)
        short_document = unicode(result_tree)
    except Exception, e:
        raise e
Example #15
def to_print(request, gen_id):
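    # Printable view of a single record: load it from the 'records' database
    # (404 if missing), render the bibliographic dump and a dict representation
    # via XSLT, and pass both to the print template.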
    catalog = None
    try:
        record = Record.objects.using('records').get(gen_id=gen_id)
        catalog = 'records'
    except Record.DoesNotExist:
        raise Http404()

    # DetailAccessLog(catalog=catalog, gen_id=record.gen_id).save()
    # DetailAccessLog.objects.create(catalog=catalog, gen_id=gen_id, date_time=datetime.datetime.now())

    doc_tree = etree.XML(record.content)
    # leader8 = doc_tree.xpath('/record/leader/leader08')
    #
    # analitic_level = u'0'
    # if len(leader8) == 1:
    #     analitic_level = leader8[0].text

    bib_tree = xslt_bib_draw_transformer(doc_tree)
    marct_tree = xslt_marc_dump_transformer(doc_tree)
    bib_dump = etree.tostring(bib_tree, encoding='utf-8')
    # marc_dump =  etree.tostring(marct_tree, encoding='utf-8')
    doc_tree_t = xslt_transformer(doc_tree)
    doc = doc_tree_to_dict(doc_tree_t)
    # holders = doc.get('holders', list())
    # if holders:
    #     # keep only unique holders
    #     doc['holders'] = list(set(holders))
    # linked_docs = []
    # if analitic_level == '1':
    #     doc['holders'] = []
    #
    #     solr = sunburnt.SolrInterface(settings.SOLR['host'])
    #     linked_query = solr.query(**{'linked-record-number_s':record.record_id.replace(u"\\",u'\\\\')})
    #     linked_query = linked_query.field_limit("id")
    #     linked_results = linked_query.execute()
    #
    #     linked_doc_ids = []
    #     for linked_doc in linked_results:
    #         linked_doc_ids.append(linked_doc['id'])
    #
    #     records =  list(Ebook.objects.using('records').filter(gen_id__in=linked_doc_ids))
    #     records +=  list(Record.objects.using('records').filter(gen_id__in=linked_doc_ids))
    #
    #     for record in records:
    #         record_dict = {}
    #         record_dict['record'] = xml_doc_to_dict(record.content)
    #         record_dict['id'] = record.gen_id
    #         linked_docs.append(record_dict)

    #        for doc in mlt_docs:
    #            doc['record'] = records_dict.get(doc['id'])

    # access_count = DetailAccessLog.objects.filter(catalog=catalog, gen_id=record.gen_id).count()

    return render(request, 'ssearch/frontend/print.html', {
        'doc_dump': bib_dump.replace('<b/>', ''),
        # 'marc_dump': marc_dump,
        'doc': doc,
        'gen_id': gen_id,
        # 'linked_docs': linked_docs,
        # 'access_count': access_count
    })
Example #16
def mba_orders(request):
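    # Interlibrary loan (ILL) orders of the current user: each transaction's
    # APDUs are mapped to readable type and status titles, service dates are
    # reformatted, and the ordered item is rendered to a bibliographic
    # description via XSLT.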
    user_id = request.user.id

    def format_time(datestr='', timestr=''):
        if datestr:
            datestr = time.strptime(datestr, "%Y%m%d")
            datestr = time.strftime("%d.%m.%Y", datestr)
        if timestr:
            timestr = time.strptime(timestr, "%H%M%S")
            timestr = time.strftime("%H:%M:%S", timestr)
        return datestr + ' ' + timestr

    order_manager = OrderManager(settings.ORDERS['db_catalog'], settings.ORDERS['rdx_path'])
    transactions = order_manager.get_orders(user_id)
    orgs = {}
    # for org_id in transactions_by_org:
    orders = []
    for transaction in transactions:
        # print ET.tostring(transaction.illapdus[0].delivery_status.supplemental_item_description, encoding="UTF-8")
        try:
            doc = etree.XML(etree.tostring(transaction.illapdus[0].delivery_status.supplemental_item_description,
                                           encoding="UTF-8"))
            result_tree = xslt_bib_draw_transformer(doc)
            res = str(result_tree)
        except Exception, e:
            raise e
        res = res.replace('– –', '—')
        res = res.replace('\n', '</br>')
        order = {}

        if transaction.status in order_statuses_titles:
            order['status'] = order_statuses_titles[transaction.status]
        else:
            order['status'] = transaction.status
        order['type'] = ''
        order['copy_info'] = ''
        order['apdus'] = []

        for apdu in transaction.illapdus:
            apdu_map = {}

            apdu_map['type'] = apdu.delivery_status.type
            if apdu.delivery_status.type in apdy_type_titles:
                apdu_map['type_title'] = apdy_type_titles[apdu.delivery_status.type]
            else:
                apdu_map['type_title'] = apdu.delivery_status.type

            apdu_map['datetime'] = format_time(apdu.delivery_status.service_date_time['dtots']['date'],
                                               apdu.delivery_status.service_date_time['dtots']['time'])

            if isinstance(apdu.delivery_status, ILLRequest):
                order['order_id'] = apdu.delivery_status.transaction_id['tq']
                order['org_info'] = org_by_id(apdu.delivery_status.responder_id['pois']['is'])
                if apdu.delivery_status.third_party_info_type['tpit']['stl']['stlt']['si']:
                    order['org_info'] = org_by_id(
                        apdu.delivery_status.third_party_info_type['tpit']['stl']['stlt']['si'])
                apdu_map['requester_note'] = apdu.delivery_status.requester_note
                order['record'] = res
                order['user_comments'] = apdu.delivery_status.requester_note
                apdu_map['record'] = res
                if apdu.delivery_status.ill_service_type == '1':
                    apdu_map['service_type'] = u'доставка'
                    order['type'] = 'doc'

                elif apdu.delivery_status.ill_service_type == '2':
                    apdu_map['service_type'] = u'копия'
                    order['type'] = 'copy'
                    order['copy_info'] = apdu.delivery_status.item_id['pagination']

                order['type_title'] = apdu_map['service_type']
                order['can_delete'] = check_for_can_delete(transaction)

            else:
                # print apdu.delivery_status.type
                apdu_map['responder_note'] = apdu.delivery_status.responder_note
                if apdu.delivery_status.type == 'ILLAnswer':
                    apdu_map['reason_will_supply'] = apdu.delivery_status.results_explanation['wsr']['rws']
                    apdu_map['reason_will_supply_title'] = ''
                    if apdu_map['reason_will_supply'] in apdu_reason_will_supply:
                        apdu_map['reason_will_supply_title'] = apdu_reason_will_supply[apdu_map['reason_will_supply']]

                    apdu_map['unfilled_results'] = apdu.delivery_status.results_explanation['ur']['ru']
                    apdu_map['unfilled_results_title'] = ''
                    if apdu_map['unfilled_results'] in apdu_unfilled_results:
                        apdu_map['unfilled_results_title'] = apdu_unfilled_results[apdu_map['unfilled_results']]

            # apdu_map['record'] = res
            order['apdus'].append(apdu_map)

        orders.append(order)
Example #17
def detail(request, gen_id):
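    # Record detail page: log the access, render the MARC record to a
    # bibliographic dump, a raw MARC dump and a dict, de-duplicate holders,
    # and, for analytic-level records (leader08 == '1'), pull the linked
    # records from Solr.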
    catalog = None
    try:
        record = Record.objects.using('records').get(gen_id=gen_id)
        catalog = 'records'
    except Record.DoesNotExist:
        raise Http404()

    # DetailAccessLog(catalog=catalog, gen_id=record.gen_id).save()
    DetailAccessLog.objects.create(catalog=catalog, gen_id=gen_id, date_time=datetime.datetime.now())

    doc_tree = etree.XML(record.content)
    leader8 = doc_tree.xpath('/record/leader/leader08')

    analitic_level = u'0'
    if len(leader8) == 1:
        analitic_level = leader8[0].text

    bib_tree = xslt_bib_draw_transformer(doc_tree)
    marct_tree = xslt_marc_dump_transformer(doc_tree)
    bib_dump = etree.tostring(bib_tree, encoding='utf-8')
    marc_dump = etree.tostring(marct_tree, encoding='utf-8')
    doc_tree_t = xslt_transformer(doc_tree)
    doc = doc_tree_to_dict(doc_tree_t)
    holders = doc.get('holders', list())
    if holders:
        # keep only unique holders
        doc['holders'] = list(set(holders))
    linked_docs = []
    if analitic_level == '1':
        doc['holders'] = []

        solr_connection = httplib2.Http(disable_ssl_certificate_validation=True)
        solr = sunburnt.SolrInterface(settings.SOLR['host'], http_connection=solr_connection)

        linked_query = solr.query(**{'linked-record-number_s': record.record_id.replace(u"\\", u'\\\\')})
        linked_query = linked_query.field_limit("id")
        linked_results = linked_query.execute()

        linked_doc_ids = []
        for linked_doc in linked_results:
            linked_doc_ids.append(linked_doc['id'])

        records = list(Record.objects.using('records').filter(gen_id__in=linked_doc_ids))

        for record in records:
            record_dict = {}
            record_dict['record'] = xml_doc_to_dict(record.content)
            record_dict['id'] = record.gen_id
            linked_docs.append(record_dict)

            #        for doc in mlt_docs:
            #            doc['record'] = records_dict.get(doc['id'])

    access_count = DetailAccessLog.objects.filter(catalog=catalog, gen_id=record.gen_id).count()

    return render(request, 'ssearch/frontend/detail.html', {
        'doc_dump': rusmarc_template.beautify(bib_dump.replace('<b/>', '')),
        'marc_dump': marc_dump,
        'doc': doc,
        'gen_id': gen_id,
        'linked_docs': linked_docs,
        'access_count': access_count
    })

    # NOTE: the code below is unreachable; the view has already returned above.
    def clean_holder_title(holder):
        return holder.get('org', {}).get('title', '') \
            .lower() \
            .replace(u'цбс', '') \
            .replace(u'го', '') \
            .replace(u'г.', '') \
            .strip()

    holders.sort(key=clean_holder_title)

    return holders
Example #18
def to_print(request, gen_id):
    catalog = None
    try:
        record = Record.objects.using('records').get(gen_id=gen_id)
        catalog = 'records'
    except Record.DoesNotExist:
        raise Http404()

    # DetailAccessLog(catalog=catalog, gen_id=record.gen_id).save()
    # DetailAccessLog.objects.create(catalog=catalog, gen_id=gen_id, date_time=datetime.datetime.now())

    doc_tree = etree.XML(record.content)
    # leader8 = doc_tree.xpath('/record/leader/leader08')
    #
    # analitic_level = u'0'
    # if len(leader8) == 1:
    #     analitic_level = leader8[0].text

    bib_tree = xslt_bib_draw_transformer(doc_tree)
    marct_tree = xslt_marc_dump_transformer(doc_tree)
    bib_dump = etree.tostring(bib_tree, encoding='utf-8')
    # marc_dump =  etree.tostring(marct_tree, encoding='utf-8')
    doc_tree_t = xslt_transformer(doc_tree)
    doc = doc_tree_to_dict(doc_tree_t)
    # holders = doc.get('holders', list())
    # if holders:
    #     # keep only unique holders
    #     doc['holders'] = list(set(holders))
    # linked_docs = []
    # if analitic_level == '1':
    #     doc['holders'] = []
    #
    #     solr = sunburnt.SolrInterface(settings.SOLR['host'])
    #     linked_query = solr.query(**{'linked-record-number_s':record.record_id.replace(u"\\",u'\\\\')})
    #     linked_query = linked_query.field_limit("id")
    #     linked_results = linked_query.execute()
    #
    #     linked_doc_ids = []
    #     for linked_doc in linked_results:
    #         linked_doc_ids.append(linked_doc['id'])
    #
    #     records =  list(Ebook.objects.using('records').filter(gen_id__in=linked_doc_ids))
    #     records +=  list(Record.objects.using('records').filter(gen_id__in=linked_doc_ids))
    #
    #     for record in records:
    #         record_dict = {}
    #         record_dict['record'] = xml_doc_to_dict(record.content)
    #         record_dict['id'] = record.gen_id
    #         linked_docs.append(record_dict)

    #        for doc in mlt_docs:
    #            doc['record'] = records_dict.get(doc['id'])

    # access_count = DetailAccessLog.objects.filter(catalog=catalog, gen_id=record.gen_id).count()

    return render(
        request,
        'ssearch/frontend/print.html',
        {
            'doc_dump': bib_dump.replace('<b/>', ''),
            # 'marc_dump': marc_dump,
            'doc': doc,
            'gen_id': gen_id,
            # 'linked_docs': linked_docs,
            # 'access_count': access_count
        })
Example #19
def detail(request, gen_id):
    catalog = None
    try:
        record = Record.objects.using('records').get(gen_id=gen_id)
        catalog = 'records'
    except Record.DoesNotExist:
        raise Http404()

    # DetailAccessLog(catalog=catalog, gen_id=record.gen_id).save()
    DetailAccessLog.objects.using(MAIN_PORTAL_DB).create(
        catalog=catalog, gen_id=gen_id, date_time=datetime.datetime.now())

    doc_tree = etree.XML(record.content)
    leader8 = doc_tree.xpath('/record/leader/leader08')

    analitic_level = u'0'
    if len(leader8) == 1:
        analitic_level = leader8[0].text

    bib_tree = xslt_bib_draw_transformer(doc_tree)
    marct_tree = xslt_marc_dump_transformer(doc_tree)
    bib_dump = etree.tostring(bib_tree, encoding='utf-8')
    marc_dump = etree.tostring(marct_tree, encoding='utf-8')
    doc_tree_t = xslt_transformer(doc_tree)
    doc = doc_tree_to_dict(doc_tree_t)
    holders = doc.get('holders', list())
    if holders:
        # keep only unique holders
        doc['holders'] = list(set(holders))
    linked_docs = []
    if analitic_level == '1':
        doc['holders'] = []

        solr_connection = httplib2.Http(
            disable_ssl_certificate_validation=True)
        solr = sunburnt.SolrInterface(settings.SOLR['host'],
                                      http_connection=solr_connection)

        linked_query = solr.query(**{
            'linked-record-number_s':
            record.record_id.replace(u"\\", u'\\\\')
        })
        linked_query = linked_query.field_limit("id")
        linked_results = linked_query.execute()

        linked_doc_ids = []
        for linked_doc in linked_results:
            linked_doc_ids.append(linked_doc['id'])

        records = list(
            Record.objects.using('records').filter(gen_id__in=linked_doc_ids))

        for record in records:
            record_dict = {}
            record_dict['record'] = xml_doc_to_dict(record.content)
            record_dict['id'] = record.gen_id
            linked_docs.append(record_dict)

            #        for doc in mlt_docs:
            #            doc['record'] = records_dict.get(doc['id'])

    access_count = DetailAccessLog.objects.using(MAIN_PORTAL_DB).filter(
        catalog=catalog, gen_id=record.gen_id).count()

    return render(
        request, 'ssearch/frontend/detail.html', {
            'doc_dump': rusmarc_template.beautify(bib_dump.replace('<b/>',
                                                                   '')),
            'marc_dump': marc_dump,
            'doc': doc,
            'gen_id': gen_id,
            'linked_docs': linked_docs,
            'access_count': access_count
        })

    # NOTE: the code below is unreachable; the view has already returned above.
    def clean_holder_title(holder):
        return holder.get('org', {}).get('title', '') \
            .lower() \
            .replace(u'цбс', '') \
            .replace(u'го', '') \
            .replace(u'г.', '') \
            .strip()

    holders.sort(key=clean_holder_title)

    return holders