class ItemList(SimpleGetMixin, SimpleView):
    """
    Paginated list of items. Use the 'limit' and 'offset' query
    parameters for paging.
    """
    queryset = solr.Queryset().filter(type='Item')
    serializer_class = serializers.ItemSerializer
    resource_name = 'items'
    # Fields clients may sort the result set by.
    ordering = [
        'call_number', 'barcode', 'id', 'record_number', 'parent_bib_id',
        'parent_bib_record_number', 'volume', 'copy_number', 'checkout_date'
    ]
    # Fields clients may filter the result set by.
    filter_fields = [
        'record_number', 'call_number', 'volume', 'volume_sort',
        'copy_number', 'barcode', 'long_messages', 'internal_notes',
        'public_notes', 'local_code1', 'number_of_renewals',
        'item_type_code', 'price', 'internal_use_count', 'iuse3_count',
        'total_checkout_count', 'total_renewal_count',
        'year_to_date_checkout_count', 'last_year_to_date_checkout_count',
        'location_code', 'status_code', 'due_date', 'checkout_date',
        'last_checkin_date', 'overdue_date', 'recall_date',
        'record_creation_date', 'record_last_updated_date',
        'record_revision_number', 'suppressed', 'parent_bib_record_number',
        'parent_bib_title', 'parent_bib_main_author',
        'parent_bib_publication_year', 'call_number_type'
    ]
class CallnumbermatchesList(SimpleGetMixin, SimpleView):
    """
    Returns the first X matching call numbers, where X is the supplied
    limit. Pagination (offset) is not supported. You can filter using
    the following fields: callNumber, locationCode, and callNumberType.
    """
    queryset = solr.Queryset().filter(
        type='Item').only('call_number').order_by('call_number_sort')
    serializer_class = serializers.ItemSerializer
    resource_name = 'callnumber_matches'
    filter_fields = ['call_number', 'location_code', 'call_number_type']

    def get_page_data(self, queryset, request):
        """
        Return up to `limit` distinct, non-null call numbers from
        `queryset`, in the queryset's existing sort order. Only the
        'limit' query parameter is honored; 'offset' is not supported
        for this resource.
        """
        limit_p = settings.REST_FRAMEWORK.get('PAGINATE_BY_PARAM', 'limit')
        max_limit = settings.REST_FRAMEWORK.get('MAX_PAGINATE_BY', 500)
        default_limit = settings.REST_FRAMEWORK.get('PAGINATE_BY', 10)
        # Clamp the client-supplied limit to the configured maximum.
        limit = min(int(request.query_params.get(limit_p, default_limit)),
                    max_limit)
        # Walk results in order, collecting distinct call numbers. The
        # parallel `seen` set makes the duplicate check O(1) instead of
        # scanning the accumulating list on every iteration.
        data, seen = [], set()
        i, count = 0, queryset.count()
        while len(data) < limit and i < count:
            call_number = queryset[i].get('call_number', None)
            if call_number is not None and call_number not in seen:
                data.append(call_number)
                seen.add(call_number)
            i += 1
        return data
def commit_to_redis(self, vals):
    """
    Commit queued holdings updates (`vals['holdings']`) to Redis, and
    apply the corresponding deletions/appends to each eresource's Solr
    record.

    `vals['holdings']` maps eresource record numbers to dicts with
    optional 'delete' and 'append' lists of holdings record numbers.
    """
    self.log('Info', 'Committing Holdings updates to Redis...')
    h_vals = vals.get('holdings', {})
    rev_handler = redisobjs.RedisObject('reverse_holdings_list', '0')
    reverse_h_list = rev_handler.get()
    for er_rec_num, lists in (h_vals or {}).items():
        s = solr.Queryset().filter(record_number=er_rec_num)
        try:
            record = s[0]
        except IndexError:
            # Eresource not (yet) in Solr; still update Redis below so
            # the holdings bookkeeping stays consistent.
            record = None
        er_handler = redisobjs.RedisObject('eresource_holdings_list',
                                           er_rec_num)
        h_list = er_handler.get()
        for h_rec_num in lists.get('delete', []):
            h_index = h_list.index(h_rec_num)
            del h_list[h_index]
            del reverse_h_list[h_rec_num]
            # Guard: previously this dereferenced `record` even when it
            # was None, raising AttributeError mid-update.
            if record is not None:
                del record.holdings[h_index]
        for h_rec_num in lists.get('append', []):
            h_list.append(h_rec_num)
            reverse_h_list[h_rec_num] = er_rec_num
        if record is not None:
            record.save()
        er_handler.set(h_list)
    rev_handler.set(reverse_h_list)
class BibList(SimpleGetMixin, SimpleView):
    """
    Paginated list of bibs. Use the 'limit' and 'offset' query
    parameters for paging.
    """
    queryset = solr.Queryset(
        using=settings.REST_VIEWS_HAYSTACK_CONNECTIONS['Bibs'])
    serializer_class = serializers.BibSerializer
    resource_name = 'bibs'
    # Fields clients may sort the result set by.
    ordering = [
        'call_number', 'id', 'record_number', 'material_type', 'timestamp',
        'main_call_number_sort'
    ]
    # Fields clients may filter the result set by.
    filter_fields = [
        'record_number', 'call_number', 'id', 'suppressed', 'material_type',
        'issn_numbers', 'timestamp', 'full_title', 'main_title', 'subtitle',
        'statement_of_responsibility', 'uniform_title', 'alternate_titles',
        'related_titles', 'series', 'creator', 'contributors',
        'series_creators', 'people', 'corporations', 'meetings', 'imprints',
        'publication_country', 'publication_places', 'publishers',
        'publication_dates', 'full_subjects', 'general_terms', 'topic_terms',
        'genre_terms', 'era_terms', 'form_terms', 'other_terms',
        'physical_characteristics', 'toc_notes', 'context_notes',
        'summary_notes', 'main_call_number', 'loc_call_numbers',
        'dewey_call_numbers', 'other_call_numbers', 'sudoc_numbers',
        'isbn_numbers', 'lccn_numbers', 'oclc_numbers'
    ]
class ItemStatusesList(SimpleGetMixin, SimpleView):
    """
    Paginated list of item statuses. Use the 'limit' and 'offset' query
    parameters for paging.
    """
    # Docstring previously said "list of bibs" — a copy/paste error
    # that surfaced in the browsable API description.
    queryset = solr.Queryset().filter(type='ItemStatus')
    serializer_class = serializers.ItemStatusSerializer
    resource_name = 'itemstatuses'
    ordering = ['code', 'label']
    filter_fields = ['code', 'label']
class LocationList(SimpleGetMixin, SimpleView):
    """
    Paginated list of locations. Use the 'limit' and 'offset' query
    parameters for paging.
    """
    # Docstring previously said "list of bibs" — a copy/paste error
    # that surfaced in the browsable API description.
    queryset = solr.Queryset().filter(type='Location')
    serializer_class = serializers.LocationSerializer
    resource_name = 'locations'
    ordering = ['code', 'label']
    filter_fields = ['code', 'label']
class MarcList(SimpleGetMixin, SimpleView):
    """
    Paginated list of MARC records. Use the 'limit' and 'offset' query
    parameters for paging.
    """
    queryset = solr.Queryset(
        using=settings.REST_VIEWS_HAYSTACK_CONNECTIONS['Marc'])
    serializer_class = serializers.MarcSerializer
    resource_name = 'marc'
    # Besides record_number, filtering is allowed on MARC field/subfield
    # patterns (the regex entries are interpreted by the filter class).
    filter_fields = [
        'record_number',
        '/^(mf_)?\\d{3}$/',
        '/^(sf_)?\\d{3}[a-z0-9]$/'
    ]
    filter_class = filters.MarcFilter
def get_location_manifest(self, location_code, using=None):
    """
    Query the underlying Solr index to pull a list of all item ids for
    a given location code, pre-sorted based on the
    `solr_shelflist_sort_criteria` class attribute. Returns the list of
    ids, in order.
    """
    connection = self.get_backend(using=using).conn
    manifest_qs = solr.Queryset(conn=connection).filter(
        type=self.type_name, location_code=location_code
    ).order_by(*self.solr_shelflist_sort_criteria).only('id')
    # Request every matching row in a single response instead of paging.
    response = manifest_qs.set_raw_params(
        {'rows': len(manifest_qs)}).full_response
    return [row['id'] for row in response]
def cache_all_lookups(self):
    """
    Build the item-status code -> label lookup table from the Solr
    index and cache it under the 'status' key.
    """
    qs = solr.Queryset().filter(type='ItemStatus').only('code', 'label')
    lookup = {}
    for r in qs:
        # A record with no 'code' can't be keyed, so it is skipped; a
        # record with a code but no 'label' is cached with None.
        # (Replaces the previous nested try/except KeyError blocks,
        # which had exactly this behavior, with the plain equivalent.)
        if 'code' in r:
            lookup[r['code']] = r.get('label')
    self.cache_lookup('status', lookup)
def get_page_data(self, queryset, request):
    """
    Build the response payload: for each value of `self.facet_field`
    present in `queryset`'s facet results, look up the first item (by
    call number sort) and embed a summary of it.
    """
    ff = self.facet_field
    facets = queryset.full_response.facets['facet_fields'][ff]
    fields = [
        'id', 'parent_bib_title', 'parent_bib_record_number',
        'call_number', 'barcode', 'record_number', 'call_number_type'
    ]
    # The facet list alternates value, count, value, count, ... so the
    # number of distinct facet values is half its length. Use floor
    # division so the count stays an int under Python 3 as well.
    total_count = len(facets) // 2
    items = []
    for key in facets[0:len(facets):2]:
        facet_qs = solr.Queryset()
        # Reuse the caller's filter query so the per-facet lookup is
        # restricted to the same result set.
        facet_qs._search_params['fq'] = queryset._search_params['fq']
        facet_qs = facet_qs.filter(**{ff: key})
        facet_qs = facet_qs.order_by('call_number_sort').only(*fields)
        # Fetch the first result once; each `facet_qs[0]` subscription
        # previously re-queried for the same document.
        first = facet_qs[0]
        item_uri = APIUris.get_uri('items-detail', id=first['id'],
                                   req=request, absolute=True)
        items.append({
            '_links': {'self': {'href': item_uri}},
            'id': first.get('id', None),
            'parentBibRecordNumber': first.get('parent_bib_record_number',
                                               None),
            'parentBibTitle': first.get('parent_bib_title', None),
            'recordNumber': first.get('record_number', None),
            'callNumber': first.get('call_number', None),
            'callNumberType': first.get('call_number_type', None),
            'barcode': first.get('barcode', None),
            'locationCode': key,
        })
    data = OrderedDict()
    data['totalCount'] = total_count
    data['_links'] = {'self': request.build_absolute_uri()}
    data['_embedded'] = {'items': items}
    return data
def prepare(self, obj):
    """
    Prepares data on the object for indexing. Here, if the object
    doesn't have any user data fields defined, then we know it's coming
    from Sierra and that means we need to query the Solr index to add
    any existing values for these fields to the object before it's
    re-indexed.
    """
    self.prepared_data = super(ShelflistItemIndex, self).prepare(obj)
    if self.has_any_user_data(obj):
        return self.prepared_data
    # Pull any previously-indexed user-data values forward so the
    # re-index doesn't wipe them out.
    existing = solr.Queryset(conn=self.solr_conn).get_one(id=obj.id)
    if existing:
        for field in self.user_data_fields:
            self.prepared_data[field] = getattr(existing, field, None)
    return self.prepared_data
class ItemStatusesDetail(SimpleGetMixin, SimpleView):
    """
    Retrieve one Item Status.
    """
    queryset = solr.Queryset().filter(type='ItemStatus')
    serializer_class = serializers.ItemStatusSerializer
    multi = False

    def get_object(self):
        # Look the status up by the 'code' URL kwarg; 404 when absent.
        matches = self.get_queryset().filter(code=self.kwargs['code'])
        try:
            return matches[0]
        except IndexError:
            raise Http404
class EResourceDetail(SimpleGetMixin, SimpleView):
    """
    Retrieve one eresource.
    """
    queryset = solr.Queryset().filter(type='eResource')
    serializer_class = serializers.EResourceSerializer
    multi = False

    def get_object(self):
        # Look the eresource up by the 'id' URL kwarg; 404 when absent.
        matches = self.get_queryset().filter(id=self.kwargs['id'])
        try:
            return matches[0]
        except IndexError:
            raise Http404
class ItemTypesDetail(SimpleGetMixin, SimpleView):
    """
    Retrieve one item type.
    """
    # Docstring previously said "Retrieve one Location." — a copy/paste
    # error that surfaced in the browsable API description.
    queryset = solr.Queryset().filter(type='Itype')
    serializer_class = serializers.ItemTypeSerializer
    resource_name = 'itemtypes'
    multi = False

    def get_object(self):
        # Look the item type up by the 'code' URL kwarg; 404 when absent.
        queryset = self.get_queryset()
        try:
            obj = queryset.filter(code=self.kwargs['code'])[0]
        except IndexError:
            raise Http404
        else:
            return obj
class EResourceList(SimpleGetMixin, SimpleView):
    """
    Paginated list of eresources. Use the 'limit' and 'offset' query
    parameters for paging.
    """
    queryset = solr.Queryset().filter(type='eResource')
    serializer_class = serializers.EResourceSerializer
    resource_name = 'eresources'
    # Fields clients may sort the result set by.
    ordering = [
        'record_number', 'parent_bib_record_number', 'eresource_type',
        'publisher', 'title', 'alert'
    ]
    # Fields clients may filter the result set by.
    filter_fields = [
        'record_number', 'parent_bib_record_number', 'eresource_type',
        'publisher', 'title', 'alternate_titles', 'subjects', 'summary',
        'internal_notes', 'public_notes', 'alert', 'holdings', 'suppressed'
    ]
class MarcDetail(SimpleGetMixin, SimpleView):
    """
    Retrieve one MARC record.
    """
    queryset = solr.Queryset(
        using=settings.REST_VIEWS_HAYSTACK_CONNECTIONS['Marc'])
    serializer_class = serializers.MarcSerializer
    multi = False

    def get_object(self):
        # Look the MARC record up by the 'id' URL kwarg; 404 when absent.
        matches = self.get_queryset().filter(id=self.kwargs['id'])
        try:
            return matches[0]
        except IndexError:
            raise Http404
class BibDetail(SimpleGetMixin, SimpleView):
    """
    Retrieve one bib.
    """
    queryset = solr.Queryset(
        using=settings.REST_VIEWS_HAYSTACK_CONNECTIONS['Bibs'])
    serializer_class = serializers.BibSerializer
    resource_name = 'bibs'
    multi = False

    def get_object(self):
        # Look the bib up by the 'id' URL kwarg; 404 when absent.
        matches = self.get_queryset().filter(id=self.kwargs['id'])
        try:
            return matches[0]
        except IndexError:
            raise Http404
def cache_all_lookups(self):
    """
    Build code -> label lookup tables for Location, ItemStatus, and
    Itype records from the Solr index and cache each one.
    """
    types = ['Location', 'ItemStatus', 'Itype']
    qs = solr.Queryset(page_by=1000).filter(type__in=types)
    qs = qs.only('type', 'code', 'label')
    lookups = {'Location': {}, 'ItemStatus': {}, 'Itype': {}}
    for r in qs:
        # A record missing 'type' or 'code' (or with an unexpected
        # type) can't be keyed, so it is skipped; a record with no
        # 'label' is cached with None. (Replaces nested try/except
        # KeyError blocks that had exactly this behavior.)
        rec_type = r.get('type')
        if rec_type in lookups and 'code' in r:
            lookups[rec_type][r['code']] = r.get('label')
    self.cache_lookup('location', lookups['Location'])
    self.cache_lookup('status', lookups['ItemStatus'])
    self.cache_lookup('item_type', lookups['Itype'])
class ShelflistItemDetail(SimplePutMixin, SimplePatchMixin, SimpleGetMixin,
                          SimpleView):
    """
    Retrieve one item.
    """
    queryset = solr.Queryset().filter(type='Item')
    serializer_class = serializers.ShelflistItemSerializer
    multi = False
    parser_classes = (JSONPatchParser, JSONParser)
    permission_classes = (permissions.IsAuthenticatedOrReadOnly, )

    def get_object(self):
        # Look the item up by the 'shelflistitem_id' URL kwarg; 404
        # when absent.
        matches = self.get_queryset().filter(
            id=self.kwargs['shelflistitem_id'])
        try:
            return matches[0]
        except IndexError:
            raise Http404
def get_location_set_from_recs(self, records, using=None):
    """
    Query the underlying Solr index to pull the set of location codes
    represented by the given `records` (ItemRecord queryset, or
    RecordMetadata queryset of items).
    """
    pks = [row['pk'] for row in records.values('pk')]
    backend_conn = self.get_backend(using=using).conn
    location_qs = solr.Queryset(conn=backend_conn).filter(id__in=pks)
    # Facet on location_code with rows=0: we only want the facet
    # counts, not the documents themselves.
    raw_params = {
        'rows': 0,
        'facet': 'true',
        'facet.field': 'location_code',
        'facet.mincount': 1
    }
    facets = location_qs.set_raw_params(raw_params).full_response.facets
    try:
        # The facet list alternates value, count, ...; take the values.
        codes = facets['facet_fields']['location_code'][0::2]
    except KeyError:
        return set()
    return set(codes)
def final_callback(self, vals={}, status='success'):
    # Finish an export run: delegate eresource vals to the
    # eresources_to_solr exporter's final_callback, then apply the
    # queued holdings updates to Redis and to each eresource's Solr
    # record, and finally commit the Haystack index.
    #
    # NOTE(review): `vals={}` is a mutable default argument; it appears
    # to be read-only here (collapse_vals reassigns, .get reads), so it
    # is benign — but confirm before relying on it.
    if type(vals) is list:
        # Parallel chunks report a list of val dicts; merge them.
        vals = collapse_vals(vals)
    h_vals = vals.get('holdings', {})
    er_vals = vals.get('eresources', {})
    self.eresources_to_solr(self.instance.pk, self.export_filter,
                            self.export_type,
                            self.options).final_callback(er_vals, status)
    # commit changes to Redis and commit deletions to Solr
    self.log('Info', 'Committing updates to Redis...')
    rev_handler = redisobjs.RedisObject('reverse_holdings_list', '0')
    reverse_h_list = rev_handler.get()
    # h_vals maps eresource rec nums -> {'delete': [...], 'append': [...]}
    # of holdings rec nums (built in export_records).
    for er_rec_num, lists in h_vals.iteritems():
        s = solr.Queryset().filter(record_number=er_rec_num)
        try:
            record = s[0]
        except IndexError:
            record = None
        er_handler = redisobjs.RedisObject('eresource_holdings_list',
                                           er_rec_num)
        h_list = er_handler.get()
        for h_rec_num in lists.get('delete', []):
            # Remove the holding at the same index from the Redis list
            # and from the Solr record's holdings (kept in lockstep).
            h_index = h_list.index(h_rec_num)
            del(h_list[h_index])
            del(reverse_h_list[h_rec_num])
            # NOTE(review): `record` can be None here (IndexError path
            # above) and would raise AttributeError — verify whether a
            # missing Solr record is possible at this point.
            del(record.holdings[h_index])
        for h_rec_num in lists.get('append', []):
            h_list.append(h_rec_num)
            reverse_h_list[h_rec_num] = er_rec_num
        record.save()
        er_handler.set(h_list)
    rev_handler.set(reverse_h_list)
    # Commit everything saved above (export_records saves with
    # commit=False) in one go.
    index = self.index_class()
    index.commit(using=self.hs_conn)
def export_records(self, records): eresources, er_mapping = set(), {} # First we loop through the holding records and determine which # eresources need to be updated. er_mapping maps eresource rec # nums to lists of holdings rec nums to update. rev_handler = redisobjs.RedisObject('reverse_holdings_list', '0') reverse_holdings_list = rev_handler.get() or {} for h in records: h_rec_num = h.record_metadata.get_iii_recnum(True) old_er_rec_num = reverse_holdings_list.get(h_rec_num, None) try: er_record = h.resourcerecord_set.all()[0] except IndexError: er_record, er_rec_num = None, None else: er_rec_num = er_record.record_metadata.get_iii_recnum(True) if old_er_rec_num and old_er_rec_num != er_rec_num: # if the current attached er rec_num in Sierra is # different than what's in Redis, then we need to # delete this holding from the old er record. old_h_data = er_mapping.get(old_er_rec_num, []) old_h_data.append({ 'delete': True, 'rec_num': h_rec_num, 'title': None }) er_mapping[old_er_rec_num] = { 'er_record': None, 'holdings': old_h_data } if er_rec_num: holdings = er_mapping.get(er_rec_num, {}).get('holdings', []) try: vf = h.bibrecord_set.all()[0].record_metadata\ .varfield_set.all() except IndexError: title = None else: title = helpers.get_varfield_vals( vf, 't', '245', cm_kw_params={'subfields': 'a'}, content_method='display_field_content') data = {'delete': False, 'title': title, 'rec_num': h_rec_num} holdings.append(data) er_mapping[er_rec_num] = { 'er_record': er_record, 'holdings': holdings } h_vals = {} #self.log('Info', er_mapping) for er_rec_num, entry in er_mapping.iteritems(): er_record, holdings = entry['er_record'], entry['holdings'] # if we've already indexed the eresource this holding is # attached to, then we want to pull the record from Solr # and make whatever changes to it rather than reindex the # whole record and all attached holdings from scratch. 
# Since export jobs get broken up and run in parallel, we # want to hold off on actually committing to Solr and # updating Redis until the callback runs. s = solr.Queryset().filter(record_number=er_rec_num) if s.count() > 0: rec_queue = h_vals.get(er_rec_num, {}) rec_append_list = rec_queue.get('append', []) rec_delete_list = rec_queue.get('delete', []) record = s[0] red = redisobjs.RedisObject('eresource_holdings_list', er_rec_num) red_h_list = red.get() for data in holdings: try: red_h_index = red_h_list.index(data.get('rec_num')) except AttributeError: self.log('Info', '{}'.format(data.get('rec_num'))) except ValueError: record.holdings.append(data.get('title')) rec_append_list.append(data.get('rec_num')) else: if data.get('delete'): # we wait until the final callback to # delete anything from Solr, because that # will mess up our holdings index number rec_delete_list.append(data.get('rec_num')) else: record.holdings[red_h_index] = data.get('title') record.save(commit=False) rec_queue['append'] = rec_append_list rec_queue['delete'] = rec_delete_list h_vals[er_rec_num] = rec_queue else: # if we haven't indexed the record already, we'll add # it using the Haystack indexer. eresources.add(er_record) if eresources: eresources = list(eresources) ret_er_vals = self.eresources_to_solr.export_records(eresources) return {'holdings': h_vals, 'eresources': ret_er_vals}
def get_queryset(self):
    """
    Items at the location given by the 'code' URL kwarg, sorted in
    shelflist order.
    """
    shelflist_order = ('call_number_type', 'call_number_sort',
                       'volume_sort', 'copy_number')
    qs = solr.Queryset().filter(type='Item',
                                location_code=self.kwargs['code'])
    return qs.order_by(*shelflist_order)
class FirstItemPerLocationList(SimpleGetMixin, SimpleView):
    """
    Returns the first item (by call number) for each location within a
    filtered result set.
    """
    facet_field = 'location_code'
    queryset = solr.Queryset().filter(type='Item').search('*:*', params={
        'facet': 'true',
        'facet.field': facet_field,
        'facet.sort': 'index',
        'facet.mincount': 1
    })
    serializer_class = serializers.ItemSerializer
    resource_name = 'firstitemperlocation'
    filter_fields = ['call_number', 'call_number_type', 'barcode']

    def get_page_data(self, queryset, request):
        """
        Build the response payload: for each value of
        `self.facet_field` present in `queryset`'s facet results, look
        up the first item (by call number sort) and embed a summary.
        """
        ff = self.facet_field
        facets = queryset.full_response.facets['facet_fields'][ff]
        fields = [
            'id', 'parent_bib_title', 'parent_bib_record_number',
            'call_number', 'barcode', 'record_number', 'call_number_type'
        ]
        # The facet list alternates value, count, value, count, ... so
        # the number of distinct facet values is half its length. Use
        # floor division so the count stays an int under Python 3 too.
        total_count = len(facets) // 2
        items = []
        for key in facets[0:len(facets):2]:
            facet_qs = solr.Queryset()
            # Reuse the caller's filter query so the per-facet lookup
            # is restricted to the same result set.
            facet_qs._search_params['fq'] = queryset._search_params['fq']
            facet_qs = facet_qs.filter(**{ff: key})
            facet_qs = facet_qs.order_by('call_number_sort').only(*fields)
            # Fetch the first result once; each `facet_qs[0]`
            # subscription previously re-queried the same document.
            first = facet_qs[0]
            item_uri = APIUris.get_uri('items-detail', id=first['id'],
                                       req=request, absolute=True)
            items.append({
                '_links': {'self': {'href': item_uri}},
                'id': first.get('id', None),
                'parentBibRecordNumber': first.get(
                    'parent_bib_record_number', None),
                'parentBibTitle': first.get('parent_bib_title', None),
                'recordNumber': first.get('record_number', None),
                'callNumber': first.get('call_number', None),
                'callNumberType': first.get('call_number_type', None),
                'barcode': first.get('barcode', None),
                'locationCode': key,
            })
        data = OrderedDict()
        data['totalCount'] = total_count
        data['_links'] = {'self': request.build_absolute_uri()}
        data['_embedded'] = {'items': items}
        return data