def unbis():
    '''Outputs UNBIS thesaurus subject heading records in MARCXML format.

    Route: /unbis?skip=n&limit=m
        skip=n   skip n records. Default is 0.
        limit=m  limit the number of records returned. Default is 50.

    Uses DLX's AuthSet.to_xml serialization to output fields 035 and 150
    in MARCXML.
    '''
    # request.args.get returns None when the parameter is absent, so int()
    # raises TypeError; a malformed value raises ValueError. Catch exactly
    # those instead of a bare except.
    try:
        skp = int(request.args.get('skip'))
    except (TypeError, ValueError):
        skp = 0

    try:
        limt = int(request.args.get('limit'))
    except (TypeError, ValueError):
        limt = 50

    # UNBIS thesaurus records are identified by an 035$a starting with 'T'.
    query = QueryDocument(
        Condition(tag='035', subfields={'a': re.compile('^T')})
    )

    authset = AuthSet.from_query(
        query,
        projection={'035': 1, '150': 1},
        skip=skp,
        limit=limt
    )

    return Response(authset.to_xml(), mimetype='text/xml')
def test_from_query(db):
    '''Exercise MarcSet.from_query with dicts, condition lists, and QueryDocuments.'''
    from dlx.marc import MarcSet, BibSet, AuthSet, QueryDocument, Condition

    # Query by explicit id list: both records come back as a lazy map.
    found = BibSet.from_query({'_id': {'$in': [1, 2]}})
    assert isinstance(found, (MarcSet, BibSet))
    assert found.count == 2
    assert isinstance(found.records, map)
    found.cache()
    assert isinstance(found.records, list)

    # skip/limit paging: the single page holds only record 1.
    found = BibSet.from_query({}, skip=0, limit=1)
    assert found.count == 1
    for record in found:
        assert record.id == 1
    # Iterating exhausted the underlying generator, but count is unaffected.
    assert len(list(found.records)) == 0
    assert found.count == 1

    # A list of Conditions is accepted directly.
    header_conditions = [
        Condition(tag='150', subfields={'a': 'Header'}),
        Condition(tag='200', modifier='not_exists'),
    ]
    auths = AuthSet.from_query(header_conditions)
    assert isinstance(auths, (MarcSet, AuthSet))
    assert auths.count == 1
    assert isinstance(auths.records, map)
    auths.cache()
    assert isinstance(auths.records, list)

    # A QueryDocument is accepted as well.
    doc = QueryDocument(Condition('245', modifier='exists'))
    found = BibSet.from_query(doc)
    assert isinstance(found, BibSet)
    assert found.count == 2
def test_iterate(db):
    '''Iterating a record set yields instances of the set's record class.'''
    from dlx.marc import Bib, BibSet, Auth, AuthSet

    assert all(isinstance(record, Bib) for record in BibSet.from_query({}))
    assert all(isinstance(record, Auth) for record in AuthSet.from_query({}))
def date_unbis(date):
    '''Outputs UNBIS thesaurus records for a given date as JSON.

    The date is a dynamic route segment in YYYYMMDD or YYYY-MM-DD format,
    e.g. /YYYY-MM-DD/json or /YYYYMMDD/json?skip=n&limit=m
        skip=n   skip n records. Default is 0.
        limit=m  limit the number of records returned. Default is 50.

    Returns a JSON object mapping each record's T code (035$a) to its
    English subject heading (150$a).
    '''
    # request.args.get returns None when the parameter is absent, so int()
    # raises TypeError; a malformed value raises ValueError.
    try:
        skp = int(request.args.get('skip'))
    except (TypeError, ValueError):
        skp = 0

    try:
        limt = int(request.args.get('limit'))
    except (TypeError, ValueError):
        limt = 50

    # Accept both YYYY-MM-DD and YYYYMMDD by dropping the separators.
    # NOTE(review): a malformed date simply matches nothing — there is no
    # fallback to today's records; confirm that is the intended contract.
    str_date = date.replace('-', '')

    query = QueryDocument(
        # 998$z holds a timestamp starting with the date — match that day.
        Condition(tag='998', subfields={'z': re.compile('^' + str_date)}),
        # Restrict to UNBIS thesaurus records (035$a starts with 'T').
        Condition(tag='035', subfields={'a': re.compile('^T')})
    )

    headings = {}
    authset = AuthSet.from_query(
        query,
        projection={'035': 1, '150': 1},
        skip=skp,
        limit=limt
    )

    for auth in authset:
        # A record can carry several 035$a values; keep only the T code(s).
        # startswith also guards against an empty subfield value.
        tcode = ''.join(v for v in auth.get_values('035', 'a') if v.startswith('T'))
        headings[tcode] = auth.get_value('150', 'a')

    return jsonify(headings)
def fetch_auth_data(self, proj_auth_dict):
    '''Return all authority records whose 191$a matches this body/session agenda.

    The match criteria come from fetch_agenda(self.body, self.session); the
    caller supplies the projection dict. skip=0/limit=0 fetches every match.
    '''
    criteria = fetch_agenda(self.body, self.session)
    agenda_query = QueryDocument(
        Condition(
            tag='191',
            subfields={'a': criteria}
        )
    )
    result_set = AuthSet.from_query(
        agenda_query,
        projection=proj_auth_dict,
        skip=0,
        limit=0
    )
    records = list(result_set.records)
    print(f"authset length is : {len(records)}")
    return records
def unbis_tcode(tcode):
    '''Looks up UNBIS thesaurus T codes and returns matching subject heading records.

    URL parameters:
        skip=n   skip n records. Default is 0.
        limit=m  limit the number of records returned. Default is 50.

    Returns a JSON object mapping each matching T code (035$a) to its subject
    heading in six languages: EN (150), FR (993), ES (994), AR (995),
    ZH (996), RU (997).
    '''
    # request.args.get returns None when the parameter is absent, so int()
    # raises TypeError; a malformed value raises ValueError.
    try:
        skp = int(request.args.get('skip'))
    except (TypeError, ValueError):
        skp = 0

    try:
        limt = int(request.args.get('limit'))
    except (TypeError, ValueError):
        limt = 50

    # Case-insensitive lookup: the stored codes are upper case.
    query = QueryDocument(
        Condition(tag='035', subfields={'a': re.compile(str(tcode).upper())})
    )

    headings = {}
    authset = AuthSet.from_query(
        query,
        projection={'035': 1, '150': 1, '993': 1, '994': 1, '995': 1, '996': 1, '997': 1},
        skip=skp,
        limit=limt
    )

    for auth in authset:
        # A record can carry several 035$a values; keep only the T code(s).
        # startswith also guards against an empty subfield value.
        t_code = ''.join(v for v in auth.get_values('035', 'a') if v.startswith('T'))
        headings[t_code] = {
            'EN': auth.get_value('150', 'a'),
            'FR': auth.get_value('993', 'a'),
            'ES': auth.get_value('994', 'a'),
            'AR': auth.get_value('995', 'a'),
            'ZH': auth.get_value('996', 'a'),
            'RU': auth.get_value('997', 'a')
        }

    return jsonify(headings)
def unbis_label(label):
    '''Looks up UNBIS thesaurus labels and returns matching T codes.

    URL parameters:
        skip=n   skip n records. Default is 0.
        limit=m  limit the number of records returned. Default is 50.

    Returns a JSON object mapping each matching subject heading (150$a)
    to its T code (035$a).
    '''
    # request.args.get returns None when the parameter is absent, so int()
    # raises TypeError; a malformed value raises ValueError.
    try:
        skp = int(request.args.get('skip'))
    except (TypeError, ValueError):
        skp = 0

    try:
        limt = int(request.args.get('limit'))
    except (TypeError, ValueError):
        limt = 50

    # Case-insensitive lookup: the stored headings are upper case.
    query = QueryDocument(
        Condition(tag='150', subfields={'a': re.compile(str(label).upper())})
    )

    tcodes = {}
    authset = AuthSet.from_query(
        query,
        projection={'035': 1, '150': 1},
        skip=skp,
        limit=limt
    )

    for auth in authset:
        # A record can carry several 035$a values; keep only the T code(s).
        # startswith also guards against an empty subfield value.
        t_code = ''.join(v for v in auth.get_values('035', 'a') if v.startswith('T'))
        tcodes[auth.get_value('150', 'a')] = t_code

    return jsonify(tcodes)
def votes(topic):
    '''Looks up voting-data records for General Assembly documents matching a topic.

    URL parameters:
        skip=n      skip n records. Default is 0.
        limit=m     limit the number of records returned. Default is 50.
        year_from   start year (currently informational only). Default '1980'.
        year_to     end year (currently informational only). Default '2020'.
        Country     country whose vote to match. Default 'CANADA'.
        Vote        vote value to match (e.g. 'A'). Default 'A'.

    Returns a JSON object mapping 791$a document symbols to record ids (001)
    for voting-data records where the given country cast the given vote.
    '''
    # request.args.get returns None when the parameter is absent, so int()
    # raises TypeError; a malformed value raises ValueError.
    try:
        skp = int(request.args.get('skip'))
    except (TypeError, ValueError):
        skp = 0

    try:
        limt = int(request.args.get('limit'))
    except (TypeError, ValueError):
        limt = 50

    # args.get never raises on a missing key, so the original try/except
    # blocks were dead code and left these as None when the parameter was
    # absent; pass the default to get() instead.
    yr_from = request.args.get('year_from', '1980')
    yr_to = request.args.get('year_to', '2020')
    cntry = request.args.get('Country', 'CANADA')
    vt = request.args.get('Vote', 'A')

    query = QueryDocument(
        # 191$d holds the topic text; 191$a starting with 'A' restricts the
        # search to General Assembly document symbols.
        Condition(tag='191', subfields={'d': re.compile(str(topic))}),
        Condition(tag='191', subfields={'a': re.compile('^A')})
    )

    # First pass: map each matching document symbol to its authority id.
    auth_ids = {}
    authset = AuthSet.from_query(
        query,
        projection={'001': 1, '191': 1},
        skip=skp,
        limit=limt
    )
    for auth in authset:
        auth_ids[auth.get_value('191', 'a')] = auth.get_value('001')

    # Second pass: for each authority id, find the linked voting-data bib
    # records and keep those where this country cast this vote.
    matches = {}
    for symbol, auth_id in auth_ids.items():
        query_bib = QueryDocument(
            # 991$d links the bib record to the authority record id.
            Condition(tag='991', subfields={'d': int(auth_id)}),
            Condition(tag='989', subfields={'a': re.compile('Voting Data')})
        )
        bibset = BibSet.from_query(
            query_bib,
            projection={'001': 1, '791': 1, '967': 1},
            skip=skp,
            limit=limt
        )
        for bib in bibset:
            for field in bib.get_fields('967'):
                # Concatenated 967$d + 967$e is compared against vote+country
                # (presumably $d is the vote and $e the country — confirm
                # against the central DB schema).
                votecountry = field.get_value("d") + field.get_value("e")
                if str(votecountry) == str(vt) + str(cntry):
                    matches[bib.get_value('791', 'a')] = bib.get_value('001')

    return jsonify(matches)