def boundwith(xml_string):
    """
    Receives a MARCXML string, parses the 773 field, and captures the doc_id
    stored in the "w" subfield.  The "w" subfield should contain the doc_id of
    the parent record.  Returns the doc_id (if it exists) and an outcome code
    (0 = success, 1 = parse failure).
    """
    doc_id = ""
    in_string = xml_string.replace("\n", "")
    try:
        record = marcxml.biblio(in_string)
    except:
        sys.stderr.write("failed\n")
        return doc_id, 1
    field_773 = record.get_field("773")
    if len(field_773) > 0:
        subfield = field_773[0].split("\\p")
        for sf in subfield:
            if sf != "":
                if sf[0:1] == "w":
                    doc_id = str(sf[1:])
    return doc_id, 0

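# Illustrative sketch (kept as comments so it never executes in the CGI flow;
# get_record_url and docid are placeholders) of how boundwith() is meant to be
# used: when a record has no copies of its own, the "w" subfield of field 773
# points at the parent ("host") record, and the item lookup is repeated
# against that parent -- the same pattern main() follows below.
#
#   xml_string, outcome = get_record(get_record_url, docid)
#   if outcome == 0:
#       parent_id, outcome = boundwith(xml_string)
#       if outcome == 0 and parent_id != "":
#           xml_string, outcome = get_record(get_record_url, parent_id)
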
def get_findingaids_url(xml_string):
    """
    Receives a MARCXML string of an "archive" record and looks for the URL
    stored in the MARC 555 field (subfield "u").
    """
    fa_url = ""
    outcome = 0
    in_string = xml_string.replace("\n", "")
    try:
        record = marcxml.biblio(in_string)
    except:
        sys.stderr.write("failed to get findingaids url\n")
        return fa_url, 1
    field_list = record.get_field("555")
    for field in field_list:
        subfield = field.split("\\p")
        fa_url = ""
        for sf in subfield:
            if sf != "":
                if sf[0:1] == "u":
                    fa_url = sf[1:]
                    break
        outcome = 0
    if fa_url == "":
        outcome = 1
    return fa_url, outcome

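# Minimal sketch (sample data only, kept as comments) of the 555 subfield
# convention get_findingaids_url() relies on: marcxml.biblio(...).get_field("555")
# is assumed to return the field content with "\p"-delimited subfields, and
# subfield "u" carries the finding-aids URL.  The note text and hostname below
# are hypothetical.
#
#   field = "\\paFinding aid available\\puhttps://findingaids.example.edu/documents/abc123/"
#   for sf in field.split("\\p"):
#       if sf != "" and sf[0:1] == "u":
#           fa_url = sf[1:]   # -> "https://findingaids.example.edu/documents/abc123/"
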
def get_bib_title(xml_string):
    """
    Receives a MARCXML string and extracts the bibliographic title from the
    245 field (subfields "a" and "b").
    """
    title = ""
    outcome = 0
    in_string = xml_string.replace("\n", "")
    try:
        record = marcxml.biblio(in_string)
    except:
        sys.stderr.write("failed\n")
        return title, 1
    title_list = record.get_field("245")
    for title in title_list:
        title_part_a = ""
        title_part_b = ""
        subfield = title.split("\\p")
        title = ""
        for sf in subfield:
            if sf != "":
                if sf[0:1] == "a":
                    title_part_a = sf[1:]
                    if title_part_a.isdigit():
                        title_part_a = str(title_part_a)
                if sf[0:1] == "b":
                    title_part_b = sf[1:]
                    if title_part_b.isdigit():
                        title_part_b = str(title_part_b)
        title = title_part_a + " " + title_part_b
        title = title.replace("/", "")
    return title, 0

def is_archive(xml_string):
    """
    Receives a MARCXML string, reads the record leader (the "000" field), and
    examines the byte at position 13.  If the byte is "p" the function returns
    "Y", otherwise "N" ("X" when no leader is found; 1 on a parse failure).
    """
    archive = "X"
    in_string = xml_string.replace("\n", "")
    try:
        record = marcxml.biblio(in_string)
    except:
        sys.stderr.write("failed\n")
        return 1
    leader = record.get_field("000")
    #sys.stderr.write("leader:"+str(leader)+"\n")
    archive = "X"
    if len(leader) > 0:
        archive = leader[0][13:14]
        ## find out whether BIB is an archive type
        if archive == "p":
            archive = "Y"
        else:
            archive = "N"
    return archive

def get_items(xml_string):
    """
    Receives a MARCXML string.  The MARCXML stores item information in the
    "999" field.  Returns a list of "999" entries; each entry is a string of
    fields separated by "|":
    library|location|material_type|callnumber|barcode|available
    """
    items = []
    outcome = 1
    in_string = xml_string.replace("\n", "")
    try:
        record = marcxml.biblio(in_string)
    except:
        sys.stderr.write("failed\n")
        return items, 1
    copies = record.get_field("999")
    #01|holding-library|location|material-type|
    for copy in copies:
        subfield = copy.split("\\p")
        material_type = "xxxx"
        location = "xxxx"
        library = "xxxx"
        callnumber = "xxxx"
        barcode = "xxxx"
        available = "xxxx"
        for sf in subfield:
            if sf != "":
                if sf[0:1] == "l":
                    location = str(sf[1:])
                elif sf[0:1] == "t":
                    material_type = str(sf[1:])
                elif sf[0:1] == "m":
                    library = sf[1:]
                elif sf[0:1] == "a":
                    callnumber = sf[1:]
                elif sf[0:1] == "i":
                    barcode = sf[1:]
                elif sf[0:1] == "b":
                    available = sf[1:]
        if available != "available":
            location = "CHECKEDOUT"
        copy_info = library + "|" + location + "|" + material_type + "|" + callnumber + "|" + str(barcode) + "|" + available + "|"
        #print copy_info
        items.append(copy_info)
    return items, 0

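# Self-contained sketch (hypothetical values, kept as comments) of the
# "|"-delimited item string get_items() builds, and one way a caller might
# unpack it.  The library code, call number, and barcode are made up; only the
# field order comes from the copy_info construction above.
#
#   copy_info = "MARBL|CHECKEDOUT|book|PS3553 .A1 1999|010101010101|unavailable|"
#   library, location, material_type, callnumber, barcode, available = \
#       copy_info.split("|")[0:6]
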
def get_items(xml_string):
    """
    Receives a MARCXML string that contains items and produces a list of
    items.  Each list element is a string:
    library|location|material_type|callnumber|barcode|available
    """
    items = []
    outcome = 1
    in_string = xml_string.replace("\n", "")
    try:
        record = marcxml.biblio(in_string)
    except:
        sys.stderr.write("failed\n")
        return items, 1
    copies = record.get_field("999")
    for copy in copies:
        subfield = copy.split("\\p")
        material_type = "xxxx"
        location = "xxxx"
        library = "xxxx"
        callnumber = "xxxx"
        barcode = "xxxx"
        available = "xxxx"
        for sf in subfield:
            if sf != "":
                if sf[0:1] == "l":
                    location = str(sf[1:])
                elif sf[0:1] == "t":
                    material_type = str(sf[1:])
                elif sf[0:1] == "m":
                    library = sf[1:]
                elif sf[0:1] == "a":
                    callnumber = sf[1:]
                elif sf[0:1] == "i":
                    barcode = sf[1:]
                elif sf[0:1] == "b":
                    available = sf[1:]
        copy_info = library + "|" + location + "|" + material_type + "|" + callnumber + "|" + str(barcode) + "|" + available + "|"
        items.append(copy_info)
    return items, 0

def get_bib_title(docid, get_record_url):
    """
    Invokes the custom get_alma_bib web service (via get_record_url) to
    retrieve the bibliographic title for docid.
    """
    title = ""
    outcome = 1
    try:
        xml_string, outcome = get_record(get_record_url, docid)
    except:
        sys.stderr.write("error getting xml:" + "\n")
        return "", 1
    #sys.stderr.write("get_bib_title: "+str(outcome)+"\n")
    if outcome == 0:
        in_string = xml_string.replace("\n", "")
        try:
            record = marcxml.biblio(in_string)
        except:
            sys.stderr.write("failed\n")
            return title, 1
        title_list = record.get_field("245")
        for title in title_list:
            title_part_a = ""
            title_part_b = ""
            subfield = title.split("\\p")
            title = ""
            for sf in subfield:
                if sf != "":
                    if sf[0:1] == "a":
                        #title_part_a=str(sf[1:])
                        title_part_a = sf[1:]
                        if title_part_a.isdigit():
                            title_part_a = str(title_part_a)
                    if sf[0:1] == "b":
                        #title_part_b=str(sf[1:])
                        title_part_b = sf[1:]
                        if title_part_b.isdigit():
                            title_part_b = str(title_part_b)
            title = title_part_a + " " + title_part_b
            title = title.replace("/", "")
            #title=urllib.quote(title)
    return title, outcome

def get_bib_title(docid, get_record_url):
    """
    Uses the get_record_url web service to retrieve the title that corresponds
    to docid.
    """
    #sys.stderr.write("here:"+str(docid)+" "+str(get_record_url)+"\n")
    title = ""
    outcome = 1
    try:
        xml_string, outcome = get_record(get_record_url, docid)
    except:
        sys.stderr.write("error getting xml:" + "\n")
        return "", 1
    #sys.stderr.write("get_bib_title: "+str(outcome)+"\n")
    if outcome == 0:
        in_string = xml_string.replace("\n", "")
        try:
            record = marcxml.biblio(in_string)
        except:
            sys.stderr.write("failed\n")
            return title, 1
        title_list = record.get_field("245")
        for title in title_list:
            title_part_a = ""
            title_part_b = ""
            subfield = title.split("\\p")
            title = ""
            for sf in subfield:
                if sf != "":
                    if sf[0:1] == "a":
                        #title_part_a=str(sf[1:])
                        title_part_a = sf[1:]
                        if title_part_a.isdigit():
                            title_part_a = str(title_part_a)
                    if sf[0:1] == "b":
                        #title_part_b=str(sf[1:])
                        title_part_b = sf[1:]
                        if title_part_b.isdigit():
                            title_part_b = str(title_part_b)
            title = title_part_a + " " + title_part_b
            title = title.replace("/", "")
    return title, outcome

def get_items(xml_string):
    """
    Receives a MARCXML string in xml_string and extracts the items held in
    "999" fields.
    """
    items = []
    outcome = 1
    in_string = xml_string.replace("\n", "")
    try:
        record = marcxml.biblio(in_string)
    except:
        sys.stderr.write("failed\n")
        return items, 1
    copies = record.get_field("999")
    #01|holding-library|location|material-type|
    for copy in copies:
        subfield = copy.split("\\p")
        material_type = "xxxx"
        location = "xxxx"
        library = "xxxx"
        callnumber = "xxxx"
        barcode = "xxxx"
        available = "xxxx"
        for sf in subfield:
            if sf != "":
                if sf[0:1] == "l":
                    location = str(sf[1:])
                elif sf[0:1] == "t":
                    material_type = str(sf[1:])
                elif sf[0:1] == "m":
                    library = sf[1:]
                elif sf[0:1] == "a":
                    callnumber = sf[1:]
                elif sf[0:1] == "i":
                    barcode = sf[1:]
                elif sf[0:1] == "b":
                    available = sf[1:]
        copy_info = library + "|" + location + "|" + material_type + "|" + callnumber + "|" + str(barcode) + "|" + available + "|"
        #sys.stderr.write(str(copy_info)+"\n")
        items.append(copy_info)
    return items, 0

def get_bib_title(xml_string):
    """
    Receives a MARCXML string of an "archive" record and gets the record title
    from the MARC 245 field.
    """
    title = ""
    outcome = 0
    in_string = xml_string.replace("\n", "")
    try:
        record = marcxml.biblio(in_string)
    except:
        sys.stderr.write("failed\n")
        return "", 1
    title_list = record.get_field("245")
    for title in title_list:
        title_part_a = ""
        title_part_b = ""
        subfield = title.split("\\p")
        title = ""
        for sf in subfield:
            if sf != "":
                if sf[0:1] == "a":
                    title_part_a = sf[1:]
                    if title_part_a.isdigit():
                        title_part_a = str(title_part_a)
                if sf[0:1] == "b":
                    title_part_b = sf[1:]
                    if title_part_b.isdigit():
                        title_part_b = str(title_part_b)
        title = title_part_a + " " + title_part_b
        title = title.replace("/", "")
        #title=urllib.quote(title)
        #print "title:"+title
    return title, 0

def main():
    """
    It supports CGI.  It expects a configuration file with the following
    variables:
      sys_email=
      api_host=https://api-na.hosted.exlibrisgroup.com/
      user_apikey=
      alma_request_map=
      get_record_url=
      webserver=
      linked_data_host=
      docdelivery=
      reserves=
      marbl_booking=
      marbl_finding_aids=
      preflight_hostnames=
    It supports Cross-Origin Resource Sharing (CORS) by recognizing the HTTP
    "OPTIONS" request method.
    """
    os.environ["LANG"] = "en_US.utf8"
    if len(sys.argv) < 2:
        sys.stderr.write("usage: config_file=" + "\n")
        print_error("system failure: no configuration file.")
        return 1
    http_method = os.environ["REQUEST_METHOD"]
    #sys.stderr.write("http_method:"+http_method+"\n")
    doc_id = ""
    user_id = ""
    try:
        config = open(sys.argv[1], 'r')
    except:
        print_error("system failure: couldn't open config. file:" + sys.argv[1])
        sys.stderr.write("couldn't open config. file:" + sys.argv[1] + "\n")
        return 1
    sys_email = "*****@*****.**"
    get_ser_status = ""
    request_map = ""
    linked_data_host = ""
    preflight_hostnames = ""
    # defaults so the missing-key checks below see empty strings
    get_record_url = ""
    api_host = ""
    user_apikey = ""
    webserver = ""
    docdelivery = ""
    reserves = ""
    marbl_booking = ""
    marbl_finding_aids = ""
    # parse key=value pairs from the configuration file
    param = re.compile("(.*?)=(.*)")
    for line in config:
        line = line.rstrip("\n")
        m = param.match(line)
        if m:
            if m.group(1) == "sys_email":
                sys_email = m.group(2)
            if m.group(1) == "get_record_url":
                get_record_url = str(m.group(2))
            if m.group(1) == "api_host":
                api_host = str(m.group(2))
            if m.group(1) == "user_apikey":
                user_apikey = str(m.group(2))
            if m.group(1) == "alma_request_map":
                request_map = str(m.group(2))
            if m.group(1) == "webserver":
                webserver = m.group(2)
            if m.group(1) == "linked_data_host":
                linked_data_host = m.group(2)
            if m.group(1) == "docdelivery":
                docdelivery = m.group(2)
            if m.group(1) == "reserves":
                reserves = m.group(2)
            if m.group(1) == "marbl_booking":
                marbl_booking = m.group(2)
            if m.group(1) == "marbl_finding_aids":
                marbl_finding_aids = m.group(2)
            if m.group(1) == "preflight_hostnames":
                preflight_hostnames = m.group(2)
    config.close()
    if get_record_url == "":
        print_error("get_record_url url is missing in configuration file.")
        return 1
    if api_host == "":
        print_error("api_host url is missing in configuration file.")
        return 1
    if user_apikey == "":
        print_error("user_apikey is missing in configuration file.")
        return 1
    if webserver == "":
        print_error("webserver is missing in configuration file.")
        return 1
    if linked_data_host == "":
        print_error("linked_data_host is missing in configuration file.")
        return 1
    if request_map == "":
        print_error("alma_request_map is missing in configuration file.")
        return 1
    if docdelivery == "":
        print_error("docdelivery is missing in configuration file.")
        return 1
    if preflight_hostnames == "":
        print_error("preflight_hostnames is missing in configuration file.")
        return 1
    if marbl_booking == "":
        print_error("marbl_booking is missing in configuration file.")
        return 1
    if reserves == "":
        print_error("reserves is missing in configuration file.")
        return 1
    if marbl_finding_aids == "":
        print_error("marbl_finding_aids is missing in configuration file.")
        return 1
    try:
        req_file = open(request_map, 'Ur')
    except:
        print_error("System failure. alma_request_map doesn't exist.")
        return 1
    req_file.close()
    try:
        context = alma_request.table(request_map)
    except:
        print_error("System failure. couldn't call alma_request module.")
        sys.stderr.write("couldn't call alma_request module" + "\n")
        return 1
    # CORS preflight handling
    if http_method == "OPTIONS":
        origin_rule = re.compile(r"(http://|https://)(.*)")
        try:
            origin = os.environ["HTTP_ORIGIN"]
        except:
            print_error("NO HTTP_ORIGIN")
            return 1
        try:
            preflight_list = preflight_hostnames.split(";")
        except:
            preflight_list = []
        m = origin_rule.match(origin)
        if m:
            if m.group(2) in preflight_list:
                #sys.stderr.write("ok_cors medusa\n")
                print_ok_cors("<result><code>OK</code></result>", origin)
                access_control = os.environ["HTTP_ACCESS_CONTROL_REQUEST_HEADERS"]
            else:
                print_notok_cors("<result><code>ERROR</code></result>")
        return 0
    form = cgi.FieldStorage()
    if len(form) == 0:
        print_error("Expected doc_id and user_id")
        return 1
    if 'doc_id' in form:
        doc_id = form.getfirst("doc_id")
    if 'user_id' in form:
        user_id = form.getfirst('user_id')
    else:
        print_error("Expected doc_id and user_id")
        return 1
    #doc_id="990029881400302486"
    #user_id="0036487"
    doc_id = doc_id.replace(" ", "")
    if doc_id == "":
        print_error("doc_id must be a number")
        #sys.stderr.write("not a number\n")
        return 1
    docid_list = doc_id.split(",")
    copies = []
    archives = False
    requestable = 0
    if user_id == "0":
        user_group = "XX"
    else:
        user_group, outcome = get_user_group(api_host, user_apikey, user_id)
    for docid in docid_list:
        try:
            docid = int(docid)
        except:
            print_error("doc_id must be a number")
            #sys.stderr.write("not a number\n")
            return 1
        xml_string, outcome = get_record(get_record_url, docid)
        if outcome == 0:
            in_string = xml_string.replace("\n", "")
            try:
                record = marcxml.biblio(in_string)
            except:
                print_error("failed to parse marcxml string.")
                sys.stderr.write("failed\n")
                return 1
            leader = record.get_field("000")
            #sys.stderr.write("leader:"+str(leader)+"\n")
            archive = "X"
            if len(leader) > 0:
                archive = leader[0][11:12]
                ## find out whether BIB is an archive type
                if archive == "p":
                    archive = "Y"
                else:
                    archive = "N"
            copies = record.get_field("999")
            item, outcome = get_item_info(copies)
            if outcome == 0:
                if len(item) == 0:
                    ### title has no copies. it might be a bound-with title.
                    boundwith_docid, outcome = boundwith(in_string)
                    #sys.stderr.write("boundwith:"+str(boundwith_docid)+"\n")
                    if outcome == 0 and boundwith_docid != "":
                        xml_string, outcome = get_record(get_record_url, boundwith_docid)
                        if outcome == 0:
                            in_string = xml_string.replace("\n", "")
                            try:
                                record = marcxml.biblio(in_string)
                            except:
                                print_error("failed to parse marcxml string.")
                                sys.stderr.write("failed\n")
                                return 1
                            leader = record.get_field("000")
                            archive = "N"
                            if len(leader) > 0:
                                archive = leader[0][11:12]
                                if archive == "p":
                                    archive = "Y"
                                else:
                                    archive = "N"
                            copies = record.get_field("999")
                            item, outcome = get_item_info(copies)
                            doc_id = boundwith_docid
                            #sys.stderr.write("boundwith copies:"+str(item)+" "+archive+"\n")
                if len(item) > 0:
                    req_type = []
                    for i_info in item:
                        i_field = i_info.split("|")
                        #sys.stderr.write(str(user_group)+"|"+i_field[0]+"|"+i_field[1]+"|"+i_field[2]+"|"+archive+"|"+"\n")
                        found_it = context.match_it(str(user_group) + "|" + i_field[0] + "|" + i_field[1] + "|" + i_field[2] + "|" + archive + "|")
                        if found_it:
                            menu = context.get_menu(str(user_group) + "|" + i_field[0] + "|" + i_field[1] + "|" + i_field[2] + "|" + archive + "|")
                            if menu != "":
                                #sys.stderr.write(str(user_group)+"|"+i_field[0]+"|"+i_field[1]+"|"+i_field[2]+"|"+archive+"|"+" menu:"+str(menu)+"\n")
                                requestable = requestable + 1
                                request_list = menu.split(",")
                                for req in request_list:
                                    req_name = req.split("@")
                                    req_type.append(req_name[0])
                                request_list = req_type
            else:
                sys.stderr.write("get_item_id failed:" + doc_id + "\n")
                #print_error("unable to process request: system failure(1).")
                requestable = 0
                #return 1
    # end of block
    if requestable > 0:
        combined_link = generate_link(request_list, marbl_booking, reserves, docdelivery, marbl_finding_aids, doc_id, user_id)
        #combined_link="<combined_link>"+menu+"</combined_link>"
    else:
        combined_link = "<combined_link>" + "</combined_link>"
    identities_link, outcome = worldcat_identities_link(str(docid_list[0]), linked_data_host)
    result_xml = "<link>" + combined_link + identities_link + "</link>"
    result_xml += "<user_group>" + str(user_group) + "</user_group>"
    # escape bare ampersands so the XML output stays well-formed
    result_xml = result_xml.replace("&", "&amp;")
    print_result(result_xml)
    return 0

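# A hypothetical configuration file for the main() above (illustrative values
# only; real keys, hostnames, and paths are site-specific).  Each line is
# matched with the "(.*?)=(.*)" pattern, so there are no spaces around "=":
#
#   sys_email=systems@example.edu
#   api_host=https://api-na.hosted.exlibrisgroup.com/
#   user_apikey=REPLACE_WITH_ALMA_API_KEY
#   alma_request_map=/path/to/alma_request_map.txt
#   get_record_url=https://example.edu/cgi-bin/get_alma_bib
#   webserver=https://example.edu
#   linked_data_host=https://example.edu/linked_data
#   docdelivery=https://example.edu/docdelivery
#   reserves=https://example.edu/reserves
#   marbl_booking=https://example.edu/marbl_booking
#   marbl_finding_aids=https://findingaids.example.edu
#   preflight_hostnames=catalog.example.edu;search.example.edu
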
def generate_openurl(xml_string, genre, item_info, illiad_site_list):
    sub_library = ""
    item_location = ""
    holding_library = ""
    material = ""
    item_id = ""
    callnumber = ""
    author_100 = ""
    author_110 = ""
    author_700 = ""
    isbn = ""
    issn = ""
    pub_place = ""
    pub_date = ""
    publisher = ""
    universal_product_code = ""
    title = ""
    illiad_code = {}
    for site in illiad_site_list:
        alma_name, ill_name = site.split(":")
        illiad_code[alma_name] = ill_name
    i_field = item_info.split("|")
    try:
        item_id = str(i_field[4])
        item_id = urllib.quote(item_id)
    except:
        pass
    doc_type = ""
    try:
        callnumber = str(i_field[3])
        callnumber = urllib.quote(callnumber)
        if callnumber[0:2] == "XE":
            doc_type = "EUARB"
        else:
            doc_type = "RB"
    except:
        pass
    try:
        holding_library = str(i_field[0])
        holding_library = urllib.quote(holding_library)
    except:
        pass
    try:
        illiad_libr_code = illiad_code[holding_library]
    except:
        illiad_libr_code = "EMU"
        sys.stderr.write("failed to get ILLIAD site code: " + str(holding_library) + "\n")
    #sys.stderr.write("ILLIAD site code: "+str(illiad_libr_code)+"\n")
    try:
        item_location = str(i_field[1])
        item_location = urllib.quote(item_location)
    except:
        pass
    item_location = holding_library + "%20" + item_location
    in_string = xml_string.replace("\n", "")
    try:
        record = marcxml.biblio(in_string)
    except:
        sys.stderr.write("failed\n")
        return "", 1
    author_100_field = record.get_field("100")
    for this_author in author_100_field:
        author_part_a = ""
        author_part_b = ""
        subfield = this_author.split("\\p")
        for sf in subfield:
            if sf != "":
                if sf[0:1] == "a":
                    author_part_a = sf[1:]
                if sf[0:1] == "b":
                    author_part_b = sf[1:]
        author_100 = author_part_a + " " + author_part_b
        author_100 = author_100.replace("/", "")
        author_100 = author_100.rstrip(" ")
        author_100 = author_100.rstrip(",")
        author_100 = simple_urlencode.encode(author_100)
        #print author_100
    author_700_field = record.get_field("700")
    for this_author in author_700_field:
        author_part_a = ""
        author_part_b = ""
        subfield = this_author.split("\\p")
        for sf in subfield:
            if sf != "":
                if sf[0:1] == "a":
                    author_part_a = sf[1:]
                if sf[0:1] == "b":
                    author_part_b = sf[1:]
        author_700 = author_part_a + " " + author_part_b
        author_700 = author_700.replace("/", "")
        author_700 = author_700.rstrip(" ")
        author_700 = author_700.rstrip(",")
        author_700 = simple_urlencode.encode(author_700)
    author_110_field = record.get_field("110")
    for this_author in author_110_field:
        author_part_a = ""
        author_part_b = ""
        subfield = this_author.split("\\p")
        for sf in subfield:
            if sf != "":
                if sf[0:1] == "a":
                    author_part_a = sf[1:]
                if sf[0:1] == "b":
                    author_part_b = sf[1:]
        author_110 = author_part_a + " " + author_part_b
        author_110 = author_110.replace("/", "")
        author_110 = author_110.rstrip(" ")
        author_110 = author_110.rstrip(",")
        author_110 = simple_urlencode.encode(author_110)
        #print author_110
    isbn_020 = record.get_field("020")
    for field in isbn_020:
        isbn_part_a = ""
        subfield = field.split("\\p")
        for sf in subfield:
            if sf != "":
                if sf[0:1] == "a":
                    isbn_part_a = sf[1:]
        isbn = isbn_part_a
        isbn = isbn.rstrip(" ")
        isbn = str(isbn)
        #print "isbn:"+str(isbn)
    issn_022 = record.get_field("022")
    for field in issn_022:
        issn_part_a = ""
        subfield = field.split("\\p")
        for sf in subfield:
            if sf != "":
                if sf[0:1] == "a":
                    issn_part_a = sf[1:]
        issn = issn_part_a
        issn = issn.rstrip(" ")
        issn = str(issn)
        #print "issn:"+str(issn)
    title_info = record.get_field("245")
    title_part_a = ""
    title_part_b = ""
    for field in title_info:
        subfield = field.split("\\p")
        for sf in subfield:
            if sf != "":
                if sf[0:1] == "a":
                    title_part_a = sf[1:]
                if sf[0:1] == "b":
                    title_part_b = sf[1:]
    try:
        title = title_part_a + " " + title_part_b
        title = title.replace("/", "")
        title = simple_urlencode.encode(title)
        title = title[0:100]
        title = title.rstrip("%")   ### drop a trailing, truncated url-encoded character
    except:
        sys.stderr.write("failed to get title\n")
    code_024_info = record.get_field("024")
    for field in code_024_info:
        indicator1 = str(field[4:5])
        if indicator1 == '1':
            subfield = field.split("\\p")
            for sf in subfield:
                if sf != "":
                    if sf[0:1] == "a":
                        universal_product_code = sf[1:]
                        universal_product_code = universal_product_code.rstrip(" ")
                        universal_product_code = str(universal_product_code)
    pub_info = record.get_field("264")
    for field in pub_info:
        indicator2 = str(field[5:6])
        if indicator2 == "1":
            subfield = field.split("\\p")
            for sf in subfield:
                if sf != "":
                    if sf[0:1] == "a":
                        pub_place = sf[1:]
                        pub_place = pub_place.rstrip(" ")
                        pub_place = pub_place.rstrip(",")
                        pub_place = pub_place.replace("[", "")
                        pub_place = pub_place.replace("]", "")
                        pub_place = simple_urlencode.encode(pub_place)
                    if sf[0:1] == "b":
                        publisher = sf[1:]
                        publisher = publisher.rstrip(" ")
                        publisher = publisher.rstrip(",")
                        publisher = simple_urlencode.encode(publisher)
                    if sf[0:1] == "c":
                        pub_date = sf[1:]
                        pub_date = pub_date.rstrip(" ")
                        pub_date = pub_date.rstrip(",")
    pub_260_info = record.get_field("260")
    for field in pub_260_info:
        subfield = field.split("\\p")
        for sf in subfield:
            if sf != "":
                if sf[0:1] == "a":
                    pub_place = sf[1:]
                    pub_place = pub_place.replace("[", "")
                    pub_place = pub_place.replace("]", "")
                    pub_place = simple_urlencode.encode(pub_place)
                if sf[0] == 'b':
                    publisher = sf[1:]
                    publisher = simple_urlencode.encode(publisher)
                if sf[0] == 'c':
                    pub_date = sf[1:]
                    pub_date = pub_date.replace("[", "")
                    pub_date = pub_date.replace("]", "")
                    pub_date = simple_urlencode.encode(pub_date)
    #print "pub place:"+str(pub_place)
    #print "publisher:"+str(publisher)
    #print "pub_date:"+str(pub_date)
    try:
        int(pub_date)
        pub_date = str(pub_date)
    except:
        pass
    if isbn == "":
        isbn = issn
    if isbn == "":
        isbn = universal_product_code
    if author_100 != "":
        openurl = "&rft.genre=" + genre + "&rft.btitle=" + title + "&rft.title=" + title + "&rft.au=" + author_100 + "&rft.date=" + pub_date + "&rft.place=" + pub_place + "&rft.pub=" + publisher + "&rft.edition=" + "&rft.isbn=" + isbn + "&rft.callnumber=" + callnumber + "&rft.item_location=" + item_location + "&rft.barcode=" + item_id + "&rft.doctype=" + doc_type + "&rft.lib=" + illiad_libr_code
    elif author_110 != "":
        openurl = "&rft.genre=" + genre + "&rft.btitle=" + title + "&rft.title=" + title + "&rft.au=" + author_110 + "&rft.date=" + pub_date + "&rft.place=" + pub_place + "&rft.pub=" + publisher + "&rft.edition=" + "&rft.isbn=" + isbn + "&rft.callnumber=" + callnumber + "&rft.item_location=" + item_location + "&rft.barcode=" + item_id + "&rft.doctype=" + doc_type + "&rft.lib=" + illiad_libr_code
    elif author_700 != "":
        openurl = "&rft.genre=" + genre + "&rft.btitle=" + title + "&rft.title=" + title + "&rft.au=" + author_700 + "&rft.date=" + pub_date + "&rft.place=" + pub_place + "&rft.pub=" + publisher + "&rft.edition=" + "&rft.isbn=" + isbn + "&rft.callnumber=" + callnumber + "&rft.item_location=" + item_location + "&rft.barcode=" + item_id + "&rft.doctype=" + doc_type + "&rft.lib=" + illiad_libr_code
    else:
        openurl = "&rft.genre=" + genre + "&rft.btitle=" + title + "&rft.title=" + title + "&rft.date=" + pub_date + "&rft.place=" + pub_place + "&rft.pub=" + publisher + "&rft.edition=" + "&rft.isbn=" + isbn + "&rft.callnumber=" + callnumber + "&rft.item_location=" + item_location + "&rft.barcode=" + item_id + "&rft.doctype=" + doc_type + "&rft.lib=" + illiad_libr_code
    #sys.stderr.write("openurl:"+openurl+"\n")
    return openurl, 0

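# Illustrative sketch (hypothetical, already url-encoded values) of the kind
# of query-string fragment generate_openurl() returns; the caller is assumed
# to append it to an ILLiad/document-delivery base URL.  Shown wrapped here
# for readability -- the real value is a single string:
#
#   &rft.genre=book&rft.btitle=Sample%20Title&rft.title=Sample%20Title
#   &rft.au=Doe%2C%20Jane&rft.date=1999&rft.place=Atlanta&rft.pub=Example%20Press
#   &rft.edition=&rft.isbn=9780000000000&rft.callnumber=XE123%20.A1
#   &rft.item_location=MARBL%20STACK&rft.barcode=010101010101&rft.doctype=EUARB&rft.lib=EMU
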
def main():
    """
    It supports CGI.  It expects a configuration file with the following
    variables:
      sys_email=
      api_host=
      user_apikey=
      alma_request_map=
      get_record_url=
      webserver=
      linked_data_host=
      docdelivery=
      reserves=
      marbl_booking=
      marbl_finding_aids=
      non_requestable_page=
      html_page=
    It supports Cross-Origin Resource Sharing (CORS) by recognizing the HTTP
    "OPTIONS" request method.
    """
    os.environ["LANG"] = "en_US.utf8"
    if len(sys.argv) < 2:
        sys.stderr.write("usage: config_file=" + "\n")
        print_error("system failure: no configuration file.")
        return 1
    http_method = os.environ["REQUEST_METHOD"]
    doc_id = ""
    user_id = "0"
    form = cgi.FieldStorage()
    if len(form) == 0:
        print_error("Expected doc_id and user_id")
        return 1
    if 'doc_id' in form:
        doc_id = form.getfirst("doc_id")
    if 'user_id' in form:
        user_id = form.getfirst('user_id')
    if doc_id == "":
        print_error("Expected doc_id")
        return 1
    doc_id = doc_id.replace(" ", "")
    try:
        config = open(sys.argv[1], 'r')
    except:
        print_error("system failure: couldn't open config. file:" + sys.argv[1])
        sys.stderr.write("couldn't open config. file:" + sys.argv[1] + "\n")
        return 1
    sys_email = "*****@*****.**"
    get_ser_status = ""
    request_map = ""
    linked_data_host = ""
    non_requestable_page = ""
    html_file = ""
    # defaults so the missing-key checks below see empty strings
    get_record_url = ""
    api_host = ""
    user_apikey = ""
    webserver = ""
    docdelivery = ""
    reserves = ""
    marbl_booking = ""
    marbl_finding_aids = ""
    # parse key=value pairs from the configuration file
    param = re.compile("(.*?)=(.*)")
    for line in config:
        line = line.rstrip("\n")
        m = param.match(line)
        if m:
            if m.group(1) == "sys_email":
                sys_email = m.group(2)
            if m.group(1) == "get_record_url":
                get_record_url = str(m.group(2))
            if m.group(1) == "api_host":
                api_host = str(m.group(2))
            if m.group(1) == "user_apikey":
                user_apikey = str(m.group(2))
            if m.group(1) == "alma_request_map":
                request_map = str(m.group(2))
            if m.group(1) == "webserver":
                webserver = m.group(2)
            if m.group(1) == "linked_data_host":
                linked_data_host = m.group(2)
            if m.group(1) == "docdelivery":
                docdelivery = m.group(2)
            if m.group(1) == "reserves":
                reserves = m.group(2)
            if m.group(1) == "marbl_booking":
                marbl_booking = m.group(2)
            if m.group(1) == "marbl_finding_aids":
                marbl_finding_aids = m.group(2)
            if m.group(1) == "non_requestable_page":
                non_requestable_page = m.group(2)
            if m.group(1) == "html_page":
                html_file = m.group(2)
    config.close()
    if get_record_url == "":
        print_error("get_record_url url is missing in configuration file.")
        return 1
    if api_host == "":
        print_error("api_host url is missing in configuration file.")
        return 1
    if user_apikey == "":
        print_error("user_apikey is missing in configuration file.")
        return 1
    if webserver == "":
        print_error("webserver is missing in configuration file.")
        return 1
    if linked_data_host == "":
        print_error("linked_data_host is missing in configuration file.")
        return 1
    if request_map == "":
        print_error("alma_request_map is missing in configuration file.")
        return 1
    if docdelivery == "":
        print_error("docdelivery is missing in configuration file.")
        return 1
    if marbl_booking == "":
        print_error("marbl_booking is missing in configuration file.")
        return 1
    if reserves == "":
        print_error("reserves is missing in configuration file.")
        return 1
    if marbl_finding_aids == "":
        print_error("marbl_finding_aids is missing in configuration file.")
        return 1
    if html_file == "":
        print_error("html_file is missing in configuration file.")
        return 1
    if non_requestable_page == "":
        print_error("non_requestable_page is missing in configuration file.")
        return 1
    try:
        req_file = open(request_map, 'Ur')
    except:
        print_error("System failure. alma_request_map doesn't exist.")
        return 1
    req_file.close()
    try:
        context = alma_request.table(request_map)
    except:
        sys.stderr.write("couldn't open request map:" + request_map + "\n")
        return 1
    try:
        html_f = open(html_file, 'Ur')
    except:
        print_error("System failure:" + str(html_file) + " doesn't exist.")
        return 1
    #html_f.close()
    try:
        non_requestable_f = open(non_requestable_page, 'Ur')
    except:
        print_error("System failure:" + str(non_requestable_page) + " doesn't exist.")
        return 1
    non_requestable_f.close()
    if doc_id == "":
        print_error("doc_id must be a number")
        #sys.stderr.write("not a number\n")
        return 1
    docid_list = doc_id.split(",")
    copies = []
    archives = False
    requestable = 0
    if user_id == "0":
        user_group = "XX"
    else:
        user_group, outcome = get_user_group(api_host, user_apikey, user_id)
    for docid in docid_list:
        try:
            docid = int(docid)
        except:
            print_error("doc_id must be a number")
            return 1
        xml_string, outcome = get_record(get_record_url, docid)
        if outcome == 0:
            in_string = xml_string.replace("\n", "")
            try:
                record = marcxml.biblio(in_string)
            except:
                sys.stderr.write("failed\n")
                return 1
            leader = record.get_field("000")
            archive = "X"
            if len(leader) > 0:
                archive = leader[0][11:12]
                ## find out whether BIB is an archive type
                if archive == "p":
                    archive = "Y"
                else:
                    archive = "N"
            copies = record.get_field("999")
            item, outcome = get_item_info(copies)
            if len(item) > 0:
                for i_info in item:
                    i_field = i_info.split("|")
                    found_it = context.match_it(str(user_group) + "|" + i_field[0] + "|" + i_field[1] + "|" + i_field[2] + "|" + archive + "|")
                    if found_it:
                        menu = context.get_menu(str(user_group) + "|" + i_field[0] + "|" + i_field[1] + "|" + i_field[2] + "|" + archive + "|")
                        if menu != "":
                            requestable = requestable + 1
                            request_list = menu.split(",")
                            req_type = []
                            for req in request_list:
                                req_name = req.split("@")
                                req_type.append(req_name[0])
                            request_list = req_type
                            break
        else:
            sys.stderr.write("get_item_id failed:" + doc_id + "\n")
            #print_error("unable to process request: system failure(1).")
            requestable = 0
            #return 1
    # end of block
    if requestable > 0:
        try:
            xml_string, outcome = get_record(get_record_url, doc_id)
        except:
            sys.stderr.write("item_info retrieval failed:" + "\n")
            report_failure("FAILURE", "get_record failed:" + str(doc_id), "")
            return 1
        try:
            record_title, outcome = get_bib_title(xml_string)
        except:
            display_html_failure("", "unable to process request: system failure..")
            return 1
        try:
            findingaids_url, outcome = get_findingaids_url(xml_string)
        except:
            display_html_failure("", "unable to process request: failed to get finding aids URL")
            return 1
        if outcome == 0:
            display_html_page(html_f, record_title, doc_id, findingaids_url)
        else:
            findingaids_url = "http://findingaids.library.emory.edu/"
            display_html_page(html_f, record_title, doc_id, findingaids_url)
        html_f.close()
    else:
        error_message = "<p>Some material is not available for scanning. Please contact the <a href=\"mailto:[email protected]\">ILL office</a> for additional information; be sure to include the Title and Record ID above.</p>"
        display_html_page(html_f, "ERROR", doc_id, error_message)
    return 0

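# Illustrative sketch (hypothetical hostname, script name, and IDs) of how
# these CGI entry points are invoked.  doc_id may be a comma-separated list of
# record IDs, and a user_id of "0" maps to the fallback user group "XX":
#
#   https://example.edu/cgi-bin/this_script.cgi?doc_id=990012345670302486&user_id=0036487
#
# For the CORS-aware main() earlier in this file, a preflight (OPTIONS)
# request is answered with print_ok_cors() only when the Origin hostname
# appears in the semicolon-separated preflight_hostnames configuration value.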