def test_add_series(self):
    edition = self._edition()
    edition.series = self._str
    edition.series_position = 5

    record = Record()
    Annotator.add_series(record, edition)
    self._check_field(record, "490", {
        "a": edition.series,
        "v": str(edition.series_position),
    }, ["0", " "])

    # If there's no series position, the same field is used without
    # the v subfield.
    edition.series_position = None
    record = Record()
    Annotator.add_series(record, edition)
    self._check_field(record, "490", {
        "a": edition.series,
    }, ["0", " "])
    [field] = record.get_fields("490")
    eq_([], field.get_subfields("v"))

    # If there's no series, the field is left out.
    edition.series = None
    record = Record()
    Annotator.add_series(record, edition)
    eq_([], record.get_fields("490"))
def test02_marc_diff(self):
    m1 = Record()
    m1.add_field(Field(tag='001', data='abc'))
    m2 = Record()
    m2.add_field(Field(tag='001', data='abc'))

    diff = marc_diff(m1, m2)
    self.assertEqual(diff, '')
    diff = marc_diff(m1, m2, verbose=True)
    self.assertEqual(diff, '== =001 abc')

    m1.add_field(Field(tag='002', data='def'))
    m2.add_field(Field(tag='002', data='ghi'))
    diff = marc_diff(m1, m2)
    self.assertEqual(diff, '-< =002 def\n-> =002 ghi')
    diff = marc_diff(m1, m2, ignore=[2])
    self.assertEqual(diff, '')
    diff = marc_diff(m1, m2, ignore=['002'])
    self.assertEqual(diff, '')

    m1.add_field(Field(tag='003', data='three'))
    m2.add_field(Field(tag='004', data='four'))
    diff = marc_diff(m1, m2, ignore=['002'])
    self.assertEqual(diff, '<< =003 three\n>> =004 four')
    diff = marc_diff(m1, m2, ignore=[2, 4])
    self.assertEqual(diff, '<< =003 three')
    diff = marc_diff(m1, m2, ignore=[2, 3])
    self.assertEqual(diff, '>> =004 four')
def test_add_simplified_genres(self):
    work = self._work(with_license_pool=True)
    fantasy, ignore = Genre.lookup(self._db, "Fantasy", autocreate=True)
    romance, ignore = Genre.lookup(self._db, "Romance", autocreate=True)
    work.genres = [fantasy, romance]

    record = Record()
    Annotator.add_simplified_genres(record, work)
    fields = record.get_fields("650")
    [fantasy_field, romance_field] = sorted(
        fields, key=lambda x: x.get_subfields("a")[0])
    eq_(["0", "7"], fantasy_field.indicators)
    eq_("Fantasy", fantasy_field.get_subfields("a")[0])
    eq_("Library Simplified", fantasy_field.get_subfields("2")[0])
    eq_(["0", "7"], romance_field.indicators)
    eq_("Romance", romance_field.get_subfields("a")[0])
    eq_("Library Simplified", romance_field.get_subfields("2")[0])

    # It also works with a materialized work.
    self.add_to_materialized_view([work])
    # The work is in the materialized view twice since it has two genres,
    # but we can use either one.
    [mw, ignore] = self._db.query(MaterializedWorkWithGenre).all()

    record = Record()
    Annotator.add_simplified_genres(record, mw)
    fields = record.get_fields("650")
    [fantasy_field, romance_field] = sorted(
        fields, key=lambda x: x.get_subfields("a")[0])
    eq_(["0", "7"], fantasy_field.indicators)
    eq_("Fantasy", fantasy_field.get_subfields("a")[0])
    eq_("Library Simplified", fantasy_field.get_subfields("2")[0])
    eq_(["0", "7"], romance_field.indicators)
    eq_("Romance", romance_field.get_subfields("a")[0])
    eq_("Library Simplified", romance_field.get_subfields("2")[0])
def test_build_string_list_from_fields():
    """Test build_string_list_from_fields."""
    record = Record()
    record.add_field(
        Field(tag='200', indicators=['0', '1'], subfields=[
            'a', 'Cerasi',
            'b', 'Claudio et Elena',
            'x', "Collections d'art"
        ]))
    data = build_string_list_from_fields(
        record=record,
        tag='200',
        subfields={
            'a': ', ', 'b': ', ', 'c': ', ',
            'd': ', ', 'f': ', ', 'x': ' - '
        })
    assert data == ["Cerasi, Claudio et Elena - Collections d'art"]

    record = Record()
    record.add_field(
        Field(tag='210', indicators=['0', '1'], subfields=[
            'a', 'Place of public./distr.',
            'b', 'Address/publisher/dist.',
            'c', 'Name of publisher/dist.',
            'd', 'Date',
            'e', 'Place',
            'f', 'Address'
        ]))
    data = build_string_list_from_fields(
        record=record,
        tag='210',
        subfields={
            'a': ', ', 'b': '. ', 'c': ', ',
            'd': '; ', 'e': '; ', 'f': '; '
        },
        tag_grouping=[
            {
                'subtags': 'c',
                'start': ' ( ',
                'end': ' )',
                'delimiter': '',
                'subdelimiter': ', '
            },
            {
                'subtags': 'def',
                'start': ' ( ',
                'end': ' )',
                'delimiter': '',
                'subdelimiter': '; '
            }
        ])
    assert data == [
        'Place of public./distr.'
        '. Address/publisher/dist.'
        ' ( Name of publisher/dist. )'
        ' ( Date; Place; Address )'
    ]
def test_add_title(self):
    edition = self._edition()
    edition.title = "The Good Soldier"
    edition.sort_title = "Good Soldier, The"
    edition.subtitle = "A Tale of Passion"

    record = Record()
    Annotator.add_title(record, edition)
    [field] = record.get_fields("245")
    self._check_field(record, "245", {
        "a": edition.title,
        "b": edition.subtitle,
        "c": edition.author,
    }, ["0", "4"])

    # If there's no subtitle or no author, those subfields are left out.
    edition.subtitle = None
    edition.author = None

    record = Record()
    Annotator.add_title(record, edition)
    [field] = record.get_fields("245")
    self._check_field(record, "245", {
        "a": edition.title,
    }, ["0", "4"])
    eq_([], field.get_subfields("b"))
    eq_([], field.get_subfields("c"))
def test_add_physical_description(self):
    book = self._edition()
    book.medium = Edition.BOOK_MEDIUM
    audio = self._edition()
    audio.medium = Edition.AUDIO_MEDIUM

    record = Record()
    Annotator.add_physical_description(record, book)
    self._check_field(record, "300", {"a": "1 online resource"})
    self._check_field(record, "336", {
        "a": "text",
        "b": "txt",
        "2": "rdacontent",
    })
    self._check_field(record, "337", {
        "a": "computer",
        "b": "c",
        "2": "rdamedia",
    })
    self._check_field(record, "338", {
        "a": "online resource",
        "b": "cr",
        "2": "rdacarrier",
    })
    self._check_field(record, "347", {
        "a": "text file",
        "2": "rda",
    })
    self._check_field(record, "380", {
        "a": "eBook",
        "2": "tlcgt",
    })

    record = Record()
    Annotator.add_physical_description(record, audio)
    self._check_field(record, "300", {
        "a": "1 sound file",
        "b": "digital",
    })
    self._check_field(record, "336", {
        "a": "spoken word",
        "b": "spw",
        "2": "rdacontent",
    })
    self._check_field(record, "337", {
        "a": "computer",
        "b": "c",
        "2": "rdamedia",
    })
    self._check_field(record, "338", {
        "a": "online resource",
        "b": "cr",
        "2": "rdacarrier",
    })
    self._check_field(record, "347", {
        "a": "audio file",
        "2": "rda",
    })
    eq_([], record.get_fields("380"))
def tearDown(self):
    # Test MARC records
    # NYPL bib
    self.n_marc = Record()
    # BPL bib
    self.b_marc = Record()
def create_record(cls, work, annotator, force_create=False, integration=None):
    """Build a complete MARC record for a given work."""
    if callable(annotator):
        annotator = annotator()

    if isinstance(work, BaseMaterializedWork):
        pool = work.license_pool
    else:
        pool = work.active_license_pool()
    if not pool:
        return None

    edition = pool.presentation_edition
    identifier = pool.identifier

    _db = Session.object_session(work)

    record = None
    existing_record = getattr(work, annotator.marc_cache_field)
    if existing_record and not force_create:
        record = Record(data=existing_record.encode('utf-8'), force_utf8=True)

    if not record:
        record = Record(leader=annotator.leader(work), force_utf8=True)
        annotator.add_control_fields(record, identifier, pool, edition)
        annotator.add_isbn(record, identifier)

        # TODO: The 240 and 130 fields are for translated works, so they can
        # be grouped even though they have different titles. We do not group
        # editions of the same work in different languages, so we can't use
        # those yet.

        annotator.add_title(record, edition)
        annotator.add_contributors(record, edition)
        annotator.add_publisher(record, edition)
        annotator.add_physical_description(record, edition)
        annotator.add_audience(record, work)
        annotator.add_series(record, edition)
        annotator.add_system_details(record)
        annotator.add_ebooks_subject(record)

        data = record.as_marc()
        if isinstance(work, BaseMaterializedWork):
            setattr(pool.work, annotator.marc_cache_field, data)
        else:
            setattr(work, annotator.marc_cache_field, data)

    # Add additional fields that should not be cached.
    annotator.annotate_work_record(work, pool, edition, identifier, record, integration)
    return record
def callZ3950(search_id, target, depth=0):
    if target == 'UIU':
        print "UIUC NUMBER: ", search_id
        query = zoom.Query('PQF', '@attr 1=12 %s' % str(search_id))
        database_address = 'z3950.carli.illinois.edu'
        username = '******'
        database_name = 'voyager'
    else:
        print "LC NUMBER: ", search_id
        query = zoom.Query('PQF', '@attr 1=9 %s' % str(formatLCCN(search_id)))
        database_address = 'lx2.loc.gov'
        username = ''
        if 'n' in search_id:
            database_name = 'NAF'
        else:
            database_name = 'SAF'

    # conn = establishZ3950Connection(database_address, 210, username, database_name)
    res = queryZ3950(database_address, username, database_name, query)
    print len(res)
    print res
    if len(res) > 0:
        for r in res:
            valid_leader = checkLeader(r.data[:24])
            if valid_leader:
                if len(res) > 1:
                    try:
                        new_record = Record(data=r.data)
                    except UnicodeDecodeError:
                        return (False, 'BROKEN CHARACTER IN RECORD')
                    lccn = new_record.get_fields('001')[0].data.replace(" ", "")
                    if lccn == search_id:
                        marc_record = new_record
                        fixNames(marc_record)
                else:
                    try:
                        marc_record = Record(data=r.data)
                    except UnicodeDecodeError:
                        return (False, 'BROKEN CHARACTER IN RECORD')
                    fixNames(marc_record)
            else:
                return (False, 'BROKEN LEADER')
        return (marc_record, None)
    elif depth < 20:
        waitSixSeconds(datetime.datetime.now().time())
        return callZ3950(search_id, target, depth=depth + 1)
    else:
        return (None, 'RECORD NOT FOUND')
def test_add_web_client_urls(self):
    # Web client URLs can come from either the MARC export integration or
    # a library registry integration.

    annotator = LibraryAnnotator(self._default_library)

    # If no web catalog URLs are set for the library, nothing will be changed.
    record = Record()
    identifier = self._identifier(foreign_id="identifier")
    annotator.add_web_client_urls(record, self._default_library, identifier)
    eq_([], record.get_fields("856"))

    # Add a URL from a library registry.
    registry = self._external_integration(
        ExternalIntegration.OPDS_REGISTRATION,
        ExternalIntegration.DISCOVERY_GOAL,
        libraries=[self._default_library])
    ConfigurationSetting.for_library_and_externalintegration(
        self._db, Registration.LIBRARY_REGISTRATION_WEB_CLIENT,
        self._default_library, registry).value = "http://web_catalog"

    record = Record()
    annotator.add_web_client_urls(record, self._default_library, identifier)
    [field] = record.get_fields("856")
    eq_(["4", "0"], field.indicators)
    eq_("http://web_catalog/book/Gutenberg%20ID%2Fidentifier",
        field.get_subfields("u")[0])

    # Add a manually configured URL on a MARC export integration.
    integration = self._external_integration(
        ExternalIntegration.MARC_EXPORT,
        ExternalIntegration.CATALOG_GOAL,
        libraries=[self._default_library])
    ConfigurationSetting.for_library_and_externalintegration(
        self._db, MARCExporter.WEB_CLIENT_URL,
        self._default_library, integration).value = "http://another_web_catalog"

    record = Record()
    annotator.add_web_client_urls(record, self._default_library, identifier, integration)
    [field1, field2] = record.get_fields("856")

    eq_(["4", "0"], field1.indicators)
    eq_("http://another_web_catalog/book/Gutenberg%20ID%2Fidentifier",
        field1.get_subfields("u")[0])

    eq_(["4", "0"], field2.indicators)
    eq_("http://web_catalog/book/Gutenberg%20ID%2Fidentifier",
        field2.get_subfields("u")[0])
def test_dvd_case1(self):
    b = Record()
    b.add_field(
        Field(tag='949', indicators=[' ', '1'], subfields=[
            'i', '33333306093499', 'l', 'mya0v', 'p', '24.99', 't', '111',
            'v', 'Midwest', 'n', 'o24643440', 'q', '10001'
        ]))
    b.add_field(
        Field(tag='949', indicators=[' ', '1'], subfields=[
            'i', '33333306093481', 'l', 'bta0v', 'p', '24.99', 't', '111',
            'v', 'Midwest', 'n', 'o24643440', 'q', '10001'
        ]))
    b.add_field(
        Field(tag='901', indicators=[' ', ' '], subfields=['a', 'Midwest']))
    b.add_field(
        Field(tag='949', indicators=[' ', ' '], subfields=['a', '*b2=v;']))
    bibs.write_marc21('specs_test.mrc', b)

    b = Record()
    b.add_field(
        Field(tag='949', indicators=[' ', '1'], subfields=[
            'i', '33333306093457', 'l', 'mya0v', 'p', '14.99', 't', '206',
            'v', 'Midwest', 'n', 'o24643282', 'q', '10001'
        ]))
    b.add_field(
        Field(tag='949', indicators=[' ', '1'], subfields=[
            'i', '33333306093432', 'l', 'bca0v', 'p', '14.99', 't', '206',
            'v', 'Midwest', 'n', 'o24643282', 'q', '10001'
        ]))
    b.add_field(
        Field(tag='901', indicators=[' ', ' '], subfields=['a', 'Midwest']))
    b.add_field(
        Field(tag='949', indicators=[' ', ' '], subfields=['a', '*b2=v;']))
    bibs.write_marc21('specs_test.mrc', b)

    passed, report = local_specs.local_specs_validation(
        'nypl', ['specs_test.mrc'], self.ncl)
    self.assertTrue(passed)
def test_add_summary(self):
    work = self._work(with_license_pool=True)
    work.summary_text = "<p>Summary</p>"

    record = Record()
    Annotator.add_summary(record, work)
    self._check_field(record, "520", {"a": " Summary "})

    # It also works with a materialized work.
    self.add_to_materialized_view([work])
    [mw] = self._db.query(MaterializedWorkWithGenre).all()

    record = Record()
    Annotator.add_summary(record, mw)
    self._check_field(record, "520", {"a": " Summary "})
def startElementNS(self, name, qname, attrs):
    # Not strict
    try:
        element, parameter = name[1].split(".")
    except ValueError:
        element = name[1]
    if element == "rusmarc":
        self._record = Record()
    elif element == "mrk":
        self._record.leader = ""
    elif element.startswith("m_"):
        pass  # See endElementNS for implementation
    elif element == "IND":
        self._indicators = parameter.replace("_", " ")
        self._field.subfields = []
    elif element == "FIELD":
        self._field = Field(parameter, [" ", " "])
    elif element == "SUBFIELD":
        self._subfield_code = parameter
    elif element == "RECORDS":
        pass
    else:
        raise RuntimeError("cannot process tag %s" % element)
    self._text = []
def test_960_items_nonrepeatable_subfields(self):
    b = Record()
    b.add_field(
        Field(tag='960', indicators=[' ', ' '], subfields=[
            'i', 'TEST', 'i', 'TEST',
            'l', 'TEST', 'l', 'TEST',
            'p', '9.99', 'p', '9.99',
            'q', 'TEST', 'q', 'TEST',
            'o', 'TEST', 'o', 'TEST',
            't', 'TEST', 't', 'TEST',
            'r', 'TEST', 'r', 'TEST',
            's', 'TEST', 's', 'TEST',
            'v', 'TEST', 'v', 'TEST',
            'n', 'TEST', 'n', 'TEST',
            'v', 'TEST', 'v', 'TEST'
        ]))
    bibs.write_marc21('specs_test.mrc', b)
    passed, report = local_specs.local_specs_validation(
        'bpl', ['specs_test.mrc'], self.bcl)
    self.assertFalse(passed)
    self.assertIn('"i" subfield is not repeatable.', report)
    self.assertIn('"l" subfield is not repeatable.', report)
    self.assertIn('"p" subfield is not repeatable.', report)
    self.assertIn('"q" subfield is not repeatable.', report)
    self.assertIn('"o" subfield is not repeatable.', report)
    self.assertIn('"t" subfield is not repeatable.', report)
    self.assertIn('"r" subfield is not repeatable.', report)
    self.assertIn('"s" subfield is not repeatable.', report)
    self.assertIn('"v" subfield is not repeatable.', report)
    self.assertIn('"n" subfield is not repeatable.', report)
def platform2pymarc_obj(data=None):
    """
    Converts Platform bib data into a pymarc object.
    args:
        data: bib data in JSON format
    return:
        pymarc Record object
    """
    record = Record(to_unicode=True, force_utf8=True)

    # parse variable fields
    varFields = data.get("varFields")
    for f in varFields:
        if f.get("fieldTag") == "_":
            record.leader = f.get("content")
        # control fields case
        elif f.get("subfields") is None:
            field = Field(
                tag=f.get("marcTag"),
                indicators=[f.get("ind1"), f.get("ind2")],
                data=f.get("content"),
            )
            record.add_field(field)
        else:
            # variable fields
            subfields = []
            for d in f.get("subfields"):
                subfields.append(d.get("tag"))
                subfields.append(d.get("content"))
            field = Field(
                tag=f.get("marcTag"),
                indicators=[f.get("ind1"), f.get("ind2")],
                subfields=subfields,
            )
            record.add_field(field)
    return record
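# A minimal usage sketch for platform2pymarc_obj above. The "varFields"
# payload is a hypothetical example of the JSON shape the function expects
# (leader under fieldTag "_", control fields without subfields, variable
# fields with a subfields list); it is not real Platform API output.
sample_bib = {
    "varFields": [
        {"fieldTag": "_", "content": "00000nam a2200000u  4500"},
        {"fieldTag": "c", "marcTag": "001", "ind1": " ", "ind2": " ",
         "subfields": None, "content": "ocm00000001"},
        {"fieldTag": "t", "marcTag": "245", "ind1": "0", "ind2": "0",
         "subfields": [{"tag": "a", "content": "Test title"}]},
    ]
}
record = platform2pymarc_obj(sample_bib)
# The result is an ordinary pymarc Record.
print(record.leader, record["245"]["a"])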
def transpose_to_marc21(record):
    Mrecord = Record(force_utf8=True)
    Mrecord.leader = record["_LEADER"]
    for field in record:
        if isint(field):
            if int(field) < 10:
                if isinstance(record[field], list):
                    for elem in record[field]:
                        Mrecord.add_field(Field(tag=field, data=elem))
                elif isinstance(record[field], str):
                    Mrecord.add_field(Field(tag=field, data=record[field]))
            else:
                for subfield in record[field]:
                    for ind, values in subfield.items():
                        indicators = []
                        subfields = []
                        for elem in values:
                            for k, v in elem.items():
                                if isinstance(v, str):
                                    subfields.append(k)
                                    subfields.append(v)
                                elif isinstance(v, list):
                                    for subfield_elem in v:
                                        subfields.append(k)
                                        subfields.append(subfield_elem)
                        for elem in ind:
                            indicators.append(elem)
                        Mrecord.add_field(Field(tag=str(field),
                                                indicators=indicators,
                                                subfields=subfields))
    return Mrecord.as_marc()
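# A hedged usage sketch for transpose_to_marc21 above. The flat-dict input
# layout and the isint() helper shown here are assumptions inferred from the
# function body, not a documented interface.
def isint(value):
    # Assumed helper: True if the key looks like a numeric MARC tag.
    try:
        int(value)
        return True
    except ValueError:
        return False

flat_record = {
    "_LEADER": "00000nam a2200000u  4500",
    "001": "12345",
    # data field: list of {indicator-string: [ {code: value}, ... ]}
    "245": [{"10": [{"a": "Test title"}, {"b": "a subtitle"}]}],
}
marc21_bytes = transpose_to_marc21(flat_record)  # ISO 2709 bytes from as_marc()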
def next(self):
    """
    To support iteration. Some MARC files separate records with a \r\n.
    """
    first2 = self.file_handle.read(2)
    while first2 == "\r\n":
        first2 = self.file_handle.read(2)
    first5 = first2 + self.file_handle.read(3)
    if not first5 or first5 == "\x00\x00\x00\x00\x00":
        raise StopIteration
    if len(first5) < 5:
        raise RecordLengthInvalid

    length = int(first5)
    chunk = self.file_handle.read(length - 5)
    chunk = first5 + chunk
    record = Record(chunk,
                    to_unicode=self.to_unicode,
                    force_utf8=self.force_utf8,
                    hide_utf8_warnings=self.hide_utf8_warnings,
                    utf8_handling=self.utf8_handling)
    return record
def element(self, element_dict, name=None):
    if not name:
        self._record = Record()
        self.element(element_dict, 'leader')
    elif name == 'leader':
        self._record.leader = element_dict[name]
        self.element(element_dict, 'fields')
    elif name == 'fields':
        fields = iter(element_dict[name])
        for field in fields:
            tag, remaining = field.popitem()
            self._field = Field(tag)
            if self._field.is_control_field():
                self._field.data = remaining
            else:
                self.element(remaining, 'subfields')
                self._field.indicators.extend(
                    [remaining['ind1'], remaining['ind2']])
            self._record.add_field(self._field)
        self.process_record(self._record)
    elif name == 'subfields':
        subfields = iter(element_dict[name])
        for subfield in subfields:
            code, text = subfield.popitem()
            self._field.add_subfield(code, text)
def test_process_record(self):
    for record_type in self.records:
        for r in self.records[record_type]:
            original_record = Record()
            # The record type is read from position 6 of the leader:
            if record_type == "music":
                original_record.leader = "XXXXXXcX"
            elif record_type == "text":
                original_record.leader = "XXXXXXaX"
            elif record_type == "movie":
                original_record.leader = "XXXXXXgX"
            else:
                raise ValueError("Unknown record type under test")
            original_record.add_field(Field(tag='001', data='00000001'))
            if record_type == "movie":
                original_record.add_field(Field(tag='007', data='v'))
            original_fields = []
            for field in r['original']:
                original_fields.append(field)
                original_record.add_field(self.str_to_marc(field))
            new_record = self.cc.process_record(original_record)
            new_fields = []
            result_fields = []
            for field in new_record.get_fields():
                if field.tag not in ['001', '007']:
                    new_fields.append(str(field))
            for field in r['converted']:
                result_fields.append(field)
            self.assertEqual(result_fields, new_fields)
def test_nypl_branch_BT_SERIES_Spanish_prefix(self):
    bib = Record()
    bib.leader = "00000nam a2200000u 4500"
    tags = []
    tags.append(Field(tag="001", data="0001"))
    tags.append(
        Field(tag="245", indicators=["0", "0"], subfields=["a", "Test title"]))
    tags.append(
        Field(
            tag="091",
            indicators=[" ", " "],
            subfields=["a", "J SPA E COMPOUND NAME"],
        ))
    for tag in tags:
        bib.add_ordered_field(tag)

    mod_bib = patches.bib_patches("nypl", "branches", "cat", "BT SERIES", bib)

    correct_indicators = [" ", " "]
    correct_subfields = ["p", "J SPA", "a", "E", "c", "COMPOUND NAME"]
    self.assertEqual(correct_indicators, mod_bib.get_fields("091")[0].indicators)
    self.assertEqual(correct_subfields, mod_bib.get_fields("091")[0].subfields)
def read_iso(file_name: str) -> list:
    result = []
    temp_name = "临时.iso"
    # Read in the data line by line.
    fp = open(file_name, 'r', encoding='utf-8')
    for index, data in enumerate(fp):
        # Write the current line to a temporary file.
        fp_temp = open(temp_name, 'w', encoding='utf-8')
        fp_temp.write(data)
        fp_temp.close()
        # Read it back as MARC.
        fh = open(temp_name, 'rb')
        try:
            reader = MARCReader(fh)
            record = next(reader)
        except (NoFieldsFound, UnicodeDecodeError):
            # Records that were never scraped from the site are stored as
            # empty placeholder lines; keep the position with an empty Record.
            record = Record()
        except RecordLengthInvalid:
            # A trailing newline at the end of the file was read; stop.
            break
        finally:
            fh.close()
        result.append(record)
    fp.close()
    os.remove(temp_name)
    return result
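# Usage sketch for read_iso above; the file name is hypothetical and each
# line of the input file is expected to hold one ISO 2709 record.
for record in read_iso("scraped_records.iso"):
    # Placeholder records have no fields, so guard the title lookup.
    title = record["245"]["a"] if record["245"] else None
    print(title)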
def element(self, element_dict, name=None):
    """Converts a JSON `element_dict` to pymarc fields."""
    if not name:
        self._record = Record()
        self.element(element_dict, "leader")
    elif name == "leader":
        self._record.leader = element_dict[name]
        self.element(element_dict, "fields")
    elif name == "fields":
        fields = iter(element_dict[name])
        for field in fields:
            tag, remaining = field.popitem()
            self._field = Field(tag)
            if self._field.is_control_field():
                self._field.data = remaining
            else:
                self.element(remaining, "subfields")
                self._field.indicators.extend(
                    [remaining["ind1"], remaining["ind2"]])
            self._record.add_field(self._field)
        self.process_record(self._record)
    elif name == "subfields":
        subfields = iter(element_dict[name])
        for subfield in subfields:
            code, text = subfield.popitem()
            self._field.add_subfield(code, text)
def test_nypl_branches_BT_SERIES_YA_graphic_novel_compound_name(self):
    bib = Record()
    bib.leader = "00000nam a2200000u 4500"
    tags = []
    tags.append(Field(tag="001", data="0001"))
    tags.append(
        Field(tag="245", indicators=["0", "0"], subfields=["a", "Test title"]))
    tags.append(
        Field(
            tag="091",
            indicators=[" ", " "],
            subfields=["a", "GRAPHIC GN FIC COMPOUND NAME"],
        ))
    for tag in tags:
        bib.add_ordered_field(tag)

    mod_bib = patches.bib_patches("nypl", "branches", "cat", "BT SERIES", bib)

    correct_indicators = [" ", " "]
    correct_subfields = ["f", "GRAPHIC", "a", "GN FIC", "c", "COMPOUND NAME"]
    self.assertEqual(correct_indicators, mod_bib.get_fields("091")[0].indicators)
    self.assertEqual(correct_subfields, mod_bib.get_fields("091")[0].subfields)
def __next__(self):
    first5 = self.file_handle.read(5)
    if not first5:
        raise StopIteration
    if len(first5) < 5:
        raise RecordLengthInvalid
    try:
        length = int(first5)
    except ValueError:
        raise RecordLengthInvalid

    chunk = self.file_handle.read(length - 5)
    chunk = first5 + chunk
    self._current_chunk = chunk
    self._current_exception = None
    try:
        record = Record(
            chunk,
            to_unicode=self.to_unicode,
            force_utf8=self.force_utf8,
            hide_utf8_warnings=self.hide_utf8_warnings,
            utf8_handling=self.utf8_handling,
            file_encoding=self.file_encoding,
        )
    except (PymarcException, UnicodeDecodeError, ValueError) as ex:
        if self.permissive:
            self._current_exception = ex
            record = None
        else:
            raise ex
    return record
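# A hedged usage sketch for the permissive __next__ above, assuming it belongs
# to a MARCReader-style class as in recent pymarc releases: in permissive mode
# a damaged record is yielded as None instead of raising, and the exception
# and raw chunk can be inspected on the reader. File name is hypothetical.
with open("records.mrc", "rb") as fh:
    reader = MARCReader(fh, to_unicode=True, permissive=True)
    for record in reader:
        if record is None:
            # current_exception / current_chunk expose what went wrong.
            print("skipping bad record:", reader.current_exception)
            continue
        print(record["245"])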
def search(self, key, value, marc_flag=False):
    """Convenience function.
    Called by utils.app_helper.HandlerHelper.query_josiah()"""
    try:
        qstring = self.build_qstring(key, value)
        qobject = self.build_qobject(qstring)
        resultset = self.connection.search(qobject)
        log.debug('resultset, ```%s```' % resultset)
        log.debug('len(resultset), `%s`' % len(resultset))
        items = []
        for result in resultset:
            result_dct = {'pymarc_obj': None, 'holdings_data': None}
            result_dct['pymarc_obj'] = None
            try:
                result_dct['pymarc_obj'] = Record(
                    data=result.data.bibliographicRecord.encoding[1])
                result_dct['holdings_data'] = self.add_holdings_data(result)
                items.append(result_dct)
            except AttributeError as e:
                log.warning(
                    'exception getting bibliographicRecord, ```%s```, so skipping this record'
                    % unicode(repr(e)))
                # note: i considered still checking & returning the holdings, but decided
                # against it because i need to be able to return a bib if something useful
                # is found in a holdings entry.
                pass
        log.debug('len(items), `%s`; items, %s' % (len(items), pprint.pformat(items)))
        return items
    except Exception as e:
        self.close_connection()
        message = 'exception, ```%s```' % unicode(repr(e))
        # error_dict = self.make_error_dict()
        # log.error('in z3950_wrapper.Searcher.search(); error-info, `%s`' % pprint.pformat(error_dict))
        log.error(message)
def test_add_summary(self):
    work = self._work(with_license_pool=True)
    work.summary_text = "<p>Summary</p>"

    record = Record()
    Annotator.add_summary(record, work)
    self._check_field(record, "520", {"a": b" Summary "})
def test_949_items_empty_price_subfield(self):
    b = Record()
    b.add_field(
        Field(tag='949', indicators=[' ', '1'], subfields=['p', '']))
    bibs.write_marc21('specs_test.mrc', b)
    passed, report = local_specs.local_specs_validation(
        'nypl', ['specs_test.mrc'], self.ncl)
    self.assertIn('"p" subfield has incorrect price format.', report)
def test_949_items_stat_code_incorrect(self):
    b = Record()
    b.add_field(
        Field(tag='949', indicators=[' ', '1'], subfields=['t', '600']))
    bibs.write_marc21('specs_test.mrc', b)
    passed, report = local_specs.local_specs_validation(
        'nypl', ['specs_test.mrc'], self.ncl)
    self.assertIn('"t" subfield has incorrect value.', report)
def test_960_items_correct_price_format(self):
    b = Record()
    b.add_field(
        Field(tag='960', indicators=[' ', '1'], subfields=['p', '9.99']))
    bibs.write_marc21('specs_test.mrc', b)
    passed, report = local_specs.local_specs_validation(
        'bpl', ['specs_test.mrc'], self.bcl)
    self.assertNotIn('"p" subfield has incorrect price format.', report)
def test_960_items_incorrect_format(self):
    b = Record()
    b.add_field(
        Field(tag='960', indicators=[' ', ' '], subfields=['r', 'z']))
    bibs.write_marc21('specs_test.mrc', b)
    passed, report = local_specs.local_specs_validation(
        'bpl', ['specs_test.mrc'], self.bcl)
    self.assertIn('"r" subfield has incorrect value.', report)