Example #1
def try_merge(edition, ekey, thing):
    thing_type = thing['type']['key']
    if 'isbn_10' not in edition:
        print(edition)
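    # Use the ISBN-10 as the Amazon identifier when present; otherwise fall back to the edition's explicit ASIN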
    asin = edition.get('isbn_10', None) or edition['asin']
    if 'authors' in edition:
        authors = [i['name'] for i in edition['authors']]
    else:
        authors = []
    a = amazon_merge.build_amazon(edition, authors)
    assert isinstance(asin, six.string_types)
    assert thing_type == '/type/edition'
    #print edition['asin'], ekey
    if 'source_records' in thing:
        if 'amazon:' + asin in thing['source_records']:
            return True
        return source_records_match(a, thing)

    #print 'no source records'
    mc = get_mc(ekey)
    #print 'mc:', mc
    if mc == 'amazon:' + asin:
        return True
    if not mc:
        return False
    data = get_from_local(mc)
    e1 = build_marc(fast_parse.read_edition(data))
    return amazon_merge.attempt_merge(a, e1, threshold, debug=False)
Example #2
def try_merge(edition, ekey, thing):
    thing_type = thing['type']['key']
    if 'isbn_10' not in edition:
        print edition
    asin = edition.get('isbn_10', None) or edition['asin']
    if 'authors' in edition:
        authors = [i['name'] for i in edition['authors']]
    else:
        authors = []
    a = amazon_merge.build_amazon(edition, authors)
    assert isinstance(asin, basestring)
    assert thing_type == '/type/edition'
    #print edition['asin'], ekey
    if 'source_records' in thing:
        if 'amazon:' + asin in thing['source_records']:
            return True
        return source_records_match(a, thing)

    #print 'no source records'
    mc = get_mc(ekey)
    #print 'mc:', mc
    if mc == 'amazon:' + asin:
        return True
    if not mc:
        return False
    data = get_from_local(mc)
    e1 = build_marc(fast_parse.read_edition(data))
    return amazon_merge.attempt_merge(a, e1, threshold, debug=False)
Example #3
def load_part(archive_id, part, start_pos=0):
    print 'load_part:', archive_id, part
    global rec_no, t_prev, load_count
    full_part = archive_id + "/" + part
    f = open(rc['marc_path'] + "/" + full_part)
    if start_pos:
        f.seek(start_pos)
    for pos, loc, data in read_marc_file(full_part, f, pos=start_pos):
        rec_no += 1
        if rec_no % chunk == 0:
            progress(archive_id, rec_no, start_pos, pos)

        if is_loaded(loc):
            continue
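        # MARC fields to index: control numbers (001, 003, 035), LCCN (010), ISBN (020) and title (245)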
        want = ['001', '003', '010', '020', '035', '245']
        try:
            index_fields = fast_parse.index_fields(data, want)
        except KeyError:
            print loc
            print fast_parse.get_tag_lines(data, ['245'])
            raise
        except AssertionError:
            print loc
            raise
        if not index_fields or 'title' not in index_fields:
            continue

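        # Collect existing edition keys that share any of the indexed identifiers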
        edition_pool = pool.build(index_fields)

        if not edition_pool:
            yield loc, data
            continue

        rec = fast_parse.read_edition(data)
        e1 = build_marc(rec)

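        # Compare the incoming record against each candidate edition, following redirects to the live record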
        match = False
        seen = set()
        for k, v in edition_pool.iteritems():
            for edition_key in v:
                if edition_key in seen:
                    continue
                thing = None
                while not thing or thing['type']['key'] == '/type/redirect':
                    seen.add(edition_key)
                    thing = withKey(edition_key)
                    assert thing
                    if thing['type']['key'] == '/type/redirect':
                        print 'following redirect %s => %s' % (edition_key, thing['location'])
                        edition_key = thing['location']
                if try_merge(e1, edition_key, thing):
                    add_source_records(edition_key, loc, thing, data)
                    match = True
                    break
            if match:
                break

        if not match:
            yield loc, data
Example #4
def marc_match(e1, loc):
    rec = fast_parse.read_edition(get_from_local(loc))
    try:
        e2 = build_marc(rec)
    except TypeError:
        print rec
        raise
    return attempt_merge(e1, e2, threshold, debug=False)
Example #5
def get_record(key, mc):
    data = get_from_archive(mc)
    try:
        rec = fast_parse.read_edition(data)
    except (fast_parse.SoundRecording, IndexError, AssertionError):
        print mc
        print key
        return False
    try:
        return marc.build_marc(rec)
    except TypeError:
        print rec
        raise
Example #6
def get_record(key, mc):
    data = get_from_archive(mc)
    try:
        rec = fast_parse.read_edition(data)
    except (fast_parse.SoundRecording, IndexError, AssertionError):
        print(mc)
        print(key)
        return False
    try:
        return marc.build_marc(rec)
    except TypeError:
        print(rec)
        raise
Example #7
def get_marc(loc):
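    # loc is expected to be a "filename:offset:length" pointer into a local MARC dump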
    try:
        filename, p, l = loc.split(':')
    except ValueError:
        return None
    if not os.path.exists(marc_path + filename):
        return None
    f = open(marc_path + filename)
    f.seek(int(p))
    buf = f.read(int(l))
    f.close()
    rec = fast_parse.read_edition(buf)
    if rec:
        return build_marc(rec)
Example #8
def load_part(archive_id, part, start_pos=0):
    print('load_part:', archive_id, part)
    global rec_no, t_prev, load_count
    full_part = archive_id + "/" + part
    f = open(rc['marc_path'] + "/" + full_part)
    if start_pos:
        f.seek(start_pos)
    for pos, loc, data in read_marc_file(full_part, f, pos=start_pos):
        rec_no += 1
        if rec_no % chunk == 0:
            progress(archive_id, rec_no, start_pos, pos)

        if is_loaded(loc):
            continue
        want = ['001', '003', '010', '020', '035', '245']
        try:
            index_fields = fast_parse.index_fields(data, want)
        except KeyError:
            print(loc)
            print(fast_parse.get_tag_lines(data, ['245']))
            raise
        except AssertionError:
            print(loc)
            raise
        if not index_fields or 'title' not in index_fields:
            continue

        edition_pool = pool.build(index_fields)

        if not edition_pool:
            continue

        rec = fast_parse.read_edition(data)
        e1 = build_marc(rec)

        match = False
        seen = set()
        for k, v in edition_pool.items():
            for edition_key in v:
                if edition_key in seen:
                    continue
                seen.add(edition_key)
                thing = withKey(edition_key)
                assert thing
                if try_merge(e1, edition_key, thing):
                    add_source_records(edition_key, loc, thing)
                    match = True

        if not match:
            yield loc, data
Example #9
def get_ia(ia):
    # read MARC record of scanned book from archive.org
    # try the XML first because it has better character encoding
    # if there is a problem with the XML switch to the binary MARC
    xml_file = ia + "_marc.xml"
    loc = ia + "/" + xml_file
    for attempt in range(3):
        if os.path.exists(xml_path + xml_file):
            f = open(xml_path + xml_file)
        else:
            f = urlopen_keep_trying(base + loc)
        if f:
            try:
                return loc, read_xml.read_edition(f)
            except read_xml.BadXML:
                pass
            except xml.parsers.expat.ExpatError:
                print 'XML parse error:', base + loc
                pass
        sleep(2)
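    # XML attempts failed; fall back to the binary MARC record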
    url = base + ia + "/" + ia + "_meta.mrc"
    try:
        f = urlopen_keep_trying(url)
    except urllib2.URLError:
        f = None
    if not f:
        return None, None
    data = f.read()
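    # The first five bytes of a MARC record hold its total length; use them to build the locator string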
    length = data[0:5]
    loc = ia + "/" + ia + "_meta.mrc:0:" + length
    if len(data) == 0:
        print 'zero length MARC for', url
        return None, None
    if 'Internet Archive: Error' in data:
        print 'internet archive error for', url
        return None, None
    try:
        return ia, fast_parse.read_edition(data, accept_electronic = True)
    except (ValueError, AssertionError):
        print repr(data)
        raise
Example #10
def marc_match(a, loc):
    assert loc
    rec = fast_parse.read_edition(get_from_local(loc))
    e1 = build_marc(rec)
    #print 'amazon:', a
    return amazon_merge.attempt_merge(a, e1, threshold, debug=False)
Example #11
        if not a:
            return False
        try:
            return amazon.attempt_merge(a, e1, threshold, debug=False)
        except:
            print a
            print e1
            print thing['key']
            raise
    print 'mc:', mc
    try:
        assert not mc.startswith('ia:')
        data = get_from_local(mc)
        if not data:
            return True
        rec2 = fast_parse.read_edition(data)
    except (fast_parse.SoundRecording, IndexError, AssertionError):
        print mc
        print edition_key
        return False
    except:
        print mc
        print edition_key
        raise
if not rec2:
    return False
try:
    e2 = build_marc(rec2)
except TypeError:
    print rec2
    raise