def process_object_row(object, current_id):
    """Accumulate one flexfield row onto the current object dict.

    Rows arrive grouped by object id; when the id changes, the finished
    object is saved and the next one is fetched from Elasticsearch.
    Reads the module-level ``row`` and ``indices``.
    """
    object_id = row[indices['object_id_index']]
    classification = CLASSIFICATIONS.get(int(row[indices['classification_id_index']]))
    if object_id != current_id:
        # New object id: persist what we have, then load the next document.
        save(object)
        current_id = object_id
        object = {}
        if not elasticsearch_connection.item_exists(object_id, classification):
            print("%s could not be found!" % object_id)
            return (object, current_id)
        object = elasticsearch_connection.get_item(object_id, classification)
    # Group flexfield name/value pairs under their group name.
    groups = object.setdefault('flexfields', {})
    entries = groups.setdefault(row[indices['group_name_index']], [])
    entries.append({row[indices['field_name_index']]: row[indices['field_value_index']]})
    return (object, current_id)
def process_object_row(object, current_id):
    """Attach one alternate-number row to the current object.

    Reads the module-level ``row``/``indices``. When the object id
    changes between rows, the finished object is saved and the next is
    fetched from Elasticsearch.
    """
    object_id = row[indices['object_id_index']]
    classification_key = int(row[indices['classification_id_index']])
    classification = CLASSIFICATIONS.get(classification_key)
    if object_id != current_id:
        # New object id: persist the completed object, load the next.
        save(object)
        current_id = object_id
        object = {}
        if elasticsearch_connection.item_exists(object_id, classification):
            object = elasticsearch_connection.get_item(object_id, classification)
        else:
            print("%s could not be found!" % object_id)
            return (object, current_id)
    if 'altnums' not in object:
        object['altnums'] = []
    altnum = row[indices['altnum_index']]
    # Strip the leading "<prefix>_" (find() == -1 keeps the whole string).
    without_prefix = altnum[altnum.find('_') + 1:]
    raw_description = row[indices['description_index']]
    description = raw_description if raw_description != "NULL" else ""
    object['altnums'].append({
        "altnum": altnum,
        "description": description,
        'without_prefix': without_prefix,
    })
    # BUG FIX: guard 'allnumbers' the same way 'altnums' is guarded above —
    # a document fetched from Elasticsearch without this field used to
    # raise KeyError here.
    object.setdefault('allnumbers', []).extend((altnum, without_prefix))
    return (object, current_id)
def process_object_row(object, current_id):
    """Record one alternate-number row on the current object.

    Saves and reloads the working object whenever the object id changes
    between rows; reads the module-level ``row`` and ``indices``.
    """
    object_id = row[indices['object_id_index']]
    classification_key = int(row[indices['classification_id_index']])
    classification = CLASSIFICATIONS.get(classification_key)
    if object_id != current_id:
        # Flush the completed object before starting the next one.
        save(object)
        current_id = object_id
        object = {}
        if elasticsearch_connection.item_exists(object_id, classification):
            object = elasticsearch_connection.get_item(object_id, classification)
        else:
            print("%s could not be found!" % object_id)
            return (object, current_id)
    if 'altnums' not in object:
        object['altnums'] = []
    altnum = row[indices['altnum_index']]
    # Drop the leading "<prefix>_" from the alternate number.
    without_prefix = altnum[altnum.find('_') + 1:]
    raw_description = row[indices['description_index']]
    description = raw_description if raw_description != "NULL" else ""
    object['altnums'].append({
        "altnum": altnum,
        "description": description,
        'without_prefix': without_prefix,
    })
    object['allnumbers'].extend((altnum, without_prefix))
    return (object, current_id)
def process_object_row(object, current_id):
    """Add one flexfield name/value pair to the current object."""
    object_id = row[indices['object_id_index']]
    classification_key = int(row[indices['classification_id_index']])
    classification = CLASSIFICATIONS.get(classification_key)
    if object_id != current_id:
        # Object id changed: persist the finished object, load the next.
        save(object)
        current_id = object_id
        object = {}
        if not elasticsearch_connection.item_exists(object_id, classification):
            print("%s could not be found!" % object_id)
            return (object, current_id)
        object = elasticsearch_connection.get_item(object_id, classification)
    flexfields = object.setdefault('flexfields', {})
    groupname = row[indices['group_name_index']]
    fieldname = row[indices['field_name_index']]
    fieldvalue = row[indices['field_value_index']]
    flexfields.setdefault(groupname, []).append({fieldname: fieldvalue})
    return (object, current_id)
def process_object_row(object, current_id):
    """Attach one related-photo row to the current object document."""
    id = row[id_index]
    classification = CLASSIFICATIONS.get(int(row[classification_id_index]))
    if id != current_id:
        # One object may span several rows; flush the finished one first.
        save(object)
        current_id = id
        object = {}
        if not elasticsearch_connection.item_exists(id, classification):
            print("%s could not be found!" % id)
            return (object, current_id)
        object = elasticsearch_connection.get_item(id, classification)
    related = object.setdefault('relateditems', {})
    media_master_id = row[media_master_id_index]
    related.setdefault('photos', []).append({
        'id': media_master_id,
        'displaytext': media_master_id,
    })
    return (object, current_id)
def process_constituent_row(constituent, current_id):
    """Append one related-object row to the current constituent.

    A constituent spans several input rows (one per related object);
    when the id changes, the previous constituent is saved and the next
    is fetched from Elasticsearch.
    """
    constituent_id = row[indices['constituent_id_index']]
    type = CONSTITUENTTYPES.get(int(row[indices['type_id_index']]))
    if constituent_id != current_id:
        save(constituent)
        current_id = constituent_id
        constituent = {}
        if not elasticsearch_connection.item_exists(constituent_id, type):
            print("%s could not be found!" % constituent_id)
            return (constituent, current_id)
        constituent = elasticsearch_connection.get_item(constituent_id, type)
    related = constituent.setdefault('relateditems', {})
    classification_key = int(row[indices['classification_id_index']])
    classification = CLASSIFICATIONS.get(classification_key)
    object_id = int(row[indices['object_id_index']])
    thumbnail_url = get_media_url(
        row[indices['thumb_path_index']], row[indices['thumb_file_index']])
    raw_date = row[indices['object_date_index']]
    date = "" if raw_date.lower() == "null" else raw_date
    object_title = row[indices['object_title_index']]
    object_number = row[indices['object_number_index']]
    # Untitled diary pages derive a title from the object number.
    if classification == "diarypages" and object_title.lower() == "null":
        object_title = object_number[object_number.find('_') + 1:]
    if object_title.lower() == "null":
        object_title = "[No Title]"
    items = related.setdefault(classification, [])
    items.append({
        'id': object_id,
        'title': object_title,
        'displaytext': object_title,
        'classificationid': classification_key,
        'number': object_number,
        'date': date,
        'thumbnail': thumbnail_url,
    })
    # Keep the related items sorted by display text.
    items.sort(key=operator.itemgetter('displaytext'))
    return (constituent, current_id)
def process_pub_row(pub, current_id):
    """Append one related-object row to the current publication doc."""
    pub_id = row[indices['pub_id_index']]
    if pub_id != current_id:
        # Save the finished publication, then fetch the next one.
        save(pub)
        current_id = pub_id
        pub = {}
        if not elasticsearch_connection.item_exists(pub_id, 'pubdocs', ELASTICSEARCH_INDEX):
            print("%s could not be found!" % pub_id)
            return (pub, current_id)
        pub = elasticsearch_connection.get_item(pub_id, 'pubdocs', ELASTICSEARCH_INDEX)
    related = pub.setdefault('relateditems', {})
    classification_key = int(row[indices['classification_id_index']])
    classification = CLASSIFICATIONS.get(classification_key)
    object_id = int(row[indices['object_id_index']])
    thumbnail_url = get_media_url(
        row[indices['thumb_path_index']], row[indices['thumb_file_index']])
    raw_drs = row[indices['drs_id']]
    drs_id = "" if raw_drs.lower() == "null" else raw_drs
    has_manifest = drs_id != ""
    # Fall back to a DRS-derived thumbnail when TMS has none.
    if not thumbnail_url and drs_id:
        thumbnail_url = create_thumbnail_url(drs_id)
    raw_date = row[indices['object_date_index']]
    date = "" if raw_date.lower() == "null" else raw_date
    object_title = row[indices['object_title_index']]
    object_number = row[indices['object_number_index']]
    if classification == "diarypages" and object_title.lower() == "null":
        object_title = object_number[object_number.find('_') + 1:]
    if object_title.lower() == "null":
        object_title = "[No Title]"
    items = related.setdefault(classification, [])
    items.append({
        'id': object_id,
        'title': object_title,
        'displaytext': object_title,
        'classificationid': classification_key,
        'number': object_number,
        'date': date,
        'thumbnail': thumbnail_url,
        'has_manifest': has_manifest,
    })
    # Keep the related items sorted by display text.
    items.sort(key=operator.itemgetter('displaytext'))
    return (pub, current_id)
def process_media_row(media, current_id):
    """Attach one related-object row to the current media document."""
    id = row[indices['media_id_index']]
    media_type_key = int(row[indices['media_type_id_index']])
    media_type = MEDIATYPES.get(media_type_key)
    # For now, ignore Microfilm (4) and Document (5) media types.
    if media_type_key in (4, 5):
        return (media, current_id)
    if id != current_id:
        save(media)
        current_id = id
        media = {}
        if not elasticsearch_connection.item_exists(id, media_type):
            print("%s could not be found!" % id)
            return (media, current_id)
        media = elasticsearch_connection.get_item(id, media_type)
    related = media.setdefault('relateditems', {})
    classification_key = int(row[indices['classification_id_index']])
    classification = CLASSIFICATIONS.get(classification_key)
    object_id = int(row[indices['object_id_index']])
    thumbnail_url = get_media_url(
        row[indices['thumb_path_index']], row[indices['thumb_file_index']])
    raw_date = row[indices['object_date_index']]
    date = "" if raw_date.lower() == "null" else raw_date
    object_title = row[indices['object_title_index']]
    object_number = row[indices['object_number_index']]
    # Derive a title for untitled diary pages from the object number.
    if classification == "diarypages" and object_title.lower() == "null":
        object_title = object_number[object_number.find('_') + 1:]
    if object_title.lower() == "null":
        object_title = "[No Title]"
    items = related.setdefault(classification, [])
    items.append({
        'id': object_id,
        'title': object_title,
        'displaytext': object_title,
        'classificationid': classification_key,
        'number': object_number,
        'date': date,
        'thumbnail': thumbnail_url,
    })
    # Keep the related items ordered by display text.
    items.sort(key=operator.itemgetter('displaytext'))
    return (media, current_id)
def process_object_row(object, current_id):
    """Attach one related-constituent row to the current object."""
    id = row[indices['id_index']]
    classification = CLASSIFICATIONS.get(int(row[indices['classification_id_index']]))
    if id != current_id:
        # One object may span many rows (one per related constituent).
        save(object)
        current_id = id
        object = {}
        if not elasticsearch_connection.item_exists(id, classification):
            print("%s could not be found!" % id)
            return (object, current_id)
        object = elasticsearch_connection.get_item(id, classification)
    related = object.setdefault('relateditems', {})
    constituent_id = row[indices['constituent_id_index']]
    display_name = row[indices['display_name_index']]
    remarks = row[indices['remarks_index']]
    description = remarks if remarks != "NULL" else ""
    raw_display_date = row[indices['display_date_index']]
    display_date = raw_display_date if raw_display_date != "NULL" else ""
    thumbnail_url = get_media_url(
        row[indices['thumb_path_index']], row[indices['thumb_file_index']])
    role = row[indices['role_index']]
    # Track the distinct set of roles seen on this object.
    if role not in object['roles']:
        object['roles'].append(role)
    constituent_dict = {
        'role': row[indices['role_index']],
        'roleid': row[indices['role_id_index']],
        'id': constituent_id,
        'displayname': display_name,
        'displaydate': display_date,
        'displaytext': display_name,
        'description': description,
        'thumbnail': thumbnail_url,
    }
    constituent_type = CONSTITUENTTYPES.get(
        int(row[indices['constituent_type_id_index']]))
    items = related.setdefault(constituent_type, [])
    items.append(constituent_dict)
    # Keep the related items sorted by display text.
    items.sort(key=operator.itemgetter('displaytext'))
    return (object, current_id)
def process_object_row(object, current_id):
    """Attach one related-constituent row; also bucket special roles.

    RoleID 48 ("Mentioned on this page") and RoleID 50 ("Author") are
    copied into dedicated top-level buckets in addition to relateditems.
    """
    id = row[id_index]
    classification = CLASSIFICATIONS.get(int(row[classification_id_index]))
    if id != current_id:
        # One object may span many rows because of related constituents.
        save(object)
        current_id = id
        object = {}
        if not elasticsearch_connection.item_exists(id, classification):
            print("%s could not be found!" % id)
            return (object, current_id)
        object = elasticsearch_connection.get_item(id, classification)
    related = object.setdefault('relateditems', {})
    display_name = row[display_name_index]
    raw_display_date = row[display_date_index]
    constituent_dict = {
        'role': row[role_index],
        'roleid': row[role_id_index],
        'id': row[constituent_id_index],
        'displayname': display_name,
        'displaydate': raw_display_date if raw_display_date != "NULL" else "",
        'displaytext': display_name,
    }
    constituent_type = CONSTITUENTTYPES.get(int(row[constituent_type_id_index]))
    related.setdefault(constituent_type, []).append(constituent_dict)
    # Constituents "Mentioned on this page" (RoleID == 48).
    if constituent_dict['roleid'] == '48':
        mentioned = object.setdefault('mentioned', {})
        mentioned.setdefault('people', []).append(constituent_dict)
    # Authors (RoleID == 50).
    if constituent_dict['roleid'] == '50':
        object.setdefault('author', []).append(constituent_dict)
    return (object, current_id)
def process_object_row(object, current_id):
    """Attach one related-media row (photo/video/3d model) to the object."""
    id = row[indices['id_index']]
    classification = CLASSIFICATIONS.get(int(row[indices['classification_id_index']]))
    if id != current_id:
        # One object may span many rows because of related media.
        save(object)
        current_id = id
        object = {}
        if not elasticsearch_connection.item_exists(id, classification):
            print("%s could not be found!" % id)
            return (object, current_id)
        object = elasticsearch_connection.get_item(id, classification)
    related = object.setdefault('relateditems', {})
    media_type_key = int(row[indices['media_type_id_index']])
    media_type = MEDIATYPES.get(media_type_key)
    media_master_id = row[indices['media_master_id_index']]
    thumbnail_url = get_media_url(
        row[indices['thumb_path_index']], row[indices['thumb_file_index']])
    main_url = get_media_url(
        row[indices['main_path_index']], row[indices['main_file_index']])
    display_text = row[indices['caption_index']]
    # HACK: TMS MediaFormats does not reliably identify video types,
    # so only keep videos whose main file is an mp4.
    if media_type_key == 3 and not row[indices['main_file_index']].endswith('mp4'):
        return (object, current_id)
    if media_type not in related:
        related[media_type] = []
    if media_type == 'photos':
        object['hasphoto'] = "Yes"
    is_primary = row[indices['primary_display_index']] == '1'
    # Surface the primary display media at the top level as well.
    if is_primary:
        object['primarydisplay'] = {
            'thumbnail': thumbnail_url,
            'main': main_url,
            'displaytext': display_text,
        }
    if not (classification == '3dmodels' and media_type == '3dmodels'):
        related[media_type].append({
            'id': media_master_id,
            'displaytext': display_text,
            'primarydisplay': is_primary,
            'thumbnail': thumbnail_url,
            'main': main_url,
        })
    return (object, current_id)
def process_constituent_row(constituent, current_id):
    """Append one related-object row to the current constituent."""
    constituent_id = row[indices["constituent_id_index"]]
    type = CONSTITUENTTYPES.get(int(row[indices["type_id_index"]]))
    if constituent_id != current_id:
        # A constituent spans many rows (one per related object): save
        # the finished one, then fetch the next from Elasticsearch.
        save(constituent)
        current_id = constituent_id
        constituent = {}
        if not elasticsearch_connection.item_exists(constituent_id, type):
            print("%s could not be found!" % constituent_id)
            return (constituent, current_id)
        constituent = elasticsearch_connection.get_item(constituent_id, type)
    related = constituent.setdefault("relateditems", {})
    classification_key = int(row[indices["classification_id_index"]])
    classification = CLASSIFICATIONS.get(classification_key)
    object_id = int(row[indices["object_id_index"]])
    thumbnail_url = get_media_url(
        row[indices["thumb_path_index"]], row[indices["thumb_file_index"]])
    raw_date = row[indices["object_date_index"]]
    date = "" if raw_date.lower() == "null" else raw_date
    object_title = row[indices["object_title_index"]]
    object_number = row[indices["object_number_index"]]
    # Untitled diary pages derive a title from the object number.
    if classification == "diarypages" and object_title.lower() == "null":
        object_title = object_number[object_number.find("_") + 1:]
    if object_title.lower() == "null":
        object_title = "[No Title]"
    items = related.setdefault(classification, [])
    items.append({
        "id": object_id,
        "title": object_title,
        "displaytext": object_title,
        "classificationid": classification_key,
        "number": object_number,
        "date": date,
        "thumbnail": thumbnail_url,
    })
    # Keep the related items sorted by display text.
    items.sort(key=operator.itemgetter("displaytext"))
    return (constituent, current_id)
def process_object_row(object, current_id):
    """Add one related-constituent entry to the object being built."""
    id = row[indices['id_index']]
    classification_key = int(row[indices['classification_id_index']])
    classification = CLASSIFICATIONS.get(classification_key)
    if id != current_id:
        # One object may span many rows (one per related constituent).
        save(object)
        current_id = id
        object = {}
        if elasticsearch_connection.item_exists(id, classification):
            object = elasticsearch_connection.get_item(id, classification)
        else:
            print("%s could not be found!" % id)
            return (object, current_id)
    if 'relateditems' not in object:
        object['relateditems'] = {}
    remarks = row[indices['remarks_index']]
    display_date_raw = row[indices['display_date_index']]
    display_name = row[indices['display_name_index']]
    role = row[indices['role_index']]
    # Maintain the distinct set of constituent roles on the object.
    if role not in object['roles']:
        object['roles'].append(role)
    constituent_dict = {
        'role': row[indices['role_index']],
        'roleid': row[indices['role_id_index']],
        'id': row[indices['constituent_id_index']],
        'displayname': display_name,
        'displaydate': display_date_raw if display_date_raw != "NULL" else "",
        'displaytext': display_name,
        'description': remarks if remarks != "NULL" else "",
        'thumbnail': get_media_url(row[indices['thumb_path_index']],
                                   row[indices['thumb_file_index']]),
    }
    constituent_type_key = int(row[indices['constituent_type_id_index']])
    constituent_type = CONSTITUENTTYPES.get(constituent_type_key)
    if constituent_type not in object['relateditems']:
        object['relateditems'][constituent_type] = []
    object['relateditems'][constituent_type].append(constituent_dict)
    # Keep the related items sorted by display text.
    object['relateditems'][constituent_type].sort(
        key=operator.itemgetter('displaytext'))
    return (object, current_id)
def process_site_row(site, current_id):
    """Append one related-object row to the current site document."""
    site_id = row[indices['site_id_index']]
    if site_id != current_id:
        # A site spans many rows (one per related object): save the
        # finished site, then fetch the next from Elasticsearch.
        save(site)
        current_id = site_id
        site = {}
        if not elasticsearch_connection.item_exists(site_id, 'sites'):
            print("%s could not be found!" % site_id)
            return (site, current_id)
        site = elasticsearch_connection.get_item(site_id, 'sites')
    related = site.setdefault('relateditems', {})
    classification_key = int(row[indices['classification_id_index']])
    classification = CLASSIFICATIONS.get(classification_key)
    object_id = int(row[indices['object_id_index']])
    thumbnail_url = get_media_url(
        row[indices['thumb_path_index']], row[indices['thumb_file_index']])
    raw_date = row[indices['object_date_index']]
    date = "" if raw_date.lower() == "null" else raw_date
    object_title = row[indices['object_title_index']]
    object_number = row[indices['object_number_index']]
    # Untitled diary pages borrow a title from the object number.
    if classification == "diarypages" and object_title.lower() == "null":
        object_title = object_number[object_number.find('_') + 1:]
    if object_title.lower() == "null":
        object_title = "[No Title]"
    items = related.setdefault(classification, [])
    items.append({
        'id': object_id,
        'title': object_title,
        'displaytext': object_title,
        'classificationid': classification_key,
        'number': object_number,
        'date': date,
        'thumbnail': thumbnail_url,
    })
    # Keep the related items sorted by display text.
    items.sort(key=operator.itemgetter('displaytext'))
    return (site, current_id)
def process_object_row(object, current_id):
    """Build the base object document from one TMS export row.

    Copies each column into the object dict (lower-cased column name as
    key), normalizing digit strings to ints, "NULL" to None, and
    stripping trailing whitespace; then derives display/search helper
    fields. Reads the module-level ``row``, ``indices`` and ``columns``.
    """
    id = row[indices['id_index']]
    classification = CLASSIFICATIONS.get(int(row[indices['classification_id_index']]))
    number = row[indices['object_number_index']]
    # Objects are not expected to repeat, but keep the guard anyway.
    if id != current_id:
        save(object)
        current_id = id
        object = {}
    object['classification'] = classification
    # Copy every column into the document under its lower-cased name.
    for index, value in enumerate(columns):
        key = value.lower()
        row_value = row[index]
        # Normalize the raw cell: ints, NULLs, stray double commas.
        if row_value.isdigit():
            row_value = int(row_value)
        elif row_value == "NULL":
            row_value = None
        else:
            row_value = row_value.replace(',,', '')
        if 'title' in key:
            object_title = row_value
            # Untitled diary pages derive a title from the object number.
            if classification == "diarypages" and object_title is None:
                object_title = number[number.find('_') + 1:]
            object[key] = object_title
        else:
            # Strip trailing whitespace on string values.
            object[key] = row_value.rstrip() if type(row_value) is str else row_value
    # Extra derived fields that are not present in the TMS data.
    object['displaytext'] = object['title']
    prefix_idx = number.find('_')
    object['allnumbers'] = list(
        set([number, number[prefix_idx + 1:], "".join(number.split())]))
    object['roles'] = []
    object['hasphoto'] = "No"
    return (object, current_id)
def process_object_row(object, current_id):
    """Build the base object document from one TMS export row.

    Each column lands in the object dict under its lower-cased name
    after cell normalization; a few derived fields are appended at the
    end. Reads the module-level ``row``, ``indices`` and ``columns``.
    """
    id = row[indices['id_index']]
    classification = CLASSIFICATIONS.get(int(row[indices['classification_id_index']]))
    object_number = row[indices['object_number_index']]
    # Objects are not expected to repeat, but keep the guard anyway.
    if id != current_id:
        save(object)
        current_id = id
        object = {}
    object['classification'] = classification
    for index, value in enumerate(columns):
        key = value.lower()
        row_value = row[index]
        # Normalize the raw cell value.
        if row_value.isdigit():
            row_value = int(row_value)
        elif row_value == "NULL":
            row_value = None
        else:
            row_value = row_value.replace(',,', '')
        if 'title' in key:
            object_title = row_value
            # Untitled diary pages get a title from the object number.
            if classification == "diarypages" and object_title is None:
                object_title = object_number[object_number.find('_') + 1:]
            object[key] = object_title
        else:
            # Strip trailing whitespace on string values.
            object[key] = row_value.rstrip() if type(row_value) is str else row_value
    # Derived fields not present in the TMS data.
    object['displaytext'] = object['title']
    prefix_idx = object_number.find('_')
    object['allnumbers'] = [object_number, object_number[prefix_idx + 1:]]
    object['roles'] = []
    object['hasphoto'] = "No"
    return (object, current_id)
def process_object_row(object, current_id):
    """Attach one related-site row to the current object."""
    id = row[indices['id_index']]
    classification = CLASSIFICATIONS.get(int(row[indices['classification_id_index']]))
    if id != current_id:
        # One object may span many rows; flush the finished one first.
        save(object)
        current_id = id
        object = {}
        if not elasticsearch_connection.item_exists(id, classification):
            print("%s could not be found!" % id)
            return (object, current_id)
        object = elasticsearch_connection.get_item(id, classification)
    related = object.setdefault('relateditems', {})
    site_number = row[indices['site_number_index']]
    site_dict = {
        'id': row[indices['site_id_index']],
        'sitename': row[indices['site_name_index']],
        'sitenumber': site_number,
        'displaytext': site_number,
        'thumbnail': get_media_url(
            row[indices['thumb_path_index']], row[indices['thumb_file_index']]),
    }
    sites = related.setdefault('sites', [])
    sites.append(site_dict)
    # Keep the related sites sorted by display text.
    sites.sort(key=operator.itemgetter('displaytext'))
    # Unpublished documents also list sites "Mentioned on this page".
    if classification == "unpubdocs":
        mentioned = object.setdefault('mentioned', {})
        mentioned.setdefault('sites', []).append(site_dict)
    return (object, current_id)
def process_object_row(object, current_id):
    """Record one related-site row on the object being assembled."""
    id = row[indices['id_index']]
    classification_key = int(row[indices['classification_id_index']])
    classification = CLASSIFICATIONS.get(classification_key)
    if id != current_id:
        # One object may span many rows; save the finished one first.
        save(object)
        current_id = id
        object = {}
        if elasticsearch_connection.item_exists(id, classification):
            object = elasticsearch_connection.get_item(id, classification)
        else:
            print("%s could not be found!" % id)
            return (object, current_id)
    if 'relateditems' not in object:
        object['relateditems'] = {}
    site_number = row[indices['site_number_index']]
    site_dict = {
        'id': row[indices['site_id_index']],
        'sitename': row[indices['site_name_index']],
        'sitenumber': site_number,
        'displaytext': site_number,
        'thumbnail': get_media_url(row[indices['thumb_path_index']],
                                   row[indices['thumb_file_index']]),
    }
    if 'sites' not in object['relateditems']:
        object['relateditems']['sites'] = []
    object['relateditems']['sites'].append(site_dict)
    # Keep the related sites sorted by display text.
    object['relateditems']['sites'].sort(key=operator.itemgetter('displaytext'))
    # For unpubdocs, also track sites "Mentioned on this page".
    if classification == "unpubdocs":
        if 'mentioned' not in object:
            object['mentioned'] = {}
        if 'sites' not in object['mentioned']:
            object['mentioned']['sites'] = []
        object['mentioned']['sites'].append(site_dict)
    return (object, current_id)
def process_site_row(site, current_id):
    """Add one related-object entry to the site being assembled."""
    site_id = row[indices['site_id_index']]
    if site_id != current_id:
        # New site id: save the previous site, then load the next one.
        save(site)
        current_id = site_id
        site = {}
        if elasticsearch_connection.item_exists(site_id, 'sites'):
            site = elasticsearch_connection.get_item(site_id, 'sites')
        else:
            print("%s could not be found!" % site_id)
            return (site, current_id)
    if 'relateditems' not in site:
        site['relateditems'] = {}
    classification_key = int(row[indices['classification_id_index']])
    classification = CLASSIFICATIONS.get(classification_key)
    object_id = int(row[indices['object_id_index']])
    thumbnail_url = get_media_url(row[indices['thumb_path_index']],
                                  row[indices['thumb_file_index']])
    object_date = row[indices['object_date_index']]
    date = "" if object_date.lower() == "null" else object_date
    object_title = row[indices['object_title_index']]
    object_number = row[indices['object_number_index']]
    # Untitled diary pages borrow a title from the object number.
    if classification == "diarypages" and object_title.lower() == "null":
        underscore = object_number.find('_')
        object_title = object_number[underscore + 1:]
    if object_title.lower() == "null":
        object_title = "[No Title]"
    if classification not in site['relateditems']:
        site['relateditems'][classification] = []
    site['relateditems'][classification].append({
        'id': object_id,
        'title': object_title,
        'displaytext': object_title,
        'classificationid': classification_key,
        'number': object_number,
        'date': date,
        'thumbnail': thumbnail_url,
    })
    # Related items stay sorted by display text.
    site['relateditems'][classification].sort(
        key=operator.itemgetter('displaytext'))
    return (site, current_id)
def process_pub_row(pub, current_id):
    """Append one related-object row to the current publication doc."""
    pub_id = row[indices['pub_id_index']]
    if pub_id != current_id:
        # Save the finished publication, then fetch the next one.
        save(pub)
        current_id = pub_id
        pub = {}
        if not elasticsearch_connection.item_exists(pub_id, 'pubdocs'):
            print("%s could not be found!" % pub_id)
            return (pub, current_id)
        pub = elasticsearch_connection.get_item(pub_id, 'pubdocs')
    related = pub.setdefault('relateditems', {})
    classification_key = int(row[indices['classification_id_index']])
    classification = CLASSIFICATIONS.get(classification_key)
    object_id = int(row[indices['object_id_index']])
    thumbnail_url = get_media_url(
        row[indices['thumb_path_index']], row[indices['thumb_file_index']])
    raw_date = row[indices['object_date_index']]
    date = "" if raw_date.lower() == "null" else raw_date
    object_title = row[indices['object_title_index']]
    object_number = row[indices['object_number_index']]
    # Untitled diary pages derive a title from the object number.
    if classification == "diarypages" and object_title.lower() == "null":
        object_title = object_number[object_number.find('_') + 1:]
    if object_title.lower() == "null":
        object_title = "[No Title]"
    items = related.setdefault(classification, [])
    items.append({
        'id': object_id,
        'title': object_title,
        'displaytext': object_title,
        'classificationid': classification_key,
        'number': object_number,
        'date': date,
        'thumbnail': thumbnail_url,
    })
    # Keep the related items sorted by display text.
    items.sort(key=operator.itemgetter('displaytext'))
    return (pub, current_id)
def process_object_row(object, current_id):
    """Attach one related-site row to the object (legacy index form)."""
    id = row[id_index]
    classification = CLASSIFICATIONS.get(int(row[classification_id_index]))
    if id != current_id:
        # One object may span many rows because of related sites.
        save(object)
        current_id = id
        object = {}
        if not elasticsearch_connection.item_exists(id, classification):
            print("%s could not be found!" % id)
            return (object, current_id)
        object = elasticsearch_connection.get_item(id, classification)
    related = object.setdefault('relateditems', {})
    site_name = row[site_name_index]
    site_number = row[site_number_index]
    site_dict = {
        'id': row[site_id_index],
        'sitename': site_name,
        'sitenumber': site_number,
        'displaytext': "%s, %s" % (site_name, site_number),
    }
    related.setdefault('sites', []).append(site_dict)
    # Unpublished documents also record sites "Mentioned on this page".
    if classification == "unpubdocs":
        mentioned = object.setdefault('mentioned', {})
        mentioned.setdefault('sites', []).append(site_dict)
    return (object, current_id)
def process_object_row(object, current_id):
    """Attach one related unpublished-document row to the object."""
    id = row[indices['id_index']]
    classification = CLASSIFICATIONS.get(int(row[indices['classification_id_index']]))
    if id != current_id:
        # One object may span many rows; save the finished one first.
        save(object)
        current_id = id
        object = {}
        if not elasticsearch_connection.item_exists(id, classification):
            print("%s could not be found!" % id)
            return (object, current_id)
        object = elasticsearch_connection.get_item(id, classification)
    related = object.setdefault('relateditems', {})
    raw_date = row[indices['object_date_index']]
    unpublished_title = row[indices['unpublished_title_index']]
    unpubdocs = related.setdefault('unpubdocs', [])
    unpubdocs.append({
        'id': row[indices['unpublished_id_index']],
        'text': unpublished_title,
        'displaytext': unpublished_title,
        'date': "" if raw_date.lower() == "null" else raw_date,
        'number': row[indices['object_number_index']],
        'thumbnail': get_media_url(row[indices['thumb_path_index']],
                                   row[indices['thumb_file_index']]),
    })
    # Keep the related documents sorted by display text.
    unpubdocs.sort(key=operator.itemgetter('displaytext'))
    return (object, current_id)
def process_object_row(object, current_id): id = row[indices['id_index']] classification_key = int(row[indices['classification_id_index']]) classification = CLASSIFICATIONS.get(classification_key) if id != current_id: # may have multiple rows for one object because of many related constituents save(object) current_id = id object = {} if elasticsearch_connection.item_exists(id, classification): object = elasticsearch_connection.get_item(id, classification) else: print "%s could not be found!" % id return (object, current_id) if 'relateditems' not in object: object['relateditems'] = {} reference_id = row[indices['reference_id_index']] title = row[indices['title_index']] boiler_text = row[indices['boiler_text_index']] date = row[indices['date_index']] main_url = get_media_url(row[indices['path_index']], row[indices['file_index']]) if 'pubdocs' not in object['relateditems']: object['relateditems']['pubdocs'] = [] object['relateditems']['pubdocs'].append({ 'id': reference_id, 'boilertext': boiler_text, 'displaytext': boiler_text, 'date': date, 'url': main_url }) # keep the related items sorted object['relateditems']['pubdocs'].sort( key=operator.itemgetter('displaytext')) return (object, current_id)
def process_site_row(site, current_id): site_id = row[site_id_index] #if site_id not in SAMPLE_SITES: # continue if site_id != current_id: # will likely have multiple rows for one site because of many related objects # only get a new site if we have a new site id, but first save old site to elasticsearch save(site) current_id = site_id site = {} if elasticsearch_connection.item_exists(site_id, 'sites'): site = elasticsearch_connection.get_item(site_id, 'sites') else: print "%s could not be found!" % site_id return (site, current_id) if 'relateditems' not in site: site['relateditems'] = {} classification_key = int(row[classification_id_index]) classification = CLASSIFICATIONS.get(classification_key) object_id = int(row[object_id_index]) object_title = row[object_title_index] if classification == "diarypages" and object_title.lower() == "null": object_number = row[object_number_index] idx = object_number.find('_') object_title = object_number[idx+1:] if object_title.lower() == "null": object_title = "[No Title]" if classification not in site['relateditems']: site['relateditems'][classification] = [] site['relateditems'][classification].append({ 'id' : object_id, 'title' : object_title, 'displaytext' : object_title, 'classificationid' : classification_key}) return (site, current_id)
def process_object_row(object, current_id): id = row[indices['id_index']] classification_key = int(row[indices['classification_id_index']]) classification = CLASSIFICATIONS.get(classification_key) if id != current_id: # may have multiple rows for one object because of many related constituents save(object) current_id = id object = {} if elasticsearch_connection.item_exists(id, classification): object = elasticsearch_connection.get_item(id, classification) else: print "%s could not be found!" % id return(object, current_id) if 'relateditems' not in object: object['relateditems'] = {} unpublished_id = row[indices['unpublished_id_index']] unpublished_title = row[indices['unpublished_title_index']] number = row[indices['object_number_index']] date = "" if row[indices['object_date_index']].lower() == "null" else row[indices['object_date_index']] thumbnail_url = get_media_url(row[indices['thumb_path_index']], row[indices['thumb_file_index']]) if 'unpubdocs' not in object['relateditems']: object['relateditems']['unpubdocs'] = [] object['relateditems']['unpubdocs'].append({ 'id' : unpublished_id, 'text' : unpublished_title, 'displaytext' : unpublished_title, 'date' : date, 'number' : number, 'thumbnail' : thumbnail_url}) # keep the related items sorted object['relateditems']['unpubdocs'].sort(key=operator.itemgetter('displaytext')) return(object, current_id)
def process_object_row(object, current_id): id = row[indices['id_index']] classification_key = int(row[indices['classification_id_index']]) classification = CLASSIFICATIONS.get(classification_key) if id != current_id: # may have multiple rows for one object because of many related constituents save(object) current_id = id object = {} if elasticsearch_connection.item_exists(id, classification): object = elasticsearch_connection.get_item(id, classification) else: print "%s could not be found!" % id return(object, current_id) geocode_dict = {} geocode_dict['id'] = row[indices['geo_code_id_index']] geocode_dict['geocode'] = row[indices['geo_code_index']] geocode_dict['region'] = row[indices['region_index']] geocode_dict['city'] = row[indices['city_index']] object['geocode'] = geocode_dict return(object, current_id)
def process_object_row(object, current_id): id = row[indices['id_index']] classification_key = int(row[indices['classification_id_index']]) classification = CLASSIFICATIONS.get(classification_key) if id != current_id: # may have multiple rows for one object because of many related constituents save(object) current_id = id object = {} if elasticsearch_connection.item_exists(id, classification): object = elasticsearch_connection.get_item(id, classification) else: print "%s could not be found!" % id return (object, current_id) geocode_dict = {} geocode_dict['id'] = row[indices['geo_code_id_index']] geocode_dict['geocode'] = row[indices['geo_code_index']] geocode_dict['region'] = row[indices['region_index']] geocode_dict['city'] = row[indices['city_index']] object['geocode'] = geocode_dict return (object, current_id)
def process_object_row(object, current_id): id = row[indices['id_index']] classification_key = int(row[indices['classification_id_index']]) classification = CLASSIFICATIONS.get(classification_key) if id != current_id: # may have multiple rows for one object because of many related constituents save(object) current_id = id object = {} if elasticsearch_connection.item_exists(id, classification): object = elasticsearch_connection.get_item(id, classification) else: print "%s could not be found!" % id return(object, current_id) if 'relateditems' not in object: object['relateditems'] = {} reference_id = row[indices['reference_id_index']] title = row[indices['title_index']] boiler_text = row[indices['boiler_text_index']] date = row[indices['date_index']] main_url = get_media_url(row[indices['path_index']], row[indices['file_index']]) if 'pubdocs' not in object['relateditems']: object['relateditems']['pubdocs'] = [] object['relateditems']['pubdocs'].append({ 'id' : reference_id, 'boilertext' : boiler_text, 'displaytext' : boiler_text, 'date' : date, 'url' : main_url}) # keep the related items sorted object['relateditems']['pubdocs'].sort(key=operator.itemgetter('displaytext')) return(object, current_id)
def process_object_row(object, current_id): id = row[indices['id_index']] classification_key = int(row[indices['classification_id_index']]) classification = CLASSIFICATIONS.get(classification_key) if id != current_id: # may have multiple rows for one object because of many related constituents save(object) current_id = id object = {} if elasticsearch_connection.item_exists(id, classification): object = elasticsearch_connection.get_item(id, classification) else: print "%s could not be found!" % id return (object, current_id) if 'relateditems' not in object: object['relateditems'] = {} media_type_key = int(row[indices['media_type_id_index']]) media_type = MEDIATYPES.get(media_type_key) number = "" if row[indices['rendition_number_index']].lower( ) == "null" else row[indices['rendition_number_index']] media_master_id = row[indices['media_master_id_index']] thumbnail_url = get_media_url(row[indices['thumb_path_index']], row[indices['thumb_file_index']]) main_url = get_media_url(row[indices['main_path_index']], row[indices['main_file_index']]) description = "" if row[indices['description_index']].lower( ) == "null" else row[indices['description_index']] mediaview = "" if row[indices['media_view_index']].lower( ) == "null" else row[indices['media_view_index']] caption = "" if row[indices['caption_index']].lower( ) == "null" else row[indices['caption_index']] display_text = ": ".join([mediaview, caption]) # this is a bit of a hack because the MediaFormats for videos (in the TMS database) does not correctly identify the type of video # so, make sure we are only using videos that are mp4s if media_type_key == 3: if not row[indices['main_file_index']].endswith('mp4'): return (object, current_id) if media_type not in object['relateditems']: object['relateditems'][media_type] = [] if media_type == 'photos': object['hasphoto'] = "Yes" # add primary photo as a top level item as well if row[indices['primary_display_index']] == '1': object['primarydisplay'] = { 'thumbnail': 
thumbnail_url, 'main': main_url, 'displaytext': display_text, 'number': number, 'description': description } if not (classification == '3dmodels' and media_type == '3dmodels'): object['relateditems'][media_type].append({ 'id': media_master_id, 'displaytext': display_text, 'primarydisplay': True if row[indices['primary_display_index']] == '1' else False, 'thumbnail': thumbnail_url, 'main': main_url, 'number': number, 'description': description }) return (object, current_id)