def go_through_object_store(objectStoreRoot, reverseLists):
    """Scan every object version in the store and index list objects.

    Walks all (objID, version) pairs under objectStoreRoot.  For each
    version whose metadata marks it as a list ('isList' present in the
    'sys_attrs' attribute), reads the list's containees and records the
    reverse mapping via reverseLists.add().  Non-list objects are skipped
    with a progress tick.  Progress and errors are reported through the
    ryw news/logging helpers.

    objectStoreRoot -- filesystem root of the object store to scan.
    reverseLists    -- ReverseLists instance; provides .searchFile for
                       metadata lookups and .add() for registration.
    """
    ryw.give_news2('go_through_object_store: entered...<BR>', logging.info)
    searchFile = reverseLists.searchFile

    for objID, version in objectstore.objectversioniterator(objectStoreRoot):
        objstr = objID + '#' + str(version)
        ryw.db_print2('examining ' + objstr + '...<BR>', 7)

        success, meta = searchFile.get_meta(objID, version)
        if not success or not meta:
            ryw.give_bad_news(
                'go_through_object_store: get_meta2 failed: ' + objstr,
                logging.warning)
            continue

        # A list object is flagged by 'isList' inside its sys_attrs.
        isList = 'sys_attrs' in meta and 'isList' in meta['sys_attrs']
        if not isList:
            ryw.db_print2('go_through_object_store: not a list object: ' +
                          objstr + '<BR>', 6)
            ryw.give_news2(' . ', logging.info)
            continue

        # BUGFIX: message used to read 'go_through_object_sotre'.
        ryw.db_print2('go_through_object_store: found a list.<BR>', 6)
        containees = ReverseLists.read_container_file(
            objID, version, searchFile, RepositoryRoot)
        ryw.db_print2('go_through_object_store: containees are: ' +
                      repr(containees) + '<BR>', 7)

        if not reverseLists.add(objstr, containees):
            ryw.give_bad_news('go_through_object_store: ReverseLists.add ' +
                              'failed: ' + objstr, logging.error)
        else:
            ryw.db_print2('go_through_object_store: successfully added.<BR>',
                          7)

        # Report the object either way; 'content_alias' is optional metadata.
        alias = 'unnamed'
        if 'content_alias' in meta:
            alias = meta['content_alias']
        ryw.give_news2(objstr + ' ' + alias + ', ', logging.info)

    ryw.give_news2('<BR>done.', logging.info)
def process_disk_from_peer_repository(dir_name, diskRoot, overwrite=False):
    """Import all objects found on a peer repository's disc.

    Iterates every (objectId, version) under <dir_name>/objects and, per
    object, decides among four cases:
      - stub + not already local:  error, skip (stub with no local object).
      - stub + already local:      merge the incoming metadata via
                                   deal_with_stub().
      - full + already local:      skip, unless overwrite=True, in which
                                   case the local version is deleted first
                                   and the object re-uploaded.
      - full + not local:          upload via UploadObject.uploadobject().
    Afterwards, merges the disc's ReverseLists into the repository's.

    dir_name  -- mounted root of the incoming disc's repository data.
    diskRoot  -- disc root used to resolve the Philips name map.
    overwrite -- if True, replace local versions of non-stub objects that
                 already exist locally (keyword flag, default False).

    Returns True on overall success (including "nothing to do"), False on
    a fatal setup failure (no local store root, unreadable map file).
    """
    objectroot = os.path.join(dir_name, 'objects')
    if not os.path.exists(objectroot):
        logging.info(
            'process_disk_from_peer_repository: no objects directory.' +
            objectroot)
        return True

    ## Process all incoming objects
    local_object_root = get_local_object_store_root()
    if local_object_root is None:
        return False

    mapDict = ryw_philips.get_map(diskRoot)
    searchFile = None

    for objectId, version in objectstore.objectversioniterator(objectroot):
        ryw.give_news3('----------', logging.info)

        # NOTE: invariant check kept inside the loop on purpose — with an
        # empty objects directory the original returns True even when the
        # map file is unreadable, and we preserve that behavior.
        # BUGFIX: was 'mapDict == None'; None comparisons use 'is'.
        if mapDict is None:
            ryw.give_bad_news(
                'process_disk_from_peer_repository: failed to read map file: '+
                diskRoot, logging.error)
            return False

        # We used to just skip objects already present;
        # now presence selects between the merge/overwrite paths below.
        objectFound = object_version_is_present(local_object_root,
                                                objectId, version)
        paths = objectstore.name_version_to_paths_aux(objectroot,
                                                      objectId, version)
        itemName = objectId + '#' + str(version)

        obdata = get_data_name_mirror(paths[0], diskRoot, mapDict, itemName)
        if not obdata:
            continue
        success, isStub = is_data_stub(obdata)
        if not success:
            continue
        metadata = get_metadata_mirror(paths[1])
        if not metadata:
            continue
        auxdir = get_aux_name_mirror(paths[2], diskRoot, mapDict, itemName)

        if isStub and not objectFound:
            # A stub refers to data we're supposed to already have.
            ryw.give_news3(
                'ProcessDiscs error: is a stub but not found in database: '+
                itemName, logging.error)
            continue

        if isStub:
            # Stub for a locally-present object: just merge its metadata.
            success, searchFile = deal_with_stub(
                local_object_root, objectId, version,
                metadata, obdata, auxdir, searchFile=searchFile)
            if success:
                ryw.give_news3(
                    'ProcessDiscs success: meta data processed.',
                    logging.info)
            continue

        if objectFound:
            ryw.give_news3(
                'ProcessDiscs: not a stub but found in the database: '+
                itemName, logging.error)
            if not overwrite:
                ryw.give_news3('processing skipped.', logging.error)
                continue
            ryw.give_news3('deleting it...', logging.error)
            #
            # at one point, I thought I wanted to be clever and
            # tried to reuse searchFile below. But the trouble is
            # that the UploadObject.uploadobject() call below will
            # change the SearchFile beneath me, and if I reuse
            # the searchFile here, I end up flushing the incorrectly
            # cached version back to disk. I actually would have
            # expected a deadlock when UploadObject.uploadobject()
            # tries to lock again but the deadlock somehow
            # didn't happen...
            #
            success, searchFile = DeleteObject.do_delete(
                objectId, version, searchFile=None)
            if not success:
                ryw.give_bad_news(
                    'ProcessDiscs: DeleteObject failed: ' +
                    objectId + '#' + str(version), logging.error)
                continue
            else:
                ryw.db_print('process_disk_from_peer_repository: ' +
                             'do_delete succeeded.', 18)
            # falls through to re-upload the object below.

        if not UploadObject.uploadobject(metadata, obdata, auxdir,
                                         hasVersion=True):
            ryw.give_bad_news(
                'process_disk_from_peer_repository: ' +
                'UploadObject.uploadobject failed: '+
                repr(metadata) + ' ' + obdata, logging.critical)
            continue
        else:
            ryw.give_news3('ProcessDiscs success: new data uploaded.',
                           logging.info)
            continue

    # Merge the disc's reverse-list index into the repository's copy.
    incomingReverseLists = os.path.join(dir_name, 'ReverseLists')
    existingReverseLists = os.path.join(RepositoryRoot, 'ReverseLists')
    ReverseLists.merge_incoming(
        existingReverseLists, incomingReverseLists, RepositoryRoot,
        searchFile=searchFile)

    # searchFile may have been opened by deal_with_stub()/do_delete();
    # release it before returning.
    if searchFile:
        searchFile.done()
    return True