def delete_all(searchSel):
    """Delete every object named in searchSel; return True iff all succeed.

    Each entry of searchSel is an 'objID#version' selector string.  A
    single SearchFile handle is threaded through the successive
    DeleteObject.do_delete() calls and closed once at the end.  Failures
    are logged and processing continues with the next selector.
    """
    allOk = True
    searchFile = None

    for objstr in searchSel:
        ok, objID, version = ryw.split_objstr(objstr)
        if not ok:
            ryw.give_bad_news('DelSearchAll: invalid objstr: ' + objstr,
                              logging.error)
            allOk = False
            continue

        ok, searchFile = DeleteObject.do_delete(
            objID, version, searchFile=searchFile)
        if ok:
            ryw.db_print('DelSearchAll.delete_all: do_delete succeeded.',
                         18)
        else:
            ryw.give_bad_news(
                'DelSearchAll: DeleteObject.do_delete failed.' + objstr,
                logging.error)
            allOk = False

    # Flush/close the cached search file, if any delete opened one.
    if searchFile:
        searchFile.done()
    return allOk
def process_disk_from_peer_repository(dir_name, diskRoot, overwrite=False):
    """Import objects found on a peer repository's disc image.

    dir_name:  directory containing the incoming 'objects' tree and the
               incoming 'ReverseLists'.
    diskRoot:  root of the disc, used with the Philips map file to
               resolve mirrored data/aux paths.
    overwrite: when True, a non-stub object that already exists locally
               is deleted and re-uploaded; when False it is skipped.

    Returns True on overall success, False on a fatal setup error.
    Per-object failures are logged and that object is skipped.
    """
    objectroot = os.path.join(dir_name, 'objects')
    if not os.path.exists(objectroot):
        logging.info(
            'process_disk_from_peer_repository: no objects directory.' +
            objectroot)
        return True
    
    ## Process all incoming objects

    local_object_root = get_local_object_store_root()
    if local_object_root is None:
        return False

    mapDict = ryw_philips.get_map(diskRoot)

    #
    # BUGFIX: check the map file once, up front.  The old code tested
    # mapDict inside the object loop, so a disc with an unreadable map
    # file but no iterable objects was silently reported as success,
    # and the loop-invariant test was repeated per object.
    #
    if mapDict is None:
        ryw.give_bad_news(
            'process_disk_from_peer_repository: failed to read map file: '+
            diskRoot, logging.error)
        return False

    searchFile = None

    for objectId,version in objectstore.objectversioniterator(objectroot):

        ryw.give_news3('----------', logging.info)

        #
        # We used to just skip objects already present; now presence
        # steers the stub/overwrite logic below instead.
        #
        objectFound = object_version_is_present(local_object_root,
                                                objectId,
                                                version)

        # paths[0]: data, paths[1]: metadata, paths[2]: aux directory.
        paths = objectstore.name_version_to_paths_aux(objectroot,
                                                      objectId, version)

        itemName = objectId + '#' + str(version)

        obdata = get_data_name_mirror(paths[0], diskRoot, mapDict, itemName)
        if not obdata:
            continue

        success,isStub = is_data_stub(obdata)
        if not success:
            continue

        metadata = get_metadata_mirror(paths[1])
        if not metadata:
            continue

        auxdir = get_aux_name_mirror(paths[2], diskRoot, mapDict, itemName)

        # A stub refers to an object we are supposed to already have;
        # a stub for an unknown object is an error on the disc.
        if isStub and not objectFound:
            ryw.give_news3(
                'ProcessDiscs error: is a stub but not found in database: '+
                itemName, logging.error)
            continue
        
        if isStub:
            # Stub + locally present: just merge the incoming metadata.
            success,searchFile = deal_with_stub(
                local_object_root, objectId, version, metadata,
                obdata, auxdir, searchFile = searchFile)
            if success:
                ryw.give_news3(
                    'ProcessDiscs success: meta data processed.',
                    logging.info)
            continue

        if objectFound:
            ryw.give_news3(
                'ProcessDiscs: not a stub but found in the database: '+
                itemName, logging.error)

            if not overwrite:
                ryw.give_news3('processing skipped.', logging.error)
                continue
            
            # Overwrite requested: delete the existing version first,
            # reusing the DeleteObject machinery.
            ryw.give_news3('deleting it...', logging.error)

            #
            # NOTE: deliberately pass searchFile=None instead of reusing
            # the cached handle.  UploadObject.uploadobject() below
            # rewrites the SearchFile underneath us, and flushing a
            # reused (stale) cached copy back to disk would clobber its
            # changes.  (A deadlock when uploadobject() re-locks would
            # have been expected, but was never observed.)
            #
            success,searchFile = DeleteObject.do_delete(
                objectId, version, searchFile=None)
            if not success:
                ryw.give_bad_news(
                    'ProcessDiscs: DeleteObject failed: ' +
                    objectId + '#' + str(version), logging.error)
                continue
            else:
                ryw.db_print('process_disk_from_peer_repository: ' +
                             'do_delete succeeded.', 18)
                
            #
            # falls through to continue onto adding the object.
            #
            
        if not UploadObject.uploadobject(metadata, obdata, auxdir,
                                         hasVersion = True):
            ryw.give_bad_news(
                'process_disk_from_peer_repository: ' +
                'UploadObject.uploadobject failed: '+
                repr(metadata) + ' ' + obdata, logging.critical)
            continue
        else:
            ryw.give_news3('ProcessDiscs success: new data uploaded.',
                           logging.info)
            continue

    # Merge the disc's ReverseLists into the repository's, reusing any
    # SearchFile handle left open by the loop above.
    incomingReverseLists = os.path.join(dir_name, 'ReverseLists')
    existingReverseLists = os.path.join(RepositoryRoot, 'ReverseLists')

    ReverseLists.merge_incoming(
        existingReverseLists, incomingReverseLists, RepositoryRoot,
        searchFile = searchFile)

    if searchFile:
        searchFile.done()
    return True