def in_copy(objname, version, dstDataPath, dstAuxiPath, driveroot, mapDict):
    """Copy one object's data (and optional auxi) directory off the drive.

    Looks up the item's source directories via the drive's map file,
    copies the data directory to dstDataPath and, when it exists, the
    auxi directory to dstAuxiPath.  Returns True on success, False on
    any failure (failures are reported via ryw.give_bad_news).
    """
    itemName = objname + '#' + version

    success,dirName,dataDir,auxiDir = get_map_entry(driveroot, mapDict,
                                                    itemName)
    if not success:
        return False

    ryw.give_news3('  copying ' + dirName + ' ... ', logging.info)

    try:
        su.copytree(dataDir, dstDataPath)
    # was a bare except: narrowed so SystemExit/KeyboardInterrupt still
    # propagate and a long copy can be interrupted cleanly.
    except Exception:
        ryw.give_bad_news('in_copy: failed to copy data directory: ' +
                          itemName + ': ' + dataDir, logging.error)
        return False

    logging.debug('in_copy: successfully copied data directory: ' +
                  itemName + ': ' + dataDir)

    # the auxi directory is optional: copy it only when present.
    if os.path.exists(auxiDir):
        try:
            su.copytree(auxiDir, dstAuxiPath)
        except Exception:
            ryw.give_bad_news('in_copy: failed to copy auxi directory: ' +
                              itemName + ': ' + auxiDir, logging.error)
            return False
        logging.debug('in_copy: successfully copied auxi directory: ' +
                      itemName + ': ' + auxiDir)

    return True
def get_data_name_mirror(objDataDir, diskRoot, mapDict, itemName):
    """Locate the data for an item, preferring the local objects tree.

    When objDataDir does not exist, falls back to the drive's map file
    via ryw_philips.get_map_entry.  Returns the path of the single data
    file when the chosen directory contains exactly one file, otherwise
    the directory path itself; returns None on failure.
    """
    if not os.path.exists(objDataDir):
        success, dirName, dataDir, auxiDir = ryw_philips.get_map_entry(
            diskRoot, mapDict, itemName)
        if not success:
            return None
        ryw.give_news3('found data: ' + dirName, logging.info)
        dataLocation = dataDir
    else:
        dataLocation = objDataDir
        logging.debug('get_data_name_mirror: found under objects: ' +
                      objDataDir)

    try:
        entries = os.listdir(dataLocation)
    except:
        ryw.give_bad_news('get_data_name_mirror: failed to listdir: ' +
                          dataLocation, logging.error)
        return None

    # a directory holding exactly one plain file means the file itself
    # is the data; anything else means the whole directory is.
    singleFile = (len(entries) == 1 and
                  os.path.isfile(os.path.join(dataLocation, entries[0])))
    if singleFile:
        dataName = os.path.join(dataLocation, entries[0])
    else:
        dataName = dataLocation

    logging.debug('get_data_name_mirror: got data name: ' + dataName)
    return dataName
def merge_incoming(existingName, incomingName, repositoryRoot,
                   searchFile=None):
    """called by ProcessDiscs.py

    Merges the incoming ReverseLists file into the existing one.
    Opens a search file when the caller does not supply one, and closes
    both lists plus the search file before returning.  Returns True on
    success (or when there is nothing to merge), False on failure.
    """

    logging.info('merge_incoming: ' + existingName + ' <- ' + incomingName)
    
    # a missing incoming list is not an error: nothing to merge.
    # NOTE(review): the 'copy_reverse_lists:' tag below looks copy-pasted
    # from another caller -- confirm the label is intentional.
    if not ryw.is_valid_file(incomingName, 'copy_reverse_lists:'):
        ryw.give_news3('merge_incoming: incoming ReverseLists not found.',
                       logging.info)
        return True

    if not searchFile:
        # NOTE(review): uses the module-level RepositoryRoot here, not
        # the repositoryRoot parameter used further down -- confirm this
        # is intentional.
        success,searchFile = ryw.open_search_file(
            'merge_incoming:',
            os.path.join(RepositoryRoot, 'WWW', 'logs'),
            'upload.log',
            os.path.join(RepositoryRoot, 'SearchFile'),
            False)
        if not success:
            if searchFile:
                searchFile.done()
            ryw.give_bad_news('merge_incoming: open search file failed. ',
                              logging.critical)
            return False

    success,existingRL = open_reverse_lists('ReverseLists.merge_incoming:',
                                            '', '', existingName, True,
                                            searchFile = searchFile,
                                            repositoryRoot = repositoryRoot)
    if not success:
        ryw.give_bad_news('merge_incoming: failed to open existing list.',
                          logging.critical)
        if existingRL:
            existingRL.done()
        return False

    # the incoming list is opened without locking (skipLk=True) and may
    # have no search file attached.
    success,incomingRL = open_reverse_lists('ReverseLists.merge_incoming:',
                                            '', '', incomingName, False,
                                            skipLk = True,
                                            allowNullSearchFile = True)
    if not success:
        ryw.give_bad_news('merge_incoming: failed to open incoming list.',
                          logging.error)
        if incomingRL:
            incomingRL.done()
        return False
        
    success = existingRL.merge(incomingRL, repositoryRoot)

    existingRL.done()
    incomingRL.done()

    # NOTE(review): this closes the search file even when it was passed
    # in by the caller -- callers must not reuse their handle afterwards.
    if searchFile:
        searchFile.done()
    return success
def change_meta(meta, newObjStoreRoot):
    """Point meta['objectstore'] at newObjStoreRoot, reporting the change.

    Complains (but continues, treating the old store as '') when the
    meta record has no existing 'objectstore' entry.
    """
    # 'in' instead of the removed dict.has_key() -- works on Python 2 and 3.
    if 'objectstore' not in meta:
        ryw.give_bad_news3(
            'meta has no object store: ' +
            meta['id'] + '#' + str(meta['version']))
        ryw.give_bad_news3('    but continuing...')
        oldObjStore = ''
    else:
        oldObjStore = meta['objectstore']
        ryw.db_print3(meta['id'] + '#' + str(meta['version']), 64)
        ryw.db_print3(oldObjStore, 64)

    meta['objectstore'] = newObjStoreRoot
    ryw.give_news3(meta['id'] + '#' + str(meta['version']) + ': ' +
                   oldObjStore + ' -> ' + newObjStoreRoot)
def is_data_stub(dataDir):
    """Tell whether dataDir contains only a Philips stub directory.

    Returns (success, isStub).  success is False only when listing the
    directory fails; a missing path or a non-directory is reported as
    (True, False).
    """
    if not (os.path.exists(dataDir) and os.path.isdir(dataDir)):
        return (True, False)
    try:
        entries = os.listdir(dataDir)
        foundStub = (len(entries) == 1 and
                     entries[0] == ryw_philips.stubDirName)
        if not foundStub:
            return (True, False)
        ryw.give_news3('found stub at: ' + dataDir, logging.info)
        return (True, True)
    except:
        ryw.give_bad_news('is_data_stub failed: ' + dataDir, logging.error)
        return (False, False)
def object_version_is_present(objroot, objid, version):
    """Return True iff the given object#version already exists locally.

    Existence is judged by paths[3] of
    objectstore.name_version_to_paths_aux -- presumably a completion
    marker; semantics live in objectstore.
    """
    allPaths = objectstore.name_version_to_paths_aux(objroot, objid, version)
    if not os.path.exists(allPaths[3]):
        return False
    ryw.give_news3('object already exists in database: '  +
                   objid + '#' + str(version), logging.info)
    #
    # place to test flushing problem
    #
    #time.sleep(10)
    #
    return True
def main():
    """Rewrite every *_META file under an NTFS repository so its
    'objectstore' field points at a new object store root, rebuilding
    the search file along the way.

    usage: python reformat_ntfs.py /mnt/usb0/Postmanet [new_object_store_path]
    """

    #
    # get all the path names straight.
    #
    try:
        ntfsRoot = sys.argv[1]
    except IndexError:
        ryw.give_bad_news3(
            'usage: python reformat_ntfs.py /mnt/usb0/Postmanet ' +
            '[new_object_store_path]')
        sys.exit(-1)

    if len(sys.argv) >= 3:
        newObjStoreRoot = sys.argv[2]
    else:
        newObjStoreRoot = NEW_OBJECT_STORE_ROOT

    ryw.db_print3('newObjectStoreRoot is: ' + newObjStoreRoot, 62)

    if (not ntfsRoot) or (not os.path.exists(ntfsRoot)):
        ryw.give_bad_news3("can't find NTFS root: " + ntfsRoot)
        sys.exit(-1)

    ryw.db_print3('NTFS root is at: ' + ntfsRoot, 61)

    repositoryRoot = os.path.join(ntfsRoot, 'repository')
    oldSearchFileName = os.path.join(repositoryRoot, 'SearchFile')
    dateTimeRand = ryw.date_time_rand()
    newSearchFileName = os.path.join(repositoryRoot,
                                     'NewSearchFile' + dateTimeRand)

    objectStoreRoot = os.path.join(repositoryRoot, 'WWW',
                                   'ObjectStore')
    if (not os.path.exists(objectStoreRoot)):
        ryw.give_bad_news3("can't find object store: " + objectStoreRoot)
        sys.exit(-1)
    ryw.db_print3('object store root is at: ' + objectStoreRoot, 61)


    #
    # open the new search file.
    #
    try:
        newSearchFileHandle = open(newSearchFileName, 'ab')
    except IOError:
        # bug fix: this message used to reference an undefined name
        # ('searchfile'), so a failed open died with a NameError instead
        # of printing the intended diagnostic.
        ryw.give_bad_news3('failed to open new search file: %s\n' % \
                           (newSearchFileName,))
        sys.exit(-1)


    #
    # go through all the individual meta files.
    #
    l = glob.glob(objectStoreRoot + "/?/?/?/?/*_META")
    ryw.give_news3('rewriting meta data files...')
    for filename in l:
        ryw.db_print3('found meta: ' + filename, 61)
        meta = load_meta(filename)
        change_meta(meta, newObjStoreRoot)
        rewrite_meta(filename, meta)
        append_to_new_search_file(newSearchFileHandle, meta)


    #
    # replace the old search file with the newly generated one.
    #
    newSearchFileHandle.close()
    ryw.copy_file_carefully(oldSearchFileName, newSearchFileName,
                            repositoryRoot, None, 'SearchFile_reformat')
    ryw.give_news3('replacing search file: ' + oldSearchFileName +
                   ' <- ' + newSearchFileName)
def process_disk_from_peer_repository(dir_name, diskRoot, overwrite=False):
    """Import every object found on a peer repository's disc.

    Walks the disc's objects tree and, per object#version, either
    processes its stub (when the object already exists locally),
    skips it, deletes-then-reuploads it (overwrite=True), or uploads
    it as new data.  Finishes by merging the disc's ReverseLists into
    the local one.  Returns False only on setup failures; per-item
    failures are reported and skipped.
    """

    objectroot = os.path.join(dir_name, 'objects')
    if not os.path.exists(objectroot):
        # a disc with no objects directory is legitimate: nothing to do.
        logging.info(
            'process_disk_from_peer_repository: no objects directory.' +
            objectroot)
        return True
    
    ## Process all incoming objects

    local_object_root = get_local_object_store_root()
    if local_object_root is None:
        return False

    mapDict = ryw_philips.get_map(diskRoot)
    # searchFile handle threaded through deal_with_stub()/do_delete(),
    # which may open and return one; closed at the end of this function.
    searchFile = None

    for objectId,version in objectstore.objectversioniterator(objectroot):

        ryw.give_news3('----------', logging.info)

        # map check is inside the loop, so a disc whose iterator yields
        # nothing succeeds even without a readable map file.
        if mapDict == None:
            ryw.give_bad_news(
                'process_disk_from_peer_repository: failed to read map file: '+
                diskRoot, logging.error)
            return False

        #
        # We used to just skip objects already present.
        # now we want to do something.
        #
        #if object_version_is_present(local_object_root, objectId, version):
        #    ## object already present
        #    continue
        objectFound = object_version_is_present(local_object_root,
                                                objectId,
                                                version)

        paths = objectstore.name_version_to_paths_aux(objectroot,
                                                      objectId, version)

        itemName = objectId + '#' + str(version)

        # resolve data location; any per-item resolution failure skips
        # just this item.
        obdata = get_data_name_mirror(paths[0], diskRoot, mapDict, itemName)
        if not obdata:
            continue

        success,isStub = is_data_stub(obdata)
        if not success:
            continue

        metadata = get_metadata_mirror(paths[1])
        if not metadata:
            continue

        auxdir = get_aux_name_mirror(paths[2], diskRoot, mapDict, itemName)

        # a stub refers to data we should already have; a stub for an
        # object missing from the local database is an error.
        if isStub and not objectFound:
            #ryw.give_bad_news(
            #    'ProcessDiscs: is a stub but not found in database: '+
            #    itemName, logging.error)
            ryw.give_news3(
                'ProcessDiscs error: is a stub but not found in database: '+
                itemName, logging.error)
            continue
        
        if isStub:
            success,searchFile = deal_with_stub(
                local_object_root, objectId, version, metadata,
                obdata, auxdir, searchFile = searchFile)
            if success:
                ryw.give_news3(
                    'ProcessDiscs success: meta data processed.',
                    logging.info)
            continue

        if objectFound:
            #ryw.give_bad_news(
            #    'ProcessDiscs: not a stub but found in the database: '+
            #    itemName, logging.error)
            ryw.give_news3(
                'ProcessDiscs: not a stub but found in the database: '+
                itemName, logging.error)

            if not overwrite:
                ryw.give_news3('processing skipped.', logging.error)
                continue
            
            #
            # I might want to delete the old version here.
            # using the code from DelObject, should be simple.
            #
            ryw.give_news3('deleting it...', logging.error)

            #
            # at one point, I thought I wanted to be clever and
            # tried to reuse searchFile below.  But the trouble is
            # that the UploadObject.uploadobject() call below will
            # change the SearchFile beneath me, and if I reuse
            # the searchFile here, I end up flushing the incorrectly
            # cached version back to disk. I actually would have
            # expected a deadlock when UploadObject.uploadobject()
            # tries to lock again but the deadlock somehow
            # didn't happen...
            #
            success,searchFile = DeleteObject.do_delete(
                objectId, version, searchFile=None)
            if not success:
                ryw.give_bad_news(
                    'ProcessDiscs: DeleteObject failed: ' +
                    objectId + '#' + str(version), logging.error)
                continue
            else:
                ryw.db_print('process_disk_from_peer_repository: ' +
                             'do_delete succeeded.', 18)
                
            #
            # falls through to continue onto adding the object.
            #
            
        if not UploadObject.uploadobject(metadata, obdata, auxdir,
                                         hasVersion = True):
            ryw.give_bad_news(
                'process_disk_from_peer_repository: ' +
                'UploadObject.uploadobject failed: '+
                repr(metadata) + ' ' + obdata, logging.critical)
            continue
        else:
            ryw.give_news3('ProcessDiscs success: new data uploaded.',
                           logging.info)
            continue

    incomingReverseLists = os.path.join(dir_name, 'ReverseLists')
    existingReverseLists = os.path.join(RepositoryRoot, 'ReverseLists')

    # NOTE(review): merge_incoming() also calls searchFile.done() on the
    # handle it is given, so the done() below may be a double-close --
    # verify against ryw.open_search_file semantics.
    ReverseLists.merge_incoming(
        existingReverseLists, incomingReverseLists, RepositoryRoot,
        searchFile = searchFile)

    if searchFile:
        searchFile.done()
    return True