Example 1
def get_paths1(objroot, objID, version):
    assert objroot
    prefix = objectstore.nameversiontoprefix(objroot, objID, version)
    pair   = os.path.split(os.path.normpath(prefix))
    parent = pair[0]
    paths  = objectstore.name_version_to_paths_aux(objroot, objID, version)
    paths  = list(paths)
    paths.append(parent)
    return paths
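
Note: none of these examples show name_version_to_paths_aux itself, but the way callers index its result is consistent throughout: paths[0] is the data directory, paths[1] the pickled metadata file, paths[2] the auxiliary directory, and paths[3] a "done" flag file. A minimal stand-in sketch under that assumption (the real objectstore module defines the actual layout):

import os

# Hypothetical stand-in, inferred from how the callers below index
# the result. Assumed indices:
#   paths[0] data dir, paths[1] metadata file,
#   paths[2] auxi dir, paths[3] done-flag file.
def name_version_to_paths_sketch(objroot, objID, version):
    prefix = os.path.join(objroot, objID, str(version))
    return (os.path.join(prefix, 'data'),
            os.path.join(prefix, 'metadata'),
            os.path.join(prefix, 'auxi'),
            os.path.join(prefix, 'done'))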
Example 2
def resize_thumbnails(meta, forced = True):
    try:
        ostore = ryw.get_objectstore(meta)
        if not ostore:
            return
        paths = objectstore.name_version_to_paths_aux(
            ostore, meta['id'], meta['version'])
        auxiDir = paths[2]
        ryw_upload.resize_thumbnails(auxiDir, forced)
    except:
        ryw.give_bad_news('ryw_meta.resize_thumbnails: failed, meta: ' +
                          repr(meta), logging.error)
def object_version_is_present(objroot, objid, version):
    paths = objectstore.name_version_to_paths_aux(objroot, objid, version)
    if os.path.exists(paths[3]):
        #ryw.give_news('object_version_is_present: exists: '  +
        #              objid + '#' +
        #              str(version), logging.info)
        ryw.give_news3('object already exists in database: '  +
                       objid + '#' + str(version), logging.info)
        #
        # place to test flushing problem
        #
        #time.sleep(10)
        #
        return True
    else:
        return False
def deal_with_stub(localObjRoot, objID, version,
                   metaData, dataDir, auxDir, searchFile = None):
    """thoughts regarding how many (unnecessary) times I'm going through
    the SearchFile... if there's data, the time spent on metadata is
    probably not a big deal...  so only this one, when we're only dealing
    with metadata, somewhat matters.  I think I'm still going to write
    through, to make things simpler.  the only optimization to take care
    of is to not to re-open the SearchFile over and over again."""
    
    success,searchFile = EditObject.do_update_metadata(
        localObjRoot, objID, version, metaData, searchFile = searchFile)
    if not success:
        ryw.give_bad_news('ProcessDiscs: EditObject failed: ' +
                          objID + '#' + str(version), logging.error)
        return (False, None)

    if not os.path.exists(auxDir):
        return (True, searchFile)

    oldPaths = objectstore.name_version_to_paths_aux(
        localObjRoot, objID, version)
    oldAuxDir = oldPaths[2]

    try:
        #ryw.give_news3('auxDir: ' + auxDir, logging.info)
        #ryw.give_news3('oldAuxDir: ' + oldAuxDir, logging.info)

        #ryw.give_news3('force remove: ' + oldAuxDir, logging.info)
        ryw.force_remove(oldAuxDir, 'deal_with_found_objects')
        #ryw.give_news3('recursive copy: ' + auxDir + ' -> ' + oldAuxDir,
        #               logging.info)
        shutil.copytree(auxDir, oldAuxDir)
    except:
        ryw.give_bad_news('ProcessDiscs: failed to overwrite auxi dirs: ' +
                          auxDir + ' -> ' + oldAuxDir, logging.critical)
        return (False, searchFile)

    return (True, searchFile)
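
A hedged usage sketch of the write-through pattern the docstring describes: thread the returned searchFile through successive calls so the SearchFile is opened once, and close it at the end. The stubs iterable is hypothetical; the threading pattern itself matches process_disk_from_peer_repository below.

searchFile = None
for objID, version, metaData, dataDir, auxDir in stubs:  # hypothetical iterable
    ok, searchFile = deal_with_stub(localObjRoot, objID, version,
                                    metaData, dataDir, auxDir,
                                    searchFile = searchFile)
    if not ok:
        continue
if searchFile:
    searchFile.done()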
def get_DVD_URLs(item, itempaths, mapDict):
    try:
        objID,version = item.split('#')
        version = int(version)

        dvdObjRoot = '..\\objects'
        paths = objectstore.name_version_to_paths_aux(
            dvdObjRoot, objID, version)
        datapath = os.path.normpath(paths[0])
        datapath = datapath.replace('\\', '/')
        auxipath = os.path.normpath(paths[2])
        auxipath = auxipath.replace('\\', '/')
        datapath = urllib.quote(datapath)
        auxipath = urllib.quote(auxipath)
        #auxidir  = os.path.normpath(paths[2])
        auxidir  = os.path.normpath(itempaths[2])
        logging.debug('get_DVD_URLs: ' + datapath + ' ' + auxipath + ' ' +
                      auxidir)
        return (True, datapath, auxipath, auxidir)
    except:
        ryw.give_bad_news('get_DVD_URLs: failed for: ' + item,
                          logging.critical)
        return (False, None, None, None)
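
The item string is expected to be of the form 'objID#version'; the function converts the Windows-style object-store paths into forward-slash, URL-quoted form for use on the DVD. A hypothetical call (the item name and surrounding variables are illustrative):

ok, dataURL, auxiURL, auxiDir = get_DVD_URLs('somevideo#3', itempaths, mapDict)
if ok:
    logging.debug('data URL on disc: ' + dataURL)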
Example 6
def get_meta(objroot, objname, version):
    """uses the file system to get meta data instead of getting it from
    either the SearchFile or SearchServer.  should be a bit faster if
    there is no need to read all the metadata.  just a performance issue:
    should not be a robustness issue now that we have gotten rid of the
    SearchServers."""

    logging.debug("ryw.get_meta: " + objroot + " " + objname + " " + str(version))

    try:
        # paths = objectstore.nameversiontopaths(objroot, objname, version)
        paths = objectstore.name_version_to_paths_aux(objroot, objname, version)

        if not good_repo_paths(paths):
            logging.warning("ryw.get_meta: good_repo_paths failed.")
            return (False, None)

        metapath = paths[1]
        meta = su.pickload(metapath)
        logging.debug("ryw.get_meta: success.")
        return (True, meta)
    except:
        logging.warning("ryw.get_meta: failed.")
        return (False, None)
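
A hypothetical usage sketch (the object root and object name are illustrative only):

ok, meta = get_meta('C:\\repository\\objects', 'somevideo', 3)
if ok:
    logging.debug('got metadata: ' + repr(meta))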
def process_disk_from_peer_repository(dir_name, diskRoot, overwrite=False):

    objectroot = os.path.join(dir_name, 'objects')
    if not os.path.exists(objectroot):
        logging.info(
            'process_disk_from_peer_repository: no objects directory: ' +
            objectroot)
        return True
    
    ## Process all incoming objects

    local_object_root = get_local_object_store_root()
    if local_object_root is None:
        return False

    mapDict = ryw_philips.get_map(diskRoot)
    searchFile = None

    for objectId,version in objectstore.objectversioniterator(objectroot):

        ryw.give_news3('----------', logging.info)

        if mapDict is None:
            ryw.give_bad_news(
                'process_disk_from_peer_repository: failed to read map file: '+
                diskRoot, logging.error)
            return False

        #
        # We used to just skip objects already present.
        # Now we want to do something with them.
        #
        #if object_version_is_present(local_object_root, objectId, version):
        #    ## object already present
        #    continue
        objectFound = object_version_is_present(local_object_root,
                                                objectId,
                                                version)

        paths = objectstore.name_version_to_paths_aux(objectroot,
                                                      objectId, version)

        itemName = objectId + '#' + str(version)

        obdata = get_data_name_mirror(paths[0], diskRoot, mapDict, itemName)
        if not obdata:
            continue

        success,isStub = is_data_stub(obdata)
        if not success:
            continue

        metadata = get_metadata_mirror(paths[1])
        if not metadata:
            continue

        auxdir = get_aux_name_mirror(paths[2], diskRoot, mapDict, itemName)

        if isStub and not objectFound:
            #ryw.give_bad_news(
            #    'ProcessDiscs: is a stub but not found in database: '+
            #    itemName, logging.error)
            ryw.give_news3(
                'ProcessDiscs error: is a stub but not found in database: '+
                itemName, logging.error)
            continue
        
        if isStub:
            success,searchFile = deal_with_stub(
                local_object_root, objectId, version, metadata,
                obdata, auxdir, searchFile = searchFile)
            if success:
                ryw.give_news3(
                    'ProcessDiscs success: meta data processed.',
                    logging.info)
            continue

        if objectFound:
            #ryw.give_bad_news(
            #    'ProcessDiscs: not a stub but found in the database: '+
            #    itemName, logging.error)
            ryw.give_news3(
                'ProcessDiscs: not a stub but found in the database: '+
                itemName, logging.error)

            if not overwrite:
                ryw.give_news3('processing skipped.', logging.error)
                continue
            
            #
            # I might want to delete the old version here.
            # using the code from DelObject, should be simple.
            #
            ryw.give_news3('deleting it...', logging.error)

            #
            # at one point, I thought I wanted to be clever and
            # tried to reuse searchFile below.  But the trouble is
            # that the UploadObject.uploadobject() call below will
            # change the SearchFile beneath me, and if I reuse
            # the searchFile here, I end up flushing the incorrectly
            # cached version back to disk. I actually would have
            # expected a deadlock when UploadObject.uploadobject()
            # tries to lock again but the deadlock somehow
            # didn't happen...
            #
            success,searchFile = DeleteObject.do_delete(
                objectId, version, searchFile=None)
            if not success:
                ryw.give_bad_news(
                    'ProcessDiscs: DeleteObject failed: ' +
                    objectId + '#' + str(version), logging.error)
                continue
            else:
                ryw.db_print('process_disk_from_peer_repository: ' +
                             'do_delete succeeded.', 18)
                
            #
            # falls through to continue onto adding the object.
            #
            
        if not UploadObject.uploadobject(metadata, obdata, auxdir,
                                         hasVersion = True):
            ryw.give_bad_news(
                'process_disk_from_peer_repository: ' +
                'UploadObject.uploadobject failed: '+
                repr(metadata) + ' ' + obdata, logging.critical)
            continue
        else:
            ryw.give_news3('ProcessDiscs success: new data uploaded.',
                           logging.info)
            continue

    incomingReverseLists = os.path.join(dir_name, 'ReverseLists')
    existingReverseLists = os.path.join(RepositoryRoot, 'ReverseLists')

    ReverseLists.merge_incoming(
        existingReverseLists, incomingReverseLists, RepositoryRoot,
        searchFile = searchFile)

    if searchFile:
        searchFile.done()
    return True
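
The loop above distinguishes four cases by the (isStub, objectFound) pair. A minimal sketch, not taken from the source, restating that branch logic in one place:

def peer_disc_action(isStub, objectFound, overwrite):
    # Hypothetical summary of the decisions made in the loop above.
    if isStub and not objectFound:
        return 'error: stub with no matching object in the database, skip'
    if isStub:
        return 'write metadata through via deal_with_stub'
    if objectFound and not overwrite:
        return 'already present, processing skipped'
    if objectFound:
        return 'delete the old version, then fall through to upload'
    return 'upload as a new object'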
Example 8
def collect_req_info(reqs, objKB):
    logging.debug('collect_req_info: entered...')

    success,searchFile = ryw.open_search_file(
        'collect_req_info:',
        os.path.join(RepositoryRoot, 'WWW', 'logs'),
        'upload.log',
        os.path.join(RepositoryRoot, 'SearchFile'),
        False)
    if not success:
        return(False, None, None, None)

    reqsize = {}
    reqpath = {}
    reqList = []
    for item in reqs:
        logging.debug('collect_req_info: item is: '+item)
        try:
            objname, version = item.split('#')
            version = int(version)
        except:
            ryw.give_bad_news(
                'collect_req_info: bad format, split failed: '+item,
                logging.error)
            continue
            
        logging.debug('collect_req_info, obj, version: ' +
                      objname + ' ' + repr(version))

        success,metaData = searchFile.get_meta(objname, version)
        if not success:
            ryw.give_bad_news(
                'collect_req_info: failed to get_meta.',
                logging.error)
            continue
            
        #
        # I'm doing this to hardwire all
        # places that get the object store root.
        #
        #objroot = metaData['objectstore']
        objroot = ryw.hard_wired_objectstore_root()

        try:
            itempath = objectstore.name_version_to_paths_aux(objroot, objname,
                                                             version)
        except:
            ryw.give_bad_news(
                'collect_req_info: name_version_to_paths_aux failed: ' +
                objroot + ' ' + objname + ' ' + repr(version),
                logging.critical)
            continue
                
        logging.debug('collect_req_info: after getting itempath...' +
                      repr(itempath))

        if not ryw.good_repo_paths(itempath):
            ryw.give_bad_news('collect_req_info: good_repo_paths failed.',
                              logging.error)
            continue

        success,itemSize = ryw.get_obj_size(itempath)
        if not success:
            continue
        
        logging.debug('collect_req_info, size in KB is: ' + repr(itemSize))

        if (itemSize > ryw.maxSizeInKB - objKB):
            ryw.give_bad_news(
                'collect_req_info: item size too big to fit on one disc: ' +
                repr(itemSize), logging.error)
            continue

        reqsize[item] = itemSize
        reqpath[item] = itempath
        logging.debug('collect_req_info: size, path: ' +
                      repr(itemSize) + ' ' + itempath[0])

        # build a list for sorting.
        reqItem = {}
        reqItem['name'] = item
        if 'upload_datetime' in metaData:
            reqItem['upload_datetime'] = metaData['upload_datetime']
        reqList.append(reqItem)

    searchFile.done()
    reqList.sort(key = ryw.datetimesortkey, reverse = False)

    sortedItems = []
    for r in reqList:
        sortedItems.append(r['name'])
    
    return (True, reqsize, reqpath, sortedItems)
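
A hypothetical call: reqs holds 'name#version' strings, and objKB appears to be the space in KB already claimed on the disc, given the size check against ryw.maxSizeInKB above. The item names are illustrative.

ok, reqsize, reqpath, sortedItems = collect_req_info(
    ['somevideo#3', 'somedoc#1'], 700)
if ok:
    for item in sortedItems:
        logging.debug(item + ': ' + repr(reqsize[item]) + ' KB')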
def copy_objects(items, itempath, repDir, tmpImgDir, metaOnly = False):
    ryw.give_news2('copying requested objects...   ', logging.info)
    #
    # quite a bit of the copying details should be pulled out to a piece
    # of common code.
    #

    if len(items) == 0:
        ryw.give_news2('no requested objects. ', logging.info)
        ryw.give_news2('<BR>', logging.info)
        return True    

    success,dataRoot,auxiRoot,mapDict,counter = ryw_philips.out_init(tmpImgDir)
    if not success:
        ryw.give_bad_news('copy_objects: out_init failed.', logging.error)
        return True

    for item in items:
        logging.debug('copy_objects: item: ' + item)
        try:
            objname,version = item.split('#')
            version = int(version)
            logging.debug('copy_objects: got name, version: ' +
                          objname + ' ' + repr(version))
            #destpath = objectstore.nameversiontopaths(
            #    os.path.join(repDir, 'objects'), objname, version)
            destpath = objectstore.name_version_to_paths_aux(
                os.path.join(repDir, 'objects'), objname, version)
            logging.debug('copy_objects: got destpath: ' + repr(destpath))

            if not ryw.good_repo_paths(itempath[item]):
                ryw.give_bad_news('copy_objects: good_repo_paths failed.',
                                  logging.error)
                raise Exception('something missing in the source paths.')

            su.createparentdirpath(destpath[0])
            logging.debug('copy_objects: created parent dir: ' + destpath[0])

            # code prior to preparing for Philips DVD player
            # su.copytree(itempath[item][0], destpath[0])
            # logging.debug('copy_objects: done copying data: ' + destpath[0])

            counter,mapDict = \
                ryw_philips.out_copy(item,
                                     itempath[item][0], itempath[item][1],
                                     itempath[item][2], counter,
                                     dataRoot, auxiRoot, mapDict,
                                     onlyMeta = metaOnly)

            shutil.copyfile(itempath[item][1], destpath[1])
            logging.debug('copy_objects: done copying metadata: ' +
                          destpath[1])

            # code prior to preparing for Philips DVD player
            # if os.path.exists(itempath[item][2]):
            #     su.copytree(itempath[item][2], destpath[2])
            #     logging.debug('copy_objects: done copying auxi files: ' +
            #                   destpath[2])
                
            shutil.copyfile(itempath[item][3], destpath[3])
            logging.debug('copy_objects: done copying done flag: ' +
                          destpath[3])
            
        except:
            ryw.give_bad_news(
                'copy_objects: failed to copy data: ' + item, logging.critical)
            ryw.give_bad_news(
                'copy_objects: skip copying an object and continue.',
                logging.error)
            continue

    ryw_philips.out_done(tmpImgDir, mapDict)

    ryw.give_news2('done. ', logging.info)
    ryw.give_news2('<BR>', logging.info)
    return True
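
A hypothetical end-to-end sketch combining the two functions above; the request list and directory names are illustrative only.

reqs = ['somevideo#3']                      # illustrative request list
ok, reqsize, reqpath, sortedItems = collect_req_info(reqs, 0)
if ok:
    copy_objects(sortedItems, reqpath, 'C:\\tmp\\rep', 'C:\\tmp\\img',
                 metaOnly = False)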