def try_insert( new_dir_index ): rc = yield cls.__alloc( volume_id, parent_id, file_id, new_dir_index, generation, total_attempt_count, async=True ) if rc: if new_dir_index >= num_children: # compactify--see if we can shift it closer rc, final_dir_index = cls.__compactify_on_insert( volume_id, parent_id, file_id, new_dir_index, num_shards ) if rc == 0: storagetypes.concurrent_return( (0, generation) ) elif rc == -errno.EAGAIN: # try again storagetypes.concurrent_return( (-errno.EAGAIN, final_dir_index) ) else: storagetypes.concurrent_return( (rc, None) ) else: logging.info("Directory /{}/{}: inserted {} correctly the first time".format(volume_id, parent_id, file_id)) storagetypes.concurrent_return( (0, generation) ) else: logging.info("Directory /%s/%s: Failed to insert /%s/%s (capacity %s) at %s; will need to retry (attempts=%s)" % (volume_id, parent_id, volume_id, file_id, parent_capacity, new_dir_index, total_attempt_count) ) # probably collided. Try again, and have the caller pick a different index storagetypes.concurrent_return( (-errno.EAGAIN, None) )
def __update_or_alloc_async( cls, volume_id, parent_id, file_id, dir_index, generation, alloced ):
    """
    Update or allocate the index node pair and/or set the directory index node's allocation status, asynchronously.
    If the directory index node does not exist, it and its entry index node will be created and the allocation status set accordingly.
    If the directory index node exists, but has a different allocation status, then the allocation status will be set accordingly.
    If we succeed in allocating a new index node, increment the number of children in the parent directory.

    Return True on success.
    Return False if the index node existed, but the file_id did not match its record or the allocation status did not change.
    """
    index_key_name = MSEntryDirEntIndex.make_key_name( volume_id, parent_id, dir_index )
    # random 64-bit nonce: get_or_insert_async either creates the node with this
    # nonce or returns a pre-existing one; comparing nonces afterward tells us
    # which of the two happened.
    nonce = random.randint( -2**63, 2**63 - 1 )
    result = True
    idx = yield MSEntryDirEntIndex.get_or_insert_async( index_key_name, volume_id=volume_id, parent_id=parent_id, file_id=file_id, dir_index=dir_index, generation=generation, alloced=alloced, nonce=nonce )
    if idx.nonce == nonce:
        # created.
        if alloced:
            logging.info("Directory /%s/%s: allocated index slot for /%s/%s at %s" % (volume_id, parent_id, volume_id, file_id, dir_index))
        else:
            logging.info("Directory /%s/%s: freed index slot at %s" % (volume_id, parent_id, dir_index))
        # need to create an entry index node as well.
        # (entry index node is keyed on (volume_id, file_id), mapping the entry
        # back to its slot in the parent directory's index)
        entry_key_name = MSEntryEntDirIndex.make_key_name( volume_id, file_id )
        entry_key = storagetypes.make_key( MSEntryEntDirIndex, entry_key_name )
        entry_idx = MSEntryEntDirIndex( key=entry_key, volume_id=volume_id, parent_id=parent_id, file_id=file_id, dir_index=dir_index, generation=generation, alloced=alloced, nonce=nonce )
        yield entry_idx.put_async()
    else:
        # already exists. changing allocation status?
        if idx.alloced != alloced:
            # allocation status needs to be changed
            # want to change allocation status
            # xg=True: the transaction touches both the dir-index and entry-index entity groups
            rc = yield storagetypes.transaction_async( lambda: cls.__update_index_node_async( volume_id, parent_id, file_id, dir_index, alloced, generation=generation ), xg=True )
            if rc == 0:
                result = True
            else:
                logging.error("__update_index_node_async(/%s/%s file_id=%s dir_index=%s alloced=%s) rc = %s" % (volume_id, parent_id, file_id, dir_index, alloced, rc ))
                result = False
        else:
            if alloced and idx.file_id != file_id:
                # collision on insertion: someone else holds this slot
                logging.error("Directory /%s/%s: collision inserting /%s/%s at %s (occupied by /%s/%s)" % (volume_id, parent_id, volume_id, file_id, dir_index, volume_id, idx.file_id))
                result = False
            else:
                # created/set correctly
                result = True
    storagetypes.concurrent_return( result )
def read_and_cache():
    """
    Fetch the index node behind idx_key (closure variable); if it exists
    and is allocated, store it in the cache.  Returns the node (or None).
    """
    node = yield idx_key.get_async()
    if node is not None:
        if node.alloced:
            MSEntryIndex.SetCache( node )
    storagetypes.concurrent_return( node )
def delete_index_if_unallocated():
    """
    Delete the index node behind idx_key (closure variable) if it is not
    allocated.  Reads bypass both caches so we see the authoritative record.

    Returns 0 (via storagetypes.concurrent_return) in all cases.
    """
    idx_node = yield idx_key.get_async( use_cache=False, use_memcache=False )
    # Fix: the node may have been deleted concurrently, in which case
    # get_async yields None and the original .alloced access would raise
    # AttributeError.  A missing node needs no deletion--treat as done.
    if idx_node is not None and not idx_node.alloced:
        yield idx_key.delete_async()
    storagetypes.concurrent_return( 0 )
def do_delete(): rc = yield cls.__free( volume_id, parent_id, file_id, dir_index, async=True ) if not rc: logging.error("Failed to free index node /%s/%s (%s,%s)" % (volume_id, parent_id, file_id, dir_index)) storagetypes.concurrent_return( -errno.EAGAIN ) cls.__compactify_on_delete( volume_id, parent_id, file_id, dir_index, num_shards, retry=retry, compactify_continuation=compactify_continuation ) storagetypes.concurrent_return( 0 )
def __update_index_node_async( cls, volume_id, parent_id, file_id, dir_index, alloced, **attrs ):
    """
    Set the allocation status of a directory index node (but not its matching entry index node).
    Return 0 on success
    Return -EINVAL if the given file_id doesn't match the directory index node's file_id
    Return -EEXIST if the given directory index node's allocation status is the same as alloced
    """
    index_key_name = MSEntryDirEntIndex.make_key_name( volume_id, parent_id, dir_index )
    index_key = storagetypes.make_key( MSEntryDirEntIndex, index_key_name )
    old_alloced = None
    idx = yield index_key.get_async()
    if idx is None:
        # NOTE(review): when the node is missing, old_alloced is set equal to
        # alloced, so the branch below never writes this freshly-built node and
        # the call returns -EEXIST.  The constructed idx is therefore never
        # persisted -- confirm this "missing node == no status change" behavior
        # is intended.
        old_alloced = alloced
        idx = MSEntryDirEntIndex( key=index_key, volume_id=volume_id, parent_id=parent_id, file_id=file_id, dir_index=dir_index, alloced=alloced, **attrs )
    else:
        if idx.file_id != file_id:
            # wrong node
            storagetypes.concurrent_return( -errno.EINVAL )
        old_alloced = idx.alloced
    if old_alloced != alloced:
        # changing allocation status
        # NOTE(review): populate() is given a positional -1 before the keyword
        # args; standard NDB Model.populate takes keywords only, so this
        # presumably targets a project-specific populate signature -- verify.
        idx.populate( -1, volume_id=volume_id, parent_id=parent_id, file_id=file_id, dir_index=dir_index, alloced=alloced, **attrs )
        yield idx.put_async()
        storagetypes.concurrent_return( 0 )
    else:
        storagetypes.concurrent_return( -errno.EEXIST )
def Read_Async( cls, key, deleted=False ):
    """
    Read the record behind key, asynchronously.

    Returns the record, or None if it does not exist.  Records whose
    'deleted' flag is set are treated as absent unless deleted=True.
    """
    gw = yield key.get_async()
    if gw is None or (gw.deleted and not deleted):
        storagetypes.concurrent_return( None )
    storagetypes.concurrent_return( gw )
def Read_Async(cls, key, deleted=False):
    """
    Asynchronously fetch the record for key.

    Tombstoned records (record.deleted set) are hidden and yield None,
    unless the caller passes deleted=True.  Missing keys yield None.
    """
    rec = yield key.get_async()
    if rec is not None:
        if not rec.deleted or deleted:
            storagetypes.concurrent_return(rec)
    storagetypes.concurrent_return(None)
def swap( free_file_id ):
    """
    Run do_swap transactionally to move the allocated index node into the
    free slot, then clean up the displaced entry index node outside the
    transaction.  Returns the allocated node's former dir_index on success
    (None if there was nothing to clean up), or a negative errno from
    do_swap on failure.  Closure state comes from the enclosing scope.
    """
    # xg=True: the swap touches multiple entity groups
    rc, alloced_idx, free_idx_file_id = yield storagetypes.transaction_async( lambda: do_swap( free_file_id ), xg=True )
    if rc < 0:
        storagetypes.concurrent_return( rc )
    old_dir_index = None
    if free_file_id is None:
        # do_swap discovered which file occupied the free slot; use that
        # file_id for the cleanup below
        free_file_id = free_idx_file_id
    if free_file_id is not None:
        # blow away the newly-freed index node
        old_entry_idx_key_name = MSEntryEntDirIndex.make_key_name( volume_id, free_file_id )
        old_entry_idx_key = storagetypes.make_key( MSEntryEntDirIndex, old_entry_idx_key_name )
        yield old_entry_idx_key.delete_async()
        # evict the stale cached copy as well
        storagetypes.memcache.delete( old_entry_idx_key_name )
        # NOTE(review): nesting reconstructed from a whitespace-mangled source;
        # this assumes the assignment belongs inside this branch (so None is
        # returned when there was nothing to free) -- confirm against history.
        old_dir_index = alloced_idx.dir_index
    storagetypes.concurrent_return( old_dir_index )
def __read_node( cls, file_id, index, idx_key, check_file_id=True ):
    """
    Read a dir-index node, given its key, bypassing both caches.

    Return (rc, idx) where rc is:
    * -ENOENT if the index node doesn't exist
    * -EINVAL if the file IDs don't match (only when check_file_id is True)
    * -EPERM  if the node's dir_index doesn't match index
    * 0 on success, with idx set to the node
    """
    node = yield idx_key.get_async( use_cache=False, use_memcache=False )
    rc = 0
    if node is None:
        rc = -errno.ENOENT
    elif check_file_id and node.file_id != file_id:
        rc = -errno.EINVAL
    elif node.dir_index != index:
        rc = -errno.EPERM
    if rc != 0:
        storagetypes.concurrent_return( (rc, None) )
    storagetypes.concurrent_return( (0, node) )
def do_swap( free_file_id ):
    """
    Transaction body: verify that the allocated index node still holds its
    slot and that the target slot is still free, then move the allocated
    node into the free slot (overwriting the free node) and delete the old
    slot.  Returns (rc, alloced_idx, free_idx_file_id):
    * (0, old allocated node, file_id that occupied the free slot or None)
    * (-EAGAIN/-ESTALE/other negative errno, None, None) on failure.
    Closure state (cls, volume_id, parent_id, alloced_file_id,
    alloced_dir_index, free_dir_index, free_idx_key, alloced_entry_idx_key)
    comes from the enclosing scope.
    """
    # confirm that the allocated directory index node and free directory index node still exist
    free_idx_data = None
    free_idx_rc = None
    free_idx = None
    free_idx_file_id = None
    check_free_file_id = True
    if free_file_id is None:
        # caller doesn't know who occupies the free slot; skip the file_id check
        check_free_file_id = False
    # issue both reads in parallel
    alloced_idx_data, free_idx_data = yield cls.__read_dirent_node( volume_id, parent_id, alloced_file_id, alloced_dir_index, async=True ), \
                                            cls.__read_dirent_node( volume_id, parent_id, free_file_id, free_dir_index, async=True, check_file_id=check_free_file_id )
    alloced_idx_rc, alloced_idx = alloced_idx_data
    if free_idx_data is not None:
        free_idx_rc, free_idx = free_idx_data
    # possible that we raced another compactify operation and lost (in which case the allocated node might be different than what we expect)
    if alloced_idx_rc != 0:
        logging.error("/%s/%s: alloced index (/%s/%s, %s) rc = %s" % (volume_id, parent_id, volume_id, alloced_file_id, alloced_dir_index, alloced_idx_rc) )
        storagetypes.concurrent_return( (-errno.EAGAIN, None, None) )
    elif not alloced_idx.alloced:
        logging.error("/%s/%s: alloced index (/%s/%s, %s) is free" % (volume_id, parent_id, volume_id, alloced_file_id, alloced_dir_index) )
        storagetypes.concurrent_return( (-errno.ESTALE, None, None) )
    if free_idx_data is not None:
        if free_idx_rc != 0:
            if free_idx_rc == -errno.ENOENT:
                # the entry doesn't exist, which is fine by us since we're about to overwrite it anyway
                free_idx_rc = None
                free_idx = None
                free_idx_data = None
            else:
                logging.error("/%s/%s: __read_dirent_node(/%s/%s, %s) rc = %s" % (volume_id, parent_id, volume_id, free_file_id, free_dir_index, free_idx_rc) )
                storagetypes.concurrent_return( (free_idx_rc, None, None) )
        elif free_idx.alloced:
            # someone claimed the "free" slot while we weren't looking
            logging.error("/%s/%s: free index (/%s/%s, %s) is allocated" % (volume_id, parent_id, volume_id, free_idx.file_id, free_dir_index) )
            storagetypes.concurrent_return( (-errno.ESTALE, None, None) )
        elif free_idx.dir_index != free_dir_index:
            # index node's recorded slot disagrees with its key -- corrupted index
            raise Exception("/%s/%s: free index slot mismatch: %s != %s" % (volume_id, free_file_id, free_idx.dir_index, free_dir_index))
        else:
            # save this for later...
            free_idx_file_id = free_idx.file_id
    # sanity check
    if alloced_idx.dir_index != alloced_dir_index:
        raise Exception("/%s/%s: allocated index slot mismatch: %s != %s" % (volume_id, alloced_file_id, alloced_idx.dir_index, alloced_dir_index))
    # do the swap:
    # * overwrite the free dir index node with the allocated dir index node's data (moving it into place over the freed one)
    # * update the alloced ent node with the free dir index node's dir index (compactifying the index)
    new_dir_idx = MSEntryDirEntIndex( key=free_idx_key, **alloced_idx.to_dict() )
    new_entry_dir_idx = MSEntryEntDirIndex( key=alloced_entry_idx_key, **alloced_idx.to_dict() )  # overwrites existing entry index node
    new_dir_idx.dir_index = free_dir_index
    new_entry_dir_idx.dir_index = free_dir_index
    logging.info( "swap index slot of /%s/%s: slot %s --> slot %s (overwrites %s)" % (volume_id, alloced_file_id, alloced_dir_index, free_dir_index, free_file_id) )
    # write both new nodes and delete the old slot in parallel
    yield new_dir_idx.put_async(), new_entry_dir_idx.put_async(), alloced_idx.key.delete_async()
    storagetypes.concurrent_return( (0, alloced_idx, free_idx_file_id) )