def delete_volume_and_friends(cls, volume_id, volume_name):
    """
    Deferred-task cleanup for a Volume: delete the Volume record itself
    and the Volume's name holder.  Attached gateways are NOT deleted.
    (Deletion of Volume access requests is currently disabled here.)
    """
    volume_key = storagetypes.make_key(
        Volume, Volume.make_key_name(volume_id=volume_id))
    nameholder_key = storagetypes.make_key(
        VolumeNameHolder, VolumeNameHolder.make_key_name(volume_name))

    # fire off both deletes concurrently, then block until both finish
    pending = [volume_key.delete_async(), nameholder_key.delete_async()]
    storagetypes.wait_futures(pending)
def Delete( cls, g_name_or_id ):
    """
    Delete the gateway identified by name or ID, along with its name
    holder, and unref its driver.  Raises an Exception if no such
    gateway exists.  Returns True on success.
    """
    gateway = Gateway.Read( g_name_or_id )
    if not gateway:
        raise Exception("No such Gateway '%s'" % g_name_or_id )

    g_id = gateway.g_id

    gw_key = storagetypes.make_key( cls, Gateway.make_key_name( g_id=g_id ) )
    name_key = storagetypes.make_key(
        GatewayNameHolder, GatewayNameHolder.make_key_name( gateway.name ) )

    # delete the gateway, its name reservation, and the driver ref in parallel
    storagetypes.wait_futures( [gw_key.delete_async(),
                                name_key.delete_async(),
                                GatewayDriver.unref_async( gateway.driver_hash )] )

    # purge cached copies of the gateway and its driver
    Gateway.FlushCache( g_id )
    Gateway.FlushCacheDriver( gateway.driver_hash )
    storagetypes.memcache.delete( Gateway.Read_ByName_name_cache_key( g_name_or_id ) )
    return True
def Delete(cls, cls_name_or_id):
    """
    Delete the closure identified by name or ID, along with its name
    holder.  The caller must ensure no gateway still references the
    closure.  Raises if the closure does not exist; returns True.
    """
    closure = Closure.Read(cls_name_or_id)
    if closure is None:
        raise Exception("No such Closure '%s'" % cls_name_or_id)

    cls_id = closure.closure_id

    closure_key = storagetypes.make_key(
        cls, Closure.make_key_name(closure_id=cls_id))
    name_key = storagetypes.make_key(
        ClosureNameHolder, ClosureNameHolder.make_key_name(closure.name))

    # start both deletes, then invalidate caches while they run
    delete_futs = [closure_key.delete_async(), name_key.delete_async()]

    Closure.FlushCache(cls_id)
    storagetypes.memcache.delete(
        Closure.Read_ByName_name_cache_key(cls_name_or_id))

    storagetypes.wait_futures(delete_futs)
    return True
def delete_volume_and_friends(cls, volume_id, volume_name):
    """
    Delete, as a deferred task, everything belonging to a Volume:
       * the Volume record itself
       * all Volume access requests
       * the Volume name holder
    """
    futs = []

    # delete volume
    volume_key = storagetypes.make_key( Volume, Volume.make_key_name(volume_id=volume_id))
    futs.append(volume_key.delete_async())

    # delete volume nameholder
    volume_nameholder_key = storagetypes.make_key( VolumeNameHolder, VolumeNameHolder.make_key_name(volume_name))
    futs.append(volume_nameholder_key.delete_async())

    # delete volume access requests
    # NOTE: the 'async' keyword argument means this code targets Python 2
    # ('async' became a reserved word in Python 3.7).
    volume_access_requests_fut = VolumeAccessRequest.DeleteAccessRequestsByVolume( volume_id, async=True)
    futs.append(volume_access_requests_fut)

    # block until all three deletes have finished
    storagetypes.wait_futures(futs)
def Delete( cls, cls_name_or_id ):
    """
    Remove a closure (looked up by name or ID) and its name holder.
    NOTE: verify that no gateway references this closure beforehand.
    Raises an Exception if the closure cannot be found; returns True.
    """
    closure = Closure.Read( cls_name_or_id )
    if closure is None:
        raise Exception("No such Closure '%s'" % cls_name_or_id )

    cls_id = closure.closure_id

    # kick off deletion of the closure record and its name reservation
    closure_fut = storagetypes.make_key(
        cls, Closure.make_key_name( closure_id=cls_id ) ).delete_async()
    nameholder_fut = storagetypes.make_key(
        ClosureNameHolder,
        ClosureNameHolder.make_key_name( closure.name ) ).delete_async()

    # evict cached state while the deletes are in flight
    Closure.FlushCache( cls_id )
    storagetypes.memcache.delete(
        Closure.Read_ByName_name_cache_key( cls_name_or_id ) )

    storagetypes.wait_futures( [closure_fut, nameholder_fut] )
    return True
def Delete(cls, g_name_or_id):
    """
    Delete the gateway named/identified by g_name_or_id together with
    its name holder, then evict it from the caches.
    Raises an Exception if no such gateway exists; returns True.
    """
    gateway = Gateway.Read(g_name_or_id)
    if not gateway:
        raise Exception("No such Gateway '%s'" % g_name_or_id)

    g_id = gateway.g_id

    # kick off both datastore deletes
    futs = [
        storagetypes.make_key(
            cls, Gateway.make_key_name(g_id=g_id)).delete_async(),
        storagetypes.make_key(
            GatewayNameHolder,
            GatewayNameHolder.make_key_name(gateway.name)).delete_async(),
    ]

    # invalidate cached copies while the deletes run
    Gateway.FlushCache(g_id)
    storagetypes.memcache.delete(
        Gateway.Read_ByName_name_cache_key(g_name_or_id))

    storagetypes.wait_futures(futs)
    return True
def Create(cls, user, **kwargs):
    """
    Create a closure owned by the calling user.  Only call this after
    the closure binary has been uploaded successfully.
    Returns the new closure's key; raises on validation failure or a
    name/ID collision.
    """
    # the caller owns the closure, unconditionally
    kwargs['owner_id'] = user.owner_id

    cls.fill_defaults(kwargs)

    # make sure every required attribute is present and well-formed
    missing = cls.find_missing_attrs(kwargs)
    if missing:
        raise Exception("Missing attributes: %s" % (", ".join(missing)))

    invalid = cls.validate_fields(kwargs)
    if invalid:
        raise Exception("Invalid values for fields: %s" % (", ".join(invalid)))

    # pick a random 63-bit ID
    closure_id = random.randint(0, 2**63 - 1)
    kwargs['closure_id'] = closure_id

    key_name = Closure.make_key_name(closure_id=closure_id)
    closure_key = storagetypes.make_key(cls, key_name)

    # optimistically create the nameholder and the closure together;
    # there's a good chance we'll succeed
    nameholder_fut = ClosureNameHolder.create_async(kwargs['name'], closure_id)
    closure_fut = cls.get_or_insert_async(key_name, **kwargs)
    storagetypes.wait_futures([nameholder_fut, closure_fut])

    nameholder = nameholder_fut.get_result()
    closure = closure_fut.get_result()

    if nameholder.closure_id != closure_id:
        # someone else holds this name; roll back our closure record
        storagetypes.deferred.defer(Closure.delete_all, [closure_key])
        raise Exception("Closure '%s' already exists!" % kwargs['name'])

    if closure.closure_id != closure_id:
        # ID collision; roll back both records
        storagetypes.deferred.defer(Closure.delete_all,
                                    [nameholder.key, closure_key])
        raise Exception("Closure ID collision. Please try again.")

    return closure_key
def SetUGCaps_ByVolume( cls, volume_id, caps ):
    """
    Assign the capability bits `caps` to every UG in the given Volume,
    recomputing each gateway's need_cert flag from the new caps.
    Returns True.
    """
    def apply_caps( gw ):
        # mutate in place and write back asynchronously
        gw.caps = caps
        gw.need_cert = Gateway.needs_cert( GATEWAY_TYPE_UG, caps )
        return gw.put_async()

    pending = cls.ListAll( {"Gateway.gateway_type ==": GATEWAY_TYPE_UG,
                            "Gateway.volume_id ==": volume_id},
                           map_func=apply_caps )

    storagetypes.wait_futures( pending )
    return True
def Create( cls, user, **kwargs ):
    """
    Create a closure owned by `user`.  Only do this after the closure
    binary has been uploaded successfully.
    Returns the closure key on success; raises Exception on bad input
    or a name/ID collision.
    """
    # enforce ownership--make sure the calling user owns this closure
    kwargs['owner_id'] = user.owner_id

    # fill in defaults, then validate
    cls.fill_defaults( kwargs )

    missing = cls.find_missing_attrs( kwargs )
    if missing:
        raise Exception( "Missing attributes: %s" % (", ".join( missing )))

    invalid = cls.validate_fields( kwargs )
    if invalid:
        raise Exception( "Invalid values for fields: %s" % (", ".join( invalid )) )

    # generate a random ID and derive the datastore key from it
    new_id = random.randint( 0, 2**63 - 1 )
    kwargs['closure_id'] = new_id

    key_name = Closure.make_key_name( closure_id=new_id )
    new_key = storagetypes.make_key( cls, key_name )

    # create a nameholder and this closure at once---there's a good chance we'll succeed
    name_fut = ClosureNameHolder.create_async( kwargs['name'], new_id )
    obj_fut = cls.get_or_insert_async( key_name, **kwargs )
    storagetypes.wait_futures( [name_fut, obj_fut] )

    # detect collisions against whatever records already existed
    name_rec = name_fut.get_result()
    obj_rec = obj_fut.get_result()

    if name_rec.closure_id != new_id:
        # name collision...
        storagetypes.deferred.defer( Closure.delete_all, [new_key] )
        raise Exception( "Closure '%s' already exists!" % kwargs['name'] )

    if obj_rec.closure_id != new_id:
        # ID collision...
        storagetypes.deferred.defer( Closure.delete_all, [name_rec.key, new_key] )
        raise Exception( "Closure ID collision. Please try again." )

    # we're good!
    return new_key
def SetUGCaps_ByVolume(cls, volume_id, caps):
    """
    Set the capabilities of every UG in a Volume to `caps`.
    Each gateway's need_cert flag is recomputed from the new caps.
    Returns True.
    """
    query = {
        "Gateway.gateway_type ==": GATEWAY_TYPE_UG,
        "Gateway.volume_id ==": volume_id,
    }

    def update(gw):
        # apply the new caps and persist asynchronously
        gw.caps = caps
        gw.need_cert = Gateway.needs_cert(GATEWAY_TYPE_UG, caps)
        return gw.put_async()

    storagetypes.wait_futures(cls.ListAll(query, map_func=update))
    return True
def Delete(cls, g_name_or_id):
    """
    Logically delete a gateway: set its "deleted" flag inside a
    transaction so reads skip it, delete its name holder, and unref
    its driver.  A missing gateway is treated as already deleted
    (returns True either way).
    """
    gateway = Gateway.Read(g_name_or_id)
    if not gateway:
        # nothing to do
        return True

    g_id = gateway.g_id
    key_name = Gateway.make_key_name(g_id=g_id)

    def mark_deleted():
        # transaction body: flip the deleted bit atomically
        gw = storagetypes.make_key(cls, key_name).get()
        if gw is None:
            return None

        gw.deleted = True
        gw.put()
        return gw.key

    storagetypes.transaction(mark_deleted)

    # remove the name reservation and release the driver ref concurrently
    name_key = storagetypes.make_key(
        GatewayNameHolder, GatewayNameHolder.make_key_name(gateway.name))

    storagetypes.wait_futures([
        name_key.delete_async(),
        GatewayDriver.unref_async(gateway.driver_hash),
    ])

    # drop cached state
    Gateway.FlushCache(g_id)
    Gateway.FlushCacheDriver(gateway.driver_hash)
    storagetypes.memcache.delete(
        Gateway.Read_ByName_name_cache_key(g_name_or_id))
    return True
def Delete( cls, g_name_or_id ):
    """
    Logically delete the given gateway by setting its "deleted" flag in
    a transaction (so it no longer gets read), delete its name holder,
    and unref its driver.  Raises if the gateway does not exist.
    """
    gateway = Gateway.Read( g_name_or_id )
    if not gateway:
        raise Exception("No such Gateway '%s'" % g_name_or_id )

    g_id = gateway.g_id
    key_name = Gateway.make_key_name( g_id=g_id )

    def set_deleted():
        # atomically set the gateway to deleted
        gw = storagetypes.make_key( cls, key_name ).get()
        if gw is None:
            return None

        gw.deleted = True
        gw.put()
        return gw.key

    storagetypes.transaction( set_deleted )

    # remove the name reservation and release the driver, concurrently
    name_holder_key = storagetypes.make_key(
        GatewayNameHolder, GatewayNameHolder.make_key_name( gateway.name ) )

    storagetypes.wait_futures( [name_holder_key.delete_async(),
                                GatewayDriver.unref_async( gateway.driver_hash )] )

    # drop cached state
    Gateway.FlushCache( g_id )
    Gateway.FlushCacheDriver( gateway.driver_hash )
    storagetypes.memcache.delete( Gateway.Read_ByName_name_cache_key( g_name_or_id ) )
    return True
def inner(caller_user, *args, **kw): if caller_user == None: # authentication failed raise Exception("Caller has insufficient privileges") if not is_user( caller_user ): # not a user raise Exception("Caller is not a user") if self.admin_only: assert_admin( caller_user ) source_object_fut = None target_object_fut = None futs = [] # get the source object source_object_id = object_id_from_name( self.source_object_name, func, args, kw ) source_object_fut = self.source_object_cls.Read( source_object_id, async=True ) futs.append( source_object_fut ) # get the target object target_object_id = object_id_from_name( self.target_object_name, func, args, kw ) target_object_fut = self.target_object_cls.Read( target_object_id, async=True ) futs.append( target_object_fut ) storagetypes.wait_futures( futs ) source_object = None target_object = None if source_object_fut != None: source_object = source_object_fut.get_result() if target_object_fut != None: target_object = target_object_fut.get_result() # check the source object... source_object_id = object_id_from_name( self.source_object_name, func, args, kw ) if source_object_id is None: raise Exception("BUG: No %s ID given" % self.source_object_cls.__name__) if source_object == None: raise Exception("Source object '%s' does not exist" % source_object_id ) if self.caller_owns_source and not source_object.owned_by( caller_user ) and not caller_user.is_admin: raise Exception("Source object '%s' is not owned by '%s'" % (source_object_id, caller_user.email) ) # check the target object... 
target_object_id = object_id_from_name( self.target_object_name, func, args, kw ) if target_object_id is None: raise Exception("No %s ID given" % self.target_object_cls.__name__) if target_object == None: raise Exception("Target object '%s' does not exist" % target_object_id ) if self.caller_owns_target and not target_object.owned_by( caller_user ) and not caller_user.is_admin: raise Exception("Target object '%s' is not owned by '%s'" % (target_object_id, caller_user.email)) if self.pass_caller_user: kw[self.pass_caller_user] = caller_user # all check pass... result = func( *args, **kw ) assert isinstance( result, bool ), "Internal Bind error" return result
def Create( cls, user, volume, **kwargs ):
    """
    Create a gateway inside the given volume, owned by the given user.
    NOTE: careful--caps are required!  Don't let users call this directly.
    Returns the new gateway's key; raises on bad input or collision.
    """
    # the volume and owner are dictated by the caller's context
    kwargs['volume_id'] = volume.volume_id
    kwargs['owner_id'] = user.owner_id

    cls.fill_defaults( kwargs )

    missing = cls.find_missing_attrs( kwargs )
    if len(missing) > 0:
        raise Exception( "Missing attributes: %s" % (", ".join( missing )))

    invalid = cls.validate_fields( kwargs )
    if len(invalid) > 0:
        raise Exception( "Invalid values for fields: %s" % (", ".join( invalid )) )

    gateway_type = kwargs['gateway_type']

    # clamp requested capabilities to what this gateway type may hold
    kwargs['caps'] = cls.safe_caps( gateway_type, volume.default_gateway_caps )

    # random 63-bit gateway ID
    g_id = random.randint( 0, 2**63 - 1 )
    kwargs['g_id'] = g_id

    g_key_name = Gateway.make_key_name( g_id=g_id )
    g_key = storagetypes.make_key( cls, g_key_name )

    # optimistically create the nameholder and the gateway in parallel
    nameholder_fut = GatewayNameHolder.create_async( kwargs['name'], g_id )
    gateway_fut = cls.get_or_insert_async( g_key_name, **kwargs )
    storagetypes.wait_futures( [nameholder_fut, gateway_fut] )

    nameholder = nameholder_fut.get_result()
    gateway = gateway_fut.get_result()

    if nameholder.g_id != g_id:
        # name collision: roll back the gateway record
        storagetypes.deferred.defer( Gateway.delete_all, [g_key] )
        raise Exception( "Gateway '%s' already exists!" % kwargs['name'] )

    if gateway.g_id != g_id:
        # ID collision: roll back both records
        storagetypes.deferred.defer( Gateway.delete_all, [nameholder.key, g_key] )
        raise Exception( "Gateway ID collision. Please try again." )

    return g_key
def __compactify_parent_delete( cls, volume_id, parent_id, free_file_id, free_dir_index, num_shards, compactify_continuation=None ):
    """
    Given a free directory index, repeatedly find a child with a directory index
    value that can be swapped into a gap in the parent's index.  That is, find
    children with index values that are beyond the number of children, and swap
    their index nodes with index nodes that represent gaps.
    Returns 0 on success or if there is nothing to do; returns the negative
    errno from __compactify_child_delete on unrecoverable failure.
    """
    old_max_cutoff = None
    refresh_attempts = 5        # in case there are no children to swap

    while refresh_attempts >= 0:

        # refresh the index max cutoff--it may have changed.
        # the cutoff is the new number of children, after this entry has been deleted.
        # BUGFIX: check for None *before* subtracting 1--the original computed
        # GetNumChildren(...) - 1 first, so the None (missing directory) branch
        # was dead and a missing directory would raise TypeError instead.
        num_children = cls.GetNumChildren( volume_id, parent_id, num_shards )
        if num_children is None:
            # directory doesn't exist anymore...nothing to compactify
            logging.info("Index node /%s/%s does not exist" % (volume_id, parent_id) )
            return 0

        parent_max_cutoff = num_children - 1

        if old_max_cutoff is not None:
            # choose the smallest parent size seen so far as the cutoff, since it
            # maximizes the number of entries that can be selected to fill the gap.
            # If we don't do this, we could accidentally loop forever by never
            # finding an entry to replace the gap.
            parent_max_cutoff = min( old_max_cutoff, parent_max_cutoff )

        if parent_max_cutoff < free_dir_index:
            # gap no longer exists--the directory shrank out from under it
            logging.info("Directory /%s/%s compactification threshold %s exceeded (by %s)" % (volume_id, parent_id, parent_max_cutoff, free_dir_index) )
            return 0

        if parent_max_cutoff == free_dir_index:
            # gap is at the end.
            logging.info("Directory /%s/%s entry is at the end (%s)" % (volume_id, parent_id, free_dir_index))

            rc_fut = cls.__compactify_remove_index_async( volume_id, parent_id, free_file_id, free_dir_index )
            storagetypes.wait_futures( [rc_fut] )
            return 0

        if parent_max_cutoff == 0:
            # nothing left to compactify!
            logging.info("Directory /%s/%s appears to be empty" % (volume_id, parent_id))
            return 0

        old_max_cutoff = parent_max_cutoff

        replaced_dir_index, child_idx = cls.__compactify_child_delete( volume_id, parent_id, free_file_id, free_dir_index, parent_max_cutoff )

        if replaced_dir_index >= 0:
            # success!
            if compactify_continuation is not None:
                compactify_continuation( compacted_index_node=child_idx, replaced_index=replaced_dir_index )

            # verify that we didn't leave a gap by compactifying
            # (can happen if another process creates an entry while we're compactifying)
            new_parent_max_cutoff = cls.GetNumChildren( volume_id, parent_id, num_shards )
            if new_parent_max_cutoff is None:
                # directory doesn't exist anymore...nothing to compactify
                logging.info("Index node /%s/%s does not exist" % (volume_id, parent_id) )
                return 0

            if parent_max_cutoff < new_parent_max_cutoff:
                # left a gap--need to compactify again
                free_dir_index = parent_max_cutoff
                old_max_cutoff = None
                continue
            else:
                # done!
                logging.info("Directory /%s/%s compactified" % (volume_id, parent_id))
                return 0

        elif replaced_dir_index == -errno.EAGAIN or replaced_dir_index == -errno.EPERM:
            # need to re-check the maximum cutoff
            # (NOTE: EPERM can mean that the children beyond the cutoff aren't showing up in queries yet)
            # TODO: can loop forever?
            logging.info("__compactify_child_delete( /%s/%s index=%s threshold=%s ) rc = %s" % (volume_id, parent_id, free_dir_index, parent_max_cutoff, replaced_dir_index))
            refresh_attempts -= 1
            time.sleep(1)       # see if the datacenter will catch up
            continue

        else:
            # BUGFIX: was "parnet_id" (a NameError) in this log call
            logging.error("BUG: failed to compactify /%s/%s, rc = %s\n", volume_id, parent_id, replaced_dir_index )
            return replaced_dir_index
    # (tail of the preceding method, truncated in this view:
    #  the async branch returns the pending future to the caller)
    else:
        return result_fut

@classmethod
def __free( cls, volume_id, parent_id, file_id, dir_index, async=False ):
    """
    Get or create a free index node, for a given directory index.
    Return True if we succeeded.
    Return False if the node already exists for this dir_index value,
    or if it's already freed (or the file ID is wrong).
    """
    # NOTE: the 'async' parameter name means this code targets Python 2
    # ('async' is reserved in Python 3.7+).
    result_fut = cls.__update_or_alloc_async( volume_id, parent_id, file_id, dir_index, -1, False )

    if not async:
        # block until the allocation completes and report success/failure
        storagetypes.wait_futures( [result_fut] )
        return result_fut.get_result()
    else:
        # caller will wait on the future itself
        return result_fut

@classmethod
def __num_children_inc( cls, volume_id, parent_id, num_shards, do_transaction=True, async=False ):
    """
    Increment the number of children in a directory.
    """
    counter_name = cls.__parent_child_counter_name( volume_id, parent_id )
    if async:
    # (method body continues beyond this view)
def Create( cls, user, volume_cert ):
    """
    Create a Volume owned by `user` from a user-signed volume certificate.
    The user, being the volume owner, gets full control over it.

    NOTE: the caller will need to have validated and verified the
    authenticity of volume_cert.

    NOTE: this call should be followed up with a VolumeCertBundle.Put()
    to put the caller's new volume cert bundle.
    """
    # sanity-check the caller against the certificate
    if not user:
        raise Exception( "No user given" )

    if user.owner_id != volume_cert.owner_id:
        raise Exception("Invalid user: %s != %s" % (user.owner_id, volume_cert.owner_id ))

    kwargs = cls.cert_to_dict( volume_cert )

    # Validate (should be fine)
    missing = Volume.find_missing_attrs( kwargs )
    if missing:
        raise Exception( "Missing attributes: %s" % (", ".join( missing )))

    invalid = Volume.validate_fields( kwargs )
    if invalid:
        raise Exception( "Invalid values for fields: %s" % (", ".join( invalid )) )

    if len(kwargs['name']) == 0:
        raise Exception("Empty volume name")

    volume_id = kwargs['volume_id']
    volume_key_name = Volume.make_key_name( volume_id=volume_id )
    volume_key = storagetypes.make_key( Volume, volume_key_name )

    # put the Volume and nameholder at the same time---there's a good chance we'll succeed
    nameholder_fut = VolumeNameHolder.create_async( kwargs['name'], volume_id )
    volume_fut = Volume.get_or_insert_async( volume_key_name,
                                             name=kwargs['name'],
                                             blocksize=kwargs['blocksize'],
                                             description=kwargs['description'],
                                             owner_id=user.owner_id,
                                             volume_id=volume_id,
                                             version=kwargs['version'],
                                             private=kwargs['private'],
                                             archive=kwargs['archive'],
                                             allow_anon = kwargs['allow_anon'],
                                             metadata_public_key = kwargs['metadata_public_key'],
                                             deleted=False,
                                             volume_cert_bin=volume_cert.SerializeToString() )

    storagetypes.wait_futures( [nameholder_fut, volume_fut] )

    # verify that there was no collision
    volume = volume_fut.get_result()
    nameholder = nameholder_fut.get_result()

    if nameholder.volume_id != volume_id:
        # name collision
        storagetypes.deferred.defer( Volume.delete_all, [volume_key] )
        raise Exception( "Volume '%s' already exists!" % kwargs['name'])

    if volume.volume_id != volume_id:
        # ID collision
        storagetypes.deferred.defer( Volume.delete_all, [volume_key, nameholder.key] )
        raise Exception( "Volume ID collision. Please try again" )

    # (a VolumeAccessRequest grant used to be created here, but is disabled)
    return volume_key
def Create(cls, user, **kwargs):
    """
    Given volume data, store it.  The SyndicateUser `user` becomes the
    owner of the new Volume, and is granted access to it.

    Arguments:
       user -- SyndicateUser instance that will own this Volume

    Required keyword arguments:
       name -- name of the Volume (str)
       blocksize -- size of the Volume's blocks in bytes (int)
       description -- description of the Volume (str)
       private -- whether or not this Volume is visible to other users (bool)

    Optional keyword arguments:
       metadata_private_key -- PEM-encoded RSA private key, 4096 bits (str)
       archive -- whether or not this Volume is populated only by Acquisition Gateways (bool)
       default_gateway_caps -- bitfield of capabilities Gateways created within this Volume should receive

    Returns the new Volume's key; raises on invalid input or collision.
    """
    if not user:
        raise Exception("No user given")

    # placeholder; the real owner ID is set once input is validated
    kwargs['owner_id'] = 0

    Volume.fill_defaults(kwargs)

    # derive the public key from the private key if it wasn't supplied
    Volume.extract_keys('metadata_public_key', 'metadata_private_key',
                        kwargs, VOLUME_RSA_KEYSIZE)

    missing = Volume.find_missing_attrs(kwargs)
    if missing:
        raise Exception("Missing attributes: %s" % (", ".join(missing)))

    invalid = Volume.validate_fields(kwargs)
    if invalid:
        raise Exception("Invalid values for fields: %s" % (", ".join(invalid)))

    # both keys must be well-formed RSA keys of the expected size
    for key_field in ['metadata_public_key', 'metadata_private_key']:
        if not cls.is_valid_key(kwargs[key_field], VOLUME_RSA_KEYSIZE):
            raise Exception("Key must be a %s-bit RSA key" % (VOLUME_RSA_KEYSIZE))

    # attempt to create the Volume under a fresh random ID
    volume_id = random.randint(1, 2**63 - 1)
    volume_key_name = Volume.make_key_name(volume_id=volume_id)
    volume_key = storagetypes.make_key(Volume, volume_key_name)

    # put the Volume and nameholder at the same time---there's a good
    # chance we'll succeed
    nameholder_fut = VolumeNameHolder.create_async(kwargs['name'], volume_id)
    volume_fut = Volume.get_or_insert_async(
        volume_key_name,
        name=kwargs['name'],
        blocksize=kwargs['blocksize'],
        description=kwargs['description'],
        owner_id=user.owner_id,
        volume_id=volume_id,
        active=kwargs.get('active', False),
        version=1,
        cert_version=1,
        private=kwargs['private'],
        archive=kwargs['archive'],
        allow_anon=kwargs['allow_anon'],
        metadata_public_key=kwargs['metadata_public_key'],
        metadata_private_key=kwargs['metadata_private_key'],
        default_gateway_caps=kwargs['default_gateway_caps'])

    storagetypes.wait_futures([nameholder_fut, volume_fut])

    # verify that there was no collision
    volume = volume_fut.get_result()
    nameholder = nameholder_fut.get_result()

    if nameholder.volume_id != volume_id:
        # name collision
        storagetypes.deferred.defer(Volume.delete_all, [volume_key])
        raise Exception("Volume '%s' already exists!" % kwargs['name'])

    if volume.volume_id != volume_id:
        # ID collision
        storagetypes.deferred.defer(Volume.delete_all,
                                    [volume_key, nameholder.key])
        raise Exception("Volume ID collision. Please try again")

    # grant the owner access for all gateway types in this volume
    req = VolumeAccessRequest.create_async(
        user.owner_id, volume_id, kwargs['name'],
        random.randint(-2**63, 2**63 - 1),
        VolumeAccessRequest.STATUS_GRANTED,
        gateway_caps=kwargs['default_gateway_caps'],
        allowed_gateways=(1 << GATEWAY_TYPE_AG) | (1 << GATEWAY_TYPE_UG) | (1 << GATEWAY_TYPE_RG),
        request_message="Created").get_result()

    return volume_key
def _resolve( owner_id, volume, file_id, file_version, write_nonce, page_id, file_ids_only=False ):
    """
    Read file and listing of the given file_id, on behalf of owner_id.
    Serves from memcache when possible; falls back to the datastore and
    re-caches.  Returns (error, signed response), where error is 0 or a
    negative errno (-EACCES / -ENOENT).
    """
    all_ents = []
    file_fut = None
    error = 0
    # assume the caller's view is stale until proven otherwise
    need_refresh = True
    file_data_fut = None

    file_memcache = MSEntry.Read( volume, file_id, memcache_keys_only=True )
    file_data = storagetypes.memcache.get( file_memcache )

    # do we need to consult the datastore?
    if file_data is None:
        logging.info( "file %s not cached" % file_id )

        file_data_fut = MSEntry.Read( volume, file_id, futs_only=True )
        all_futs = MSEntry.FlattenFuture( file_data_fut )

        storagetypes.wait_futures( all_futs )

        file_data = MSEntry.FromFuture( file_data_fut )

        if file_data is not None:
            # repopulate the cache for the next reader
            cacheable = { file_memcache: file_data }
            logging.info( "cache file %s (%s)" % (file_id, file_data) )
            storagetypes.memcache.set_multi( cacheable )

    if file_data is not None:
        # got data...
        logging.info("%s has type %s version %s write_nonce %s" % (file_data.name, file_data.ftype, file_data.version, file_data.write_nonce))

        # do we need to actually send this?
        if file_data.version == file_version and file_data.write_nonce == write_nonce:
            # caller's copy is current
            need_refresh = False
        else:
            if file_data.ftype == MSENTRY_TYPE_DIR:
                # directory: include the requested page of children
                error, listing, next_cursor = MSEntry.ListDir( volume, file_id, page_id, owner_id=owner_id, file_ids_only=file_ids_only )
                if error == 0:
                    if listing is not None:
                        # NOTE(review): in the file_ids_only case the entries in
                        # all_ents appear to be bare file IDs, yet the reply loop
                        # below accesses ent.file_id -- confirm ListDir's return
                        # type with file_ids_only=True.
                        if file_ids_only:
                            all_ents = [file_data.file_id] + listing
                        else:
                            all_ents = [file_data] + listing
                    else:
                        if file_ids_only:
                            all_ents = [file_data.file_id]
                        else:
                            all_ents = [file_data]

                    #logging.info("listing of %s: %s" % (file_data.file_id, listing))
            else:
                # regular file: just the entry itself
                if file_ids_only:
                    all_ents = [file_data.file_id]
                else:
                    all_ents = [file_data]

    # check security
    if error == 0:
        # default-deny; cleared below if a permission check passes
        error = -errno.EACCES

        if file_data is None:
            # not found
            error = -errno.ENOENT

        elif file_data.ftype == MSENTRY_TYPE_DIR:
            # directory. check permissions
            # (Python 2 octal literal: 0055 == group/other r-x bits)
            if file_data.owner_id == owner_id or (file_data.mode & 0055) != 0:
                # readable
                error = 0

        elif file_data.ftype == MSENTRY_TYPE_FILE:
            # file. check permissions
            # (0044 == group/other read bits)
            if file_data.owner_id == owner_id or (file_data.mode & 0044) != 0:
                # readable
                error = 0

    reply = make_ms_reply( volume, error )

    if error == 0:
        # all is well.
        reply.listing.ftype = file_data.ftype

        # modified?
        if not need_refresh:
            reply.listing.status = ms_pb2.ms_listing.NOT_MODIFIED
        else:
            reply.listing.status = ms_pb2.ms_listing.NEW

        # all_ents is empty when NOT_MODIFIED, so this loop only emits
        # entries for a NEW listing
        for ent in all_ents:
            if file_ids_only:
                # just file ID
                reply.listing.file_ids.append( ent.file_id )
            else:
                # full ent
                ent_pb = reply.listing.entries.add()
                ent.protobuf( ent_pb )

        # logging.info("Resolve %s: Serve back: %s" % (file_id, all_ents))

    else:
        # error path: empty listing
        reply.listing.ftype = 0
        reply.listing.status = ms_pb2.ms_listing.NONE

    # sign and deliver
    return (error, file_update_complete_response( volume, reply ))
def Create( cls, user, **kwargs ):
    """
    Given volume data, store it, making `user` the Volume's owner and
    granting the owner access.

    Arguments:
       user -- SyndicateUser instance that will own this Volume

    Required keyword arguments:
       name -- name of the Volume (str)
       blocksize -- size of the Volume's blocks in bytes (int)
       description -- description of the Volume (str)
       private -- whether or not this Volume is visible to other users (bool)

    Optional keyword arguments:
       metadata_private_key -- PEM-encoded RSA private key, 4096 bits (str)
       archive -- whether or not this Volume is populated only by Acquisition Gateways (bool)
       default_gateway_caps -- bitfield of capabilities Gateways created within this Volume should receive

    Returns the new Volume's key; raises Exception on bad input or a
    name/ID collision.
    """
    # sanity check
    if not user:
        raise Exception( "No user given" )

    # will look up user and fill with owner ID once we validate input.
    kwargs['owner_id'] = 0

    Volume.fill_defaults( kwargs )

    # extract public key from private key if needed
    Volume.extract_keys( 'metadata_public_key', 'metadata_private_key', kwargs, VOLUME_RSA_KEYSIZE )

    # Validate
    missing = Volume.find_missing_attrs( kwargs )
    if len(missing) != 0:
        raise Exception( "Missing attributes: %s" % (", ".join( missing )))

    invalid = Volume.validate_fields( kwargs )
    if len(invalid) != 0:
        raise Exception( "Invalid values for fields: %s" % (", ".join( invalid )) )

    # vet the keys
    for key_field in ['metadata_public_key', 'metadata_private_key']:
        key_str = kwargs[key_field]
        if not cls.is_valid_key( key_str, VOLUME_RSA_KEYSIZE ):
            raise Exception("Key must be a %s-bit RSA key" % (VOLUME_RSA_KEYSIZE) )

    # attempt to create the Volume
    new_volume_id = random.randint( 1, 2**63 - 1 )
    key_name = Volume.make_key_name( volume_id=new_volume_id )
    new_volume_key = storagetypes.make_key( Volume, key_name )

    # the volume's complete initial state
    volume_attrs = dict( name=kwargs['name'],
                         blocksize=kwargs['blocksize'],
                         description=kwargs['description'],
                         owner_id=user.owner_id,
                         volume_id=new_volume_id,
                         active=kwargs.get('active', False),
                         version=1,
                         cert_version=1,
                         private=kwargs['private'],
                         archive=kwargs['archive'],
                         allow_anon=kwargs['allow_anon'],
                         metadata_public_key=kwargs['metadata_public_key'],
                         metadata_private_key=kwargs['metadata_private_key'],
                         default_gateway_caps=kwargs['default_gateway_caps'] )

    # put the Volume and nameholder at the same time---there's a good chance we'll succeed
    nameholder_fut = VolumeNameHolder.create_async( kwargs['name'], new_volume_id )
    volume_fut = Volume.get_or_insert_async( key_name, **volume_attrs )

    storagetypes.wait_futures( [nameholder_fut, volume_fut] )

    # verify that there was no collision
    volume = volume_fut.get_result()
    nameholder = nameholder_fut.get_result()

    if nameholder.volume_id != new_volume_id:
        # name collision
        storagetypes.deferred.defer( Volume.delete_all, [new_volume_key] )
        raise Exception( "Volume '%s' already exists!" % kwargs['name'])

    if volume.volume_id != new_volume_id:
        # ID collision
        storagetypes.deferred.defer( Volume.delete_all, [new_volume_key, nameholder.key] )
        raise Exception( "Volume ID collision. Please try again" )

    # grant the owner access for all gateway types in this volume
    req = VolumeAccessRequest.create_async( user.owner_id, new_volume_id, kwargs['name'],
                                            random.randint(-2**63, 2**63 - 1),
                                            VolumeAccessRequest.STATUS_GRANTED,
                                            gateway_caps=kwargs['default_gateway_caps'],
                                            allowed_gateways=(1 << GATEWAY_TYPE_AG)|(1 << GATEWAY_TYPE_UG)|(1 << GATEWAY_TYPE_RG),
                                            request_message="Created").get_result()

    return new_volume_key
def Create(cls, user, volume, **kwargs):
    """
    Create a gateway.
    NOTE: careful--caps are required! don't let users call this directly.
    """
    # pin the new gateway to the given volume and to the calling user
    kwargs['volume_id'] = volume.volume_id
    kwargs['owner_id'] = user.owner_id

    # fill in defaults for any attributes the caller omitted
    cls.fill_defaults(kwargs)

    # validate the now-complete attribute set
    missing = cls.find_missing_attrs(kwargs)
    if missing:
        raise Exception("Missing attributes: %s" % (", ".join(missing)))

    invalid = cls.validate_fields(kwargs)
    if invalid:
        raise Exception("Invalid values for fields: %s" % (", ".join(invalid)))

    gw_type = kwargs['gateway_type']

    # clamp requested capabilities to what this gateway type may safely hold
    kwargs['caps'] = cls.safe_caps(gw_type, volume.default_gateway_caps)

    # decide whether this gateway requires certificate generation
    kwargs['need_cert'] = Gateway.needs_cert(gw_type, kwargs['caps'])

    # pick a (probabilistically unique) gateway ID; collisions are caught below
    new_id = random.randint(0, 2**63 - 1)
    kwargs['g_id'] = new_id

    key_name = Gateway.make_key_name(g_id=new_id)
    gw_key = storagetypes.make_key(cls, key_name)

    # write the nameholder and the gateway concurrently---there's a good chance we'll succeed
    nameholder_fut = GatewayNameHolder.create_async(kwargs['name'], new_id)
    gw_fut = cls.get_or_insert_async(key_name, **kwargs)

    storagetypes.wait_futures([nameholder_fut, gw_fut])

    nameholder = nameholder_fut.get_result()
    gw = gw_fut.get_result()

    if nameholder.g_id != new_id:
        # name collision: the nameholder belongs to someone else; roll back our gateway record
        storagetypes.deferred.defer(Gateway.delete_all, [gw_key])
        raise Exception("Gateway '%s' already exists!" % kwargs['name'])

    if gw.g_id != new_id:
        # ID collision: get_or_insert returned a pre-existing gateway; roll back both records
        storagetypes.deferred.defer(Gateway.delete_all, [nameholder.key, gw_key])
        raise Exception("Gateway ID collision. Please try again.")

    # we're good!
    return gw_key
def Create( cls, user, volume, gateway_cert, driver_text ):
   """
   Create a gateway, using its user-signed gateway certificate.

   Arguments:
   user         -- the SyndicateUser that signed/owns the cert
   volume       -- the Volume this gateway will belong to
   gateway_cert -- the gateway certificate (protobuf); its fields are unpacked via cert_to_dict
   driver_text  -- the driver body, or None if no driver accompanies the cert

   Returns the new gateway's datastore key on success.
   Raises Exception on cert/volume/user mismatch, invalid fields,
   driver-hash mismatch, or name/ID collision.

   NOTE: the caller must verify the authenticity of the certificate.
   """
   kwargs = cls.cert_to_dict( gateway_cert )

   # sanity check: the cert must match the volume and user we were given
   if kwargs['volume_id'] != volume.volume_id:
      raise Exception("Volume ID mismatch: cert has %s; expected %s" % (kwargs['volume_id'], volume.volume_id))

   if kwargs['owner_id'] != user.owner_id:
      raise Exception("User ID mismatch: cert has %s; expected %s" % (kwargs['owner_id'], user.owner_id) )

   # sanity check: do we have everything we need?
   missing = cls.find_missing_attrs( kwargs )
   if len(missing) != 0:
      raise Exception( "Missing attributes: %s" % (", ".join( missing )))

   # sanity check: are our fields valid?
   invalid = cls.validate_fields( kwargs )
   if len(invalid) != 0:
      raise Exception( "Invalid values for fields: %s" % (", ".join( invalid )) )

   # sanity check: does the driver match the driver's hash in the cert?
   if driver_text is not None:
      driver_hash = GatewayDriver.hash_driver( driver_text )
      if driver_hash != binascii.hexlify( gateway_cert.driver_hash ):
         # BUG FIX: this error path previously referenced an undefined name
         # `cert` (NameError); it must read the hash from `gateway_cert`.
         raise Exception("Driver hash mismatch: len = %s, expected = %s, got = %s" % (len(driver_text), driver_hash, binascii.hexlify( gateway_cert.driver_hash )))

   gateway_type = kwargs['gateway_type']

   # enforce cert distribution
   kwargs['need_cert'] = Gateway.needs_cert( gateway_type, kwargs['caps'] )

   # the gateway ID comes from the cert itself
   g_id = kwargs['g_id']
   g_key_name = Gateway.make_key_name( g_id=g_id )
   g_key = storagetypes.make_key( cls, g_key_name )

   # create a nameholder and this gateway at once---there's a good chance we'll succeed
   gateway_nameholder_fut = GatewayNameHolder.create_async( kwargs['name'], g_id )
   gateway_fut = cls.get_or_insert_async( g_key_name, **kwargs )
   futs = [gateway_nameholder_fut, gateway_fut]

   # ref (or create) the driver record while the above writes are in flight
   gateway_driver = None
   if driver_text is not None:
      gateway_driver = GatewayDriver.create_or_ref( driver_text )

   # wait for operations to complete
   storagetypes.wait_futures( futs )

   # check for collision...
   gateway_nameholder = gateway_nameholder_fut.get_result()
   gateway = gateway_fut.get_result()

   # anything we created that must be undone on failure
   to_rollback = []
   if gateway_driver is not None:
      to_rollback.append( gateway_driver.key )

   if gateway_nameholder.g_id != g_id:
      # name collision...
      to_rollback.append( g_key )
      storagetypes.deferred.defer( Gateway.delete_all, to_rollback )
      raise Exception( "Gateway '%s' already exists!" % kwargs['name'] )

   if gateway.g_id != g_id:
      # ID collision...
      to_rollback.append( gateway_nameholder.key )
      to_rollback.append( g_key )
      storagetypes.deferred.defer( Gateway.delete_all, to_rollback )
      raise Exception( "Gateway ID collision. Please try again." )

   # we're good!
   return g_key
def _getattr( owner_id, volume, file_id, file_version, write_nonce ):
   """
   Read one file/directory's metadata, by file ID.

   Arguments:
   owner_id     -- ID of the user making the request (used for the read-permission check)
   volume       -- Volume the entry lives in
   file_id      -- ID of the MSEntry to look up
   file_version -- version the caller already has (for freshness check)
   write_nonce  -- write nonce the caller already has (for freshness check)

   Returns (error, signed_reply): error is 0 on success, -errno.ENOENT if the
   entry does not exist, or whatever file_read_allowed() returns on access denial.
   """
   error = 0
   # NOTE(review): access_error is assigned but never used in this function
   access_error = 0
   need_refresh = True

   file_data = MSEntry.Read( volume, file_id )
   if file_data is not None:
      # got data...
      # do we need to actually send this?  If the caller's version and write
      # nonce both match, the caller's copy is already fresh.
      if file_data.version == file_version and file_data.write_nonce == write_nonce:
         need_refresh = False
         logging.info("%s has type %s version %s write_nonce %s, status=NOCHANGE" % (file_data.name, file_data.ftype, file_data.version, file_data.write_nonce))
      else:
         logging.info("%s has type %s version %s write_nonce %s, status=NEW" % (file_data.name, file_data.ftype, file_data.version, file_data.write_nonce))

      # permission check: 0 if readable, negative errno otherwise
      error = file_read_allowed( owner_id, file_data )
   else:
      # not found
      error = -errno.ENOENT

   reply = make_ms_reply( volume, error )

   if error == 0:
      # all is well.
      reply.listing.ftype = file_data.ftype

      # modified?
      if not need_refresh:
         # caller is up-to-date; send no entry data
         reply.listing.status = ms_pb2.ms_listing.NOT_MODIFIED
      else:
         reply.listing.status = ms_pb2.ms_listing.NEW

         # child count if directory
         num_children = file_data.num_children
         generation = file_data.generation

         if num_children is None:
            num_children = 0

         if generation is None:
            generation = 0

         if file_data.ftype == MSENTRY_TYPE_DIR:
            # directories: get the authoritative child count from the sharded index
            num_children_fut = MSEntryIndex.GetNumChildren( volume.volume_id, file_id, volume.num_shards, async=True )
            storagetypes.wait_futures( [num_children_fut] )
            num_children = num_children_fut.get_result()

         # full ent: serialize the entry into the reply
         ent_pb = reply.listing.entries.add()
         file_data.protobuf( ent_pb, num_children=num_children, generation=generation )

         # logging.info("Getattr %s: Serve back: %s" % (file_id, file_data))
   else:
      # not possible to reply with entry data
      reply.listing.ftype = 0
      reply.listing.status = ms_pb2.ms_listing.NONE

   # sign and deliver
   return (error, file_update_complete_response( volume, reply ))
def Create(cls, user, volume, gateway_cert, driver_text):
    """
    Create a gateway, using its user-signed gateway certificate.

    Arguments:
    user         -- the SyndicateUser creating this gateway
    volume       -- the Volume this gateway will belong to
    gateway_cert -- the gateway certificate (protobuf); unpacked via cert_to_dict
    driver_text  -- the driver body, or None if no driver accompanies the cert

    Returns the new gateway's datastore key on success.
    Raises Exception on cert/volume/user mismatch, invalid fields,
    driver-hash mismatch, or name/ID collision.

    NOTE: the caller must verify the authenticity of the certificate.
    """
    kwargs = cls.cert_to_dict(gateway_cert)

    # sanity check: the cert must match the volume we were given
    if kwargs['volume_id'] != volume.volume_id:
        raise Exception("Volume ID mismatch: cert has %s; expected %s" % (kwargs['volume_id'], volume.volume_id))

    if kwargs['owner_id'] != user.owner_id:
        # this is only okay if the user is the volume owner, and the gateway ID is the anonymous gateway
        if not (kwargs['owner_id'] == USER_ID_ANON and volume.owner_id == user.owner_id):
            raise Exception("User ID mismatch: cert has %s; expected %s" % (kwargs['owner_id'], user.owner_id))

    # sanity check: do we have everything we need?
    missing = cls.find_missing_attrs(kwargs)
    if len(missing) != 0:
        raise Exception("Missing attributes: %s" % (", ".join(missing)))

    # sanity check: are our fields valid?
    invalid = cls.validate_fields(kwargs)
    if len(invalid) != 0:
        raise Exception("Invalid values for fields: %s" % (", ".join(invalid)))

    # sanity check: does the driver match the driver's hash in the cert?
    if driver_text is not None:
        driver_hash = GatewayDriver.hash_driver(driver_text)
        if driver_hash != binascii.hexlify(gateway_cert.driver_hash):
            # BUG FIX: this error path previously referenced an undefined name
            # `cert` (NameError); it must read the hash from `gateway_cert`.
            raise Exception(
                "Driver hash mismatch: len = %s, expected = %s, got = %s" %
                (len(driver_text), driver_hash, binascii.hexlify(gateway_cert.driver_hash)))

    gateway_type = kwargs['gateway_type']

    # enforce cert distribution
    kwargs['need_cert'] = Gateway.needs_cert(gateway_type, kwargs['caps'])

    # the gateway ID comes from the cert itself
    g_id = kwargs['g_id']
    g_key_name = Gateway.make_key_name(g_id=g_id)
    g_key = storagetypes.make_key(cls, g_key_name)

    # create a nameholder and this gateway at once---there's a good chance we'll succeed
    gateway_nameholder_fut = GatewayNameHolder.create_async(kwargs['name'], g_id)
    gateway_fut = cls.get_or_insert_async(g_key_name, **kwargs)
    futs = [gateway_nameholder_fut, gateway_fut]

    # ref (or create) the driver record while the above writes are in flight
    gateway_driver = None
    if driver_text is not None:
        gateway_driver = GatewayDriver.create_or_ref(driver_text)

    # wait for operations to complete
    storagetypes.wait_futures(futs)

    # check for collision...
    gateway_nameholder = gateway_nameholder_fut.get_result()
    gateway = gateway_fut.get_result()

    # anything we created that must be undone on failure
    to_rollback = []
    if gateway_driver is not None:
        to_rollback.append(gateway_driver.key)

    if gateway_nameholder.g_id != g_id:
        # name collision...
        to_rollback.append(g_key)
        storagetypes.deferred.defer(Gateway.delete_all, to_rollback)
        raise Exception("Gateway '%s' already exists!" % kwargs['name'])

    if gateway.g_id != g_id:
        # ID collision...
        to_rollback.append(gateway_nameholder.key)
        to_rollback.append(g_key)
        storagetypes.deferred.defer(Gateway.delete_all, to_rollback)
        raise Exception("Gateway ID collision. Please try again.")

    # we're good!
    return g_key
def response_load_volume_and_gateway(request_handler, volume_id, gateway_id=None): """ Load a volume and the gateway from the request handler. Return (volume, gateway, volume_cert_bundle, status, time) """ read_start = storagetypes.get_time() # get the gateway's ID and credentials g_id = None if gateway_id is None: gateway_type, g_id, signature_b64 = response_read_gateway_basic_auth( request_handler.request.headers) else: g_id = gateway_id volume = None gateway = None cert_bundle = None volume_fut = Volume.Read(volume_id, async=True) cert_bundle_fut = VolumeCertBundle.Get(volume_id, async=True) gateway_fut = None if g_id is not None: gateway_fut = Gateway.Read(g_id, async=True) storagetypes.wait_futures([volume_fut, gateway_fut, cert_bundle_fut]) volume = volume_fut.get_result() cert_bundle = cert_bundle_fut.get_result() if gateway_fut is not None: gateway = gateway_fut.get_result() if volume is None or cert_bundle is None: logging.error("No volume, gateway, or cert bundle") response_user_error(request_handler, 404) return (None, None, None, 404, None) ''' Volume.SetCache( volume.volume_id, volume ) VolumeCertBundle.SetCache( volume.volume_id, cert_bundle ) if gateway is not None: Gateway.SetCache( gateway.g_id, gateway ) ''' # sanity checks if (volume.need_gateway_auth()) and (gateway is None or gateway_type is None or signature_b64 is None): # required authentication, but we don't have an Authentication header logging.error("Unable to authenticate gateway") return (None, None, None, 403, None) # need auth? if volume.need_gateway_auth() and gateway is None: logging.error("Unable to authenticate gateway") return (None, None, None, 403, None) # gateway validity if gateway is not None: # type match? if gateway_type is not None and gateway.gateway_type != gateway_type: logging.error("Type mismatch on %s:%s" % (gateway_type, g_id)) response_user_error(request_handler, 403) return (None, None, None, 403, None) # is the gateway in this volume? 
if not volume.is_gateway_in_volume(gateway): logging.error("Gateway '%s' is not in volume '%s'" % (gateway.name, volume.name)) response_user_error(request_handler, 403) return (None, None, None, 403, None) # make sure this gateway's cert is registered valid_gateway = gateway.authenticate_session( gateway_type, g_id, request_handler.request.url, signature_b64) if not valid_gateway and volume.need_gateway_auth(): # invalid credentials logging.error("Invalid authentication credentials") response_user_error(request_handler, 403) return (None, None, None, 403, None) read_time = storagetypes.get_time() - read_start return (volume, gateway, cert_bundle, 200, read_time)
def response_load_volume_and_gateway( request_handler, volume_id, gateway_id=None ): """ Load a volume and the gateway from the request handler. Return (volume, gateway, volume_cert_bundle, status, time) """ read_start = storagetypes.get_time() # get the gateway's ID and credentials g_id = None if gateway_id is None: gateway_type, g_id, signature_b64 = response_read_gateway_basic_auth( request_handler.request.headers ) else: g_id = gateway_id volume = None gateway = None cert_bundle = None volume_fut = Volume.Read( volume_id, async=True ) cert_bundle_fut = VolumeCertBundle.Get( volume_id, async=True ) gateway_fut = None if g_id is not None: gateway_fut = Gateway.Read( g_id, async=True ) storagetypes.wait_futures( [volume_fut, gateway_fut, cert_bundle_fut] ) volume = volume_fut.get_result() cert_bundle = cert_bundle_fut.get_result() if gateway_fut is not None: gateway = gateway_fut.get_result() if volume is None or cert_bundle is None: logging.error("No volume, gateway, or cert bundle") response_user_error( request_handler, 404 ) return (None, None, None, 404, None) Volume.SetCache( volume.volume_id, volume ) VolumeCertBundle.SetCache( volume.volume_id, cert_bundle ) if gateway is not None: Gateway.SetCache( gateway.g_id, gateway ) # sanity checks if (volume.need_gateway_auth()) and (gateway is None or gateway_type is None or signature_b64 is None): # required authentication, but we don't have an Authentication header logging.error("Unable to authenticate gateway") return (None, None, None, 403, None) # need auth? if volume.need_gateway_auth() and gateway is None: logging.error("Unable to authenticate gateway") return (None, None, None, 403, None) # gateway validity if gateway is not None: # type match? if gateway_type is not None and gateway.gateway_type != gateway_type: logging.error("Type mismatch on %s:%s" % (gateway_type, g_id)) response_user_error( request_handler, 403 ) return (None, None, None, 403, None ) # is the gateway in this volume? 
if not volume.is_gateway_in_volume( gateway ): logging.error("Gateway '%s' is not in volume '%s'" % (gateway.name, volume.name)) response_user_error( request_handler, 403 ) return (None, None, None, 403, None) # make sure this gateway's cert is registered valid_gateway = gateway.authenticate_session( gateway_type, g_id, request_handler.request.url, signature_b64 ) if not valid_gateway and volume.need_gateway_auth(): # invalid credentials logging.error("Invalid authentication credentials") response_user_error( request_handler, 403 ) return (None, None, None, 403, None) read_time = storagetypes.get_time() - read_start return (volume, gateway, cert_bundle, 200, read_time)