def _s3Auth(self):
    """Authenticate the pending S3 request.

    Sets ``self.user``: the anonymous user when the request carries no
    access-key id, otherwise the user returned by the auth manager for
    the request's id/signature pair.

    Raises:
        RestFSError: ``InternalServerError`` when no S3 request is attached,
            ``LoginFailed`` when authentication does not yield a user.
    """
    self._LOGGER.info('Authentication')
    if self.s3Request is None:
        # Error registered in the ErrorCodeFactory.
        self._LOGGER.error(ErrCode.err['InternalServerError']['message'])
        raise RestFSError(ErrCode.err['InternalServerError']['code'],
                          ErrCode.err['InternalServerError']['message'],
                          ErrCode.err['InternalServerError']['http'])
    if self.s3Request.id is None:
        # No access key supplied: treat the caller as the anonymous user.
        self._LOGGER.info('User Anonymous')
        self.user = User(0, "anonymous", "Anonymous", "", "", 0, 0)
    else:
        self._LOGGER.info('Check identity user')
        auth = self.getAuthManager()
        self.user = auth.checkS3Auth(self.s3Request.id,
                                     self.s3Request.signature,
                                     self.s3Request.getStringToSign())
        self._LOGGER.debug("Fs Request id %s " % self.s3Request.id)
        self._LOGGER.debug("Fs Request signature %s " % self.s3Request.signature)
        # BUG FIX: the original logged the bound method object itself
        # (missing call parentheses); call it so the actual string-to-sign
        # is logged.
        self._LOGGER.debug("Fs Request getStringToSign %s " % self.s3Request.getStringToSign())
    if self.user is None:
        # Error registered in the ErrorCodeFactory.
        self._LOGGER.error(ErrCode.err['LoginFailed']['message'])
        raise RestFSError(ErrCode.err['LoginFailed']['code'],
                          ErrCode.err['LoginFailed']['message'],
                          ErrCode.err['LoginFailed']['http'])
def _putObjectAcl(self):
    """Parse the AccessControlPolicy XML body and store the object's ACL.

    Only canonical-user grantees (those carrying an ``ID``) are supported;
    any other grantee type raises ``NotImplemented``.

    Raises:
        RestFSError: ``NotImplemented`` for non-canonical grantees.
    """
    self._LOGGER.info("putting the object acl")
    myDict = XmlToDict(self.getBody()).getDict()
    myList = myDict['AccessControlPolicy']['AccessControlList']['Grant']
    # The XML parser yields a bare dict for a single <Grant> and a list
    # for several: normalize to a list so one loop handles both cases.
    if type(myList) is types.DictType:
        myList = [myList]
    grantList = []
    for grantDict in myList:
        if not grantDict['Grantee'].has_key('ID'):
            # BUG FIX: the original only performed this check in the
            # multi-grant branch; a single grant without an ID raised a
            # bare KeyError instead of a clean NotImplemented error.
            self._LOGGER.warning('Putting different ACL from the usal CanonicalID, currently not implemented')
            raise RestFSError(errCode.err['NotImplemented']['code'],
                              errCode.err['NotImplemented']['message'],
                              errCode.err['NotImplemented']['http'])
        grant = Grant()
        grant.uid = grantDict['Grantee']['ID']
        grant.permission = ACPHelper.s3AclToPerm(grantDict['Permission'])
        grantList.append(grant)
    self.application.objectSrv.putObjectACL(self.s3Request.BUCKET,
                                            self.s3Request.OBJECT,
                                            self.s3Request.ID_REQUEST,
                                            self.getUser(), grantList)
    return None
def _checkOwner(self, user, prop):
    """Verify that *user* owns the object described by *prop*.

    Raises:
        RestFSError: ``AuthorizationDeny`` when the uids differ.
    """
    self._LOGGER.info("Check if the user is the object owner")
    if prop.uid == user.uid:
        return
    self._LOGGER.warning('The user is not the object owner')
    deny = errCode.err['AuthorizationDeny']
    raise RestFSError(deny['code'], deny['message'], deny['http'])
def _checkObjectExist(self, idObject):
    """Return the properties of object *idObject*, or fail if it is absent.

    Returns:
        The object properties from the metadata service.

    Raises:
        RestFSError: ``ObjectNotFound`` when the object does not exist.
    """
    self._LOGGER.info("Check if the object exists")
    obj = self.meta.getObjectProperties(idObject)
    if not obj:
        self._LOGGER.warning('The object does not exist')
        # BUG FIX: the original raised BucketNotFound for a missing
        # *object*; ObjectNotFound is the matching error code (it is the
        # one used by the write() path for the same condition).
        raise RestFSError(errCode.err['ObjectNotFound']['code'],
                          errCode.err['ObjectNotFound']['message'],
                          errCode.err['ObjectNotFound']['http'])
    return obj
def _checkBucketExist(self, bucket_name):
    """Return the properties of bucket *bucket_name*, or fail if absent.

    Returns:
        The bucket property record from the metadata service.

    Raises:
        RestFSError: ``BucketNotFound`` when the bucket does not exist.
    """
    self._LOGGER.info("Check if the bucket exists")
    prop = self.meta.getBucketProperty(bucket_name)
    if prop:
        return prop
    self._LOGGER.warning('The bucket does not exist')
    missing = errCode.err['BucketNotFound']
    raise RestFSError(missing['code'], missing['message'], missing['http'])
def _checkBucketPerm(self, bucket_name, user, prop, op):
    """Check that *user* may perform *op* on the bucket.

    Returns:
        True when the user is the bucket owner or the bucket ACP grants
        the operation.

    Raises:
        RestFSError: ``AuthorizationDeny`` otherwise.
    """
    self._LOGGER.info("Check if the user has permission on the bucket")
    self._LOGGER.debug('Permission-type : %s' % op)
    # The bucket owner is always allowed.
    if user.uid == prop.uid:
        return True
    # Otherwise the bucket must carry an ACP that grants the operation.
    if prop.acp:
        bucket_acp = self.meta.getBucketACP(bucket_name)
        if bucket_acp.grantee(user, op):
            return True
    self._LOGGER.warning('The user cannot do this operation the bucket')
    deny = errCode.err['AuthorizationDeny']
    raise RestFSError(deny['code'], deny['message'], deny['http'])
def amzToPerm(amz):
    """Map an S3 canned ACL (x-amz-acl header value) to a RestFS permission dict.

    Raises:
        RestFSError: ``MalformedACLError`` for an unrecognized canned ACL.
    """
    # Base read permissions shared by every canned ACL below.
    read_perm = {RestFS.EXECUTE: RestFS.ALLOW, RestFS.LIST_DIR: RestFS.ALLOW,
                 RestFS.READ_ACL: RestFS.ALLOW, RestFS.READ_ATTR: RestFS.ALLOW,
                 RestFS.READ_DATA: RestFS.ALLOW, RestFS.READ_XATTR: RestFS.ALLOW}
    if amz == S3.AMZ_PRIVATE or amz == S3.AMZ_BUCKET_OWNER_FULL_CONTROL:
        # Full control: every RestFS permission allowed.
        full_perm = dict(read_perm)
        full_perm.update({RestFS.ADD_FILE: RestFS.ALLOW, RestFS.ADD_SUBDIR: RestFS.ALLOW,
                          RestFS.ADMIN: RestFS.ALLOW, RestFS.APPEND_DATA: RestFS.ALLOW,
                          RestFS.DELETE_CHILD: RestFS.ALLOW, RestFS.DELETE_FILE: RestFS.ALLOW,
                          RestFS.FLAG: RestFS.ALLOW, RestFS.WRITE_ACL: RestFS.ALLOW,
                          RestFS.WRITE_ATTR: RestFS.ALLOW, RestFS.WRITE_DATA: RestFS.ALLOW,
                          RestFS.WRITE_OWNER: RestFS.ALLOW, RestFS.WRITE_XATTR: RestFS.ALLOW})
        return full_perm
    if amz == S3.AMZ_PUBLIC_READ or amz == S3.AMZ_AUTHENTICATED_READ:
        return read_perm
    if amz == S3.AMZ_BUCKET_OWNER_READ:
        # Read permissions plus administrative access for the bucket owner.
        owner_read = dict(read_perm)
        owner_read[RestFS.ADMIN] = RestFS.ALLOW
        return owner_read
    if amz == S3.AMZ_PUBLIC_READ_WRITE:
        # Read permissions plus the write family.
        rw_perm = dict(read_perm)
        rw_perm.update({RestFS.WRITE_ATTR: RestFS.ALLOW, RestFS.WRITE_ACL: RestFS.ALLOW,
                        RestFS.WRITE_DATA: RestFS.ALLOW, RestFS.WRITE_XATTR: RestFS.ALLOW})
        return rw_perm
    raise RestFSError(errCode.err['MalformedACLError']['code'],
                      errCode.err['MalformedACLError']['message'],
                      errCode.err['MalformedACLError']['http'])
def _checkObjectPerm(self, idObject, user, op):
    """Check that *user* may perform *op* on object *idObject*.

    Returns:
        True when the user owns the object or its ACP grants the operation.

    Raises:
        RestFSError: ``AuthorizationDeny`` otherwise.
    """
    # BUG FIX: the original log messages said "bucket" although this
    # method checks an *object*.
    self._LOGGER.info("Check if the user has permission on the object")
    self._LOGGER.debug('Permission-type : %s' % op)
    obj = self.getObjectProperties(idObject)
    # The owner is always allowed; checked first so the ACP is only
    # fetched when it is actually needed.
    if user.uid == obj.uid:
        return True
    acp = self.getObjectAcp(idObject)
    if acp.grantee(user, op):
        return True
    self._LOGGER.warning('The user cannot do this operation on the object')
    raise RestFSError(errCode.err['AuthorizationDeny']['code'],
                      errCode.err['AuthorizationDeny']['message'],
                      errCode.err['AuthorizationDeny']['http'])
def granteeToGrant(myList):
    """Convert parsed <Grant> XML (a dict, or a list of dicts) to Grant objects.

    Only canonical-user grantees (carrying an ``ID``) are supported.

    Returns:
        A list of ``Grant`` instances.

    Raises:
        RestFSError: ``NotImplemented`` for a grantee without an ``ID``.
    """
    # A single <Grant> parses as a dict, several as a list: normalize so
    # one loop handles both shapes.
    if type(myList) is types.DictType:
        myList = [myList]
    grantList = []
    for grantDict in myList:
        if not grantDict['Grantee'].has_key('ID'):
            # BUG FIX: the original single-grant branch skipped this check
            # and would fail with a KeyError instead of a clean
            # NotImplemented error.
            raise RestFSError(errCode.err['NotImplemented']['code'],
                              errCode.err['NotImplemented']['message'],
                              errCode.err['NotImplemented']['http'])
        grant = Grant()
        grant.uid = grantDict['Grantee']['ID']
        grant.permission = s3AclToPerm(grantDict['Permission'])
        grantList.append(grant)
    return grantList
def amzToID(self, amz, user):
    """Map an S3 canned ACL to the grantee id it applies to.

    Returns:
        *user* for owner-scoped ACLs, ``RestFS.USER_ANONYMOUS`` for the
        public ACLs, ``RestFS.USER_AUTHENTICATED`` for authenticated-read.

    Raises:
        RestFSError: ``MalformedACLError`` for an unrecognized canned ACL.
    """
    owner_acls = (S3.AMZ_PRIVATE,
                  S3.AMZ_BUCKET_OWNER_FULL_CONTROL,
                  S3.AMZ_BUCKET_OWNER_READ)
    public_acls = (S3.AMZ_PUBLIC_READ, S3.AMZ_PUBLIC_READ_WRITE)
    if amz in owner_acls:
        return user
    if amz in public_acls:
        return RestFS.USER_ANONYMOUS
    if amz == S3.AMZ_AUTHENTICATED_READ:
        return RestFS.USER_AUTHENTICATED
    bad = errCode.err['MalformedACLError']
    raise RestFSError(bad['code'], bad['message'], bad['http'])
def s3AclToPerm(self, s3Acl):
    """Map an S3 grant permission value to a RestFS permission dict.

    Raises:
        RestFSError: ``MalformedACLError`` for an unrecognized value.
    """
    allow = RestFS.ALLOW
    # Base read set (note: deliberately without READ_ACL, which only the
    # *_ACP and FULL_CONTROL grants include).
    read_set = {RestFS.EXECUTE: allow, RestFS.LIST_DIR: allow,
                RestFS.READ_ATTR: allow, RestFS.READ_DATA: allow,
                RestFS.READ_XATTR: allow}
    write_set = {RestFS.WRITE_ATTR: allow, RestFS.WRITE_DATA: allow,
                 RestFS.WRITE_XATTR: allow}
    if s3Acl == S3.ACL_FULL_CONTROL:
        perm = dict(read_set)
        perm.update(write_set)
        perm.update({RestFS.ADD_FILE: allow, RestFS.ADD_SUBDIR: allow,
                     RestFS.ADMIN: allow, RestFS.APPEND_DATA: allow,
                     RestFS.DELETE_CHILD: allow, RestFS.DELETE_FILE: allow,
                     RestFS.FLAG: allow, RestFS.READ_ACL: allow,
                     RestFS.WRITE_ACL: allow, RestFS.WRITE_OWNER: allow})
        return perm
    if s3Acl == S3.ACL_WRITE_ACP:
        perm = dict(read_set)
        perm.update(write_set)
        perm[RestFS.READ_ACL] = allow
        perm[RestFS.WRITE_ACL] = allow
        return perm
    if s3Acl == S3.ACL_READ_ACP:
        perm = dict(read_set)
        perm[RestFS.READ_ACL] = allow
        return perm
    if s3Acl == S3.ACL_READ:
        return read_set
    if s3Acl == S3.ACL_WRITE:
        perm = dict(read_set)
        perm.update(write_set)
        return perm
    bad = errCode.err['MalformedACLError']
    raise RestFSError(bad['code'], bad['message'], bad['http'])
def write(self, bucket, object_name, uid, gid, context, storage_class, object_acl ,content_type, xattr, data_handler):
    """Write object data, splitting it into hash-addressed blocks grouped in segments.

    Creates the object if it does not exist (a trailing '/' in the name
    marks a directory), then writes each block whose content hash changed,
    trims blocks/segments left over from a previously larger object, and
    finally stores the updated attributes (size, counters, md5).
    """
    # context object
    # user id, bucket name, user group id [+ groups], session
    mode = ""  # NOTE(review): never used afterwards
    # Operation Type based on object existance
    idObj = self.lookup(bucket, object_name)
    #FIX TH PATH
    if idObj == None:
        idObj = OF.ENOENT
    # Create a new Object (object not found)
    if idObj == OF.ENOENT:
        object_type = OF.TYPE_FILE
        if object_name[-1] == '/' :
            #the object is a directory
            object_type = OF.TYPE_DIR
        idObj = self.createObject(bucket, object_name, object_type, '', uid, gid, context)
    if idObj == OF.EACCESS:
        raise RestFSError(errCode.err['AuthorizationDeny']['code'],\
                          errCode.err['AuthorizationDeny']['message'],\
                          errCode.err['AuthorizationDeny']['http'])
        return  # NOTE(review): unreachable after raise
    elif idObj == OF.ENOENT:
        raise RestFSError(errCode.err['ObjectNotFound']['code'],\
                          errCode.err['ObjectNotFound']['message'],\
                          errCode.err['ObjectNotFound']['http'])
    #Get Object Properties
    obj = self.getProperties(bucket, idObj, OF.PROPERTY_OBJECT, uid, gid, context)
    idObj = obj['id']
    #self.setObjectAcl(bucket, idObj, OF.PROPERTY_ACL, object_acl, uid, gid, context) # to be changed into setAttributes
    #check if i can change it
    #self._checkObjectPerm(obj.id , context.user,obj,BF.ACL_WRITE)
    # Object Dimension
    file_size = len(data_handler)
    # NOTE(review): Python 2 '/' is integer division when both operands are
    # ints, so math.ceil never rounds up here -- a file of size
    # block_size*n + 1 would lose its last partial block; verify intended.
    block_counter = int(math.ceil(file_size/obj['block_size']))
    block_counter_old = obj['block_counter']
    segment_counter = int(math.ceil(file_size/(obj['segment_size']*obj['block_size'])))
    segment_counter_old = obj['segment_counter']
    # Init struct for save data
    segmentId = 0
    # seg maps blockId -> content hash; segBlock maps blockId -> storage key.
    seg = self.getObjectSegment(bucket, idObj, segmentId, uid, gid, context)
    segBlock = self.getObjectSegment(bucket, idObj, str(segmentId)+'-Bid', uid, gid, context)
    myMd5 = md5.new()
    block_pos = 0
    myStringIO = cStringIO.StringIO(data_handler)
    # NOTE(review): if block_counter is 0 this loop never runs and blockId
    # is undefined at the trimming step below -- confirm empty writes are
    # impossible or handled elsewhere.
    for blockId in range(1,block_counter+1) :
        if block_pos > obj['segment_size']:
            # Segment full: persist it and start a new one.
            self.setObjectSegment(bucket, idObj, segmentId, seg, uid, gid, context)
            # Create a new segment numBlock BlockId
            self.setObjectSegment(bucket, idObj, str(segmentId)+'-Bid', segBlock, uid, gid, context)
            block_pos = 0
            segmentId += 1
            seg = self.getObjectSegment(bucket, idObj, segmentId, uid, gid, context)
            segBlock = self.getObjectSegment(bucket, idObj, str(segmentId)+'-Bid', uid, gid, context)
        block_pos += 1
        new_block = myStringIO.read(int(obj['block_size']))
        bhash = self._generateHash(new_block)
        bkey = self._getBlockKey(bucket, idObj, segmentId, blockId)
        if seg.has_key(blockId):
            # Block already known: reuse its key, compare hashes below.
            hash_old = seg[blockId]
            bkey = segBlock[blockId]
        else:
            hash_old = None
            bkey = self._getBlockKey(bucket, idObj, segmentId, blockId)
        if bhash != hash_old:
            # Only rewrite blocks whose content actually changed.
            self.writeBlock(bucket, idObj, bkey, new_block, bhash, uid, gid, context)
        myMd5.update(new_block)
        seg[blockId] = bhash
        segBlock[blockId] = bkey
    #save the last segment
    self.setObjectSegment(bucket, idObj, segmentId, seg, uid, gid, context)
    # Create a new segment numBlock BlockId
    self.setObjectSegment(bucket, idObj, str(segmentId)+'-Bid', segBlock, uid, gid, context)
    if block_counter_old > block_counter:
        # Object shrank: drop the trailing blocks of the last segment.
        for i in range(blockId,blockId + (len(seg)-block_pos)):
            self.removeBlock(bucket, idObj, segBlock[i+1], uid, gid, context)
            del segBlock[i+1]
            del seg[i+1]
        # Re-persist the trimmed segment maps.
        # NOTE(review): flattened source makes the indentation ambiguous;
        # placing these inside the shrink branch matches the apparent intent.
        self.setObjectSegment(bucket, idObj, segmentId, seg, uid, gid, context)
        self.setObjectSegment(bucket, idObj, str(segmentId)+'-Bid', segBlock, uid, gid, context)
    #Check if i have to delete segments
    if segment_counter < segment_counter_old:
        for segId in range (segment_counter+1,segment_counter_old+1):
            segBlock = self.getObjectSegment(bucket, idObj, str(segId)+'-Bid', uid, gid, context)
            self.delObjectSegment(bucket, idObj, segId, uid, gid, context)
            self.removeSegment(bucket, idObj, segBlock, uid, gid, context)
    obj['block_counter'] = block_counter
    obj['segment_counter'] = segment_counter
    obj['size'] = file_size
    obj['num_segments'] = segmentId
    #FIXME do a incremental job ..
    obj['md5'] = myMd5.hexdigest()
    self.setAttributes(bucket, idObj,obj, uid, gid, context)
    return obj
def _notImplemented(self):
    """Log and raise the NotImplemented RestFS error for unsupported operations.

    Raises:
        RestFSError: always.
    """
    self._LOGGER.warning('Operation currently not implemented')
    entry = errCode.err['NotImplemented']
    raise RestFSError(entry['code'], entry['message'], entry['http'])
def putBucket(self, bucket_name, user, session, amz, location):
    """Create a new bucket for *user*.

    Validates privileges, name uniqueness, the per-user bucket quota and
    the requested region (defaulting to "EU"), then registers the bucket
    with the resource manager, creates its metadata (with an ACP built
    from the canned ACL *amz*) and storage, and finally puts it online.

    Raises:
        RestFSError: AuthorizationDeny, BucketAlreadyExists,
            LimitBucketsReached, InvalidPolicyDocument or RegionNotFound
            on the corresponding validation failure.
    """
    # service operation for search
    self._LOGGER.info("Creating the bucket")
    self._LOGGER.debug('Bucket-name : %s' % bucket_name)
    context = ''
    # NOTE(review): elsewhere the code reads user.uid -- confirm the User
    # class really exposes both .id and .uid, otherwise this is a typo.
    if user.id == 0:
        # uid 0 is the anonymous user, which may not create buckets.
        self._LOGGER.warning( 'The user have not privileges to create the bucket')
        raise RestFSError(errCode.err['AuthorizationDeny']['code'],\
                          errCode.err['AuthorizationDeny']['message'],\
                          errCode.err['AuthorizationDeny']['http'])
    res = self.res.findBucket(bucket_name)
    if res > 0:
        self._LOGGER.warning( 'The bucket you have tried to create already exists')
        raise RestFSError(errCode.err['BucketAlreadyExists']['code'],\
                          errCode.err['BucketAlreadyExists']['message'],\
                          errCode.err['BucketAlreadyExists']['http'])
    # How many bucket can create ?
    num = self.res.getCountBucketByOwner(user.uid)
    self._LOGGER.debug('Bucket-number created : %s' % num)
    # NOTE(review): class attribute access -- presumably a global limit;
    # if the limit is per-user this should be user.max_buckets. Confirm.
    mymax = User.max_buckets
    self._LOGGER.debug('Bucket-number max the user can create : %s' % mymax)
    if num > mymax:
        self._LOGGER.warning( 'The user has reached the limit of the buckets to create')
        raise RestFSError(errCode.err['LimitBucketsReached']['code'],\
                          errCode.err['LimitBucketsReached']['message'],\
                          errCode.err['LimitBucketsReached']['http'])
    #check policy
    #FIX
    # NOTE(review): this re-tests the findBucket result, so the branch is
    # unreachable (res > 0 already raised above); the intended policy
    # validation is still missing, as the #FIX marker suggests.
    if res > 0:
        self._LOGGER.warning('The policy document specified is invalid')
        raise RestFSError(errCode.err['InvalidPolicyDocument']['code'],\
                          errCode.err['InvalidPolicyDocument']['message'],\
                          errCode.err['InvalidPolicyDocument']['http'])
    #check location
    loc = self.res.getRegionList()
    if not location:
        # No location supplied: fall back to the default region.
        location = "EU"
    elif location not in loc:
        self._LOGGER.warning( 'The region/location the user specified was not found')
        raise RestFSError(errCode.err['RegionNotFound']['code'],\
                          errCode.err['RegionNotFound']['message'],\
                          errCode.err['RegionNotFound']['http'])
    #Find cluster
    # Set Administration privileges ..
    # Make registration of the new Bucket
    # Resource manager operation
    self.res.addBucket(bucket_name, user.uid)
    # Convert S3 Permission to RestFs Permission
    acp = ACP()
    grant = Grant()
    grant.permission = ACPHelper.amzToPerm(amz)
    grant.uid = user.uid
    grantList = [grant]
    acp.setByGrants(grantList)
    #FIX ME
    gid = ''
    self.meta.createBucket(bucket_name, acp, location, user.uid, gid, context)
    #Storage operation
    self.storage.createBucket(bucket_name, context)
    #Close the transaction, put bucket online
    # Status 2 presumably means "online" -- TODO confirm against the
    # resource manager's status constants.
    self.res.setBucketStatus(bucket_name, 2, context)