def writer():
    """Upload the contents of ``readable`` to the GS key.

    Closure over ``self``, ``encrypt``, ``update``, ``key`` and ``readable``
    from the enclosing scope. When ``update`` is falsy, the write is made
    conditional on the key not existing remotely, and a concurrent creation
    surfaces as ConcurrentFileModificationException.
    """
    headers = self.encryptedHeaders if encrypt else self.headerValues
    if update:
        try:
            key.set_contents_from_stream(readable, headers=headers)
        except boto.exception.GSDataError:
            if encrypt:
                # Tolerated: boto mis-reports a hash mismatch on encrypted
                # uploads. https://github.com/boto/boto/issues/3518
                # see self._writeFile for more
                pass
            else:
                raise
    else:
        try:
            # The if_generation kwarg ensures that the existing key matches the
            # given generation (version) before modifying anything. Setting
            # if_generation=0 ensures the key does not exist remotely.
            key.set_contents_from_stream(readable, headers=headers, if_generation=0)
        except (boto.exception.GSResponseError, boto.exception.GSDataError) as e:
            if isinstance(e, boto.exception.GSResponseError):
                if e.status == 412:
                    # Precondition failed: someone else created the key first.
                    raise ConcurrentFileModificationException(key.name)
                else:
                    # Bare raise (not `raise e`) preserves the original traceback.
                    raise
            elif encrypt:
                # https://github.com/boto/boto/issues/3518
                # see self._writeFile for more
                pass
            else:
                raise
def save(self):
    """Persist this file record to SimpleDB, conditioned on the expected previous version.

    Serializes the object via toItem(), writes the attributes with a
    conditional put (the condition fails if another writer bumped the
    version concurrently), deletes the superseded S3 object version, and
    trims any now-unused content-chunk attributes.

    :raises ConcurrentFileModificationException: if the conditional put
            fails because the stored version no longer matches.
    """
    itemAttributes, newChunkCount = self.toItem()
    # In SDB's expected-value syntax, False asserts the attribute is absent.
    expectedValue = ['version',
                     False if self.previousVersion is None else self.previousVersion]
    try:
        for attempt in retry_sdb():
            with attempt:
                assert self.outer.filesDomain.put_attributes(item_name=self.fileID,
                                                             attributes=itemAttributes,
                                                             expected_value=expectedValue)
        if self.previousVersion:
            # The old S3 object version is no longer referenced; drop it.
            self.outer.filesBucket.delete_key(self.fileID,
                                              version_id=self.previousVersion)
        self._previousVersion = self._version
        if newChunkCount < self._numContentChunks:
            # The new item uses fewer chunk attributes; delete the leftovers.
            staleChunkNames = [self._chunkName(i)
                               for i in xrange(newChunkCount, self._numContentChunks)]
            for attempt in retry_sdb():
                with attempt:
                    self.outer.filesDomain.delete_attributes(self.fileID,
                                                             attributes=staleChunkNames)
        self._numContentChunks = newChunkCount
    except SDBResponseError as e:
        if e.error_code == 'ConditionalCheckFailed':
            raise ConcurrentFileModificationException(self.fileID)
        else:
            raise
def readFrom(self, readable):
    """Stream the contents of ``readable`` into the GS key.

    Relies on ``store``, ``encrypt``, ``update`` and ``key`` from the
    enclosing scope. When ``update`` is falsy, the upload is made
    conditional on the key not yet existing, and a concurrent creation
    surfaces as ConcurrentFileModificationException.
    """
    headers = store.encryptedHeaders if encrypt else store.headerValues
    if update:
        try:
            key.set_contents_from_stream(readable, headers=headers)
        except boto.exception.GSDataError:
            if encrypt:
                # Tolerated: boto mis-reports a hash mismatch on encrypted
                # uploads. https://github.com/boto/boto/issues/3518
                # see self._writeFile for more
                pass
            else:
                raise
    else:
        try:
            # The if_generation argument ensures that the existing key matches the
            # given generation, i.e. version, before modifying anything. Passing a
            # generation of 0 ensures that the key does not exist remotely.
            key.set_contents_from_stream(readable, headers=headers, if_generation=0)
        except (boto.exception.GSResponseError, boto.exception.GSDataError) as e:
            if isinstance(e, boto.exception.GSResponseError):
                if e.status == 412:
                    # Precondition failed: the key was created concurrently.
                    raise ConcurrentFileModificationException(key.name)
                else:
                    # Bare raise (not `raise e`) preserves the original traceback.
                    raise
            elif encrypt:
                # https://github.com/boto/boto/issues/3518
                # see self._writeFile for more
                pass
            else:
                raise
def reader():
    """Upload ``readable`` to the Azure blob as a sequence of blocks, then commit.

    Closure over ``readable``, ``maxBlockSize``, ``encrypted``, ``encryption``,
    ``container``, ``jobStoreFileID``, ``checkForModification`` and
    ``expectedVersion`` from the enclosing scope. If a modification check is
    requested, the commit happens under a blob lease and an etag mismatch
    raises ConcurrentFileModificationException.
    """
    blockIds = []
    try:
        while True:
            chunk = readable.read(maxBlockSize)
            if not chunk:
                # Safe even if nothing was read: committing an empty block
                # list creates an empty blob.
                break
            if encrypted:
                chunk = encryption.encrypt(chunk, self.keyPath)
            newBlockId = self._newFileID()
            container.put_block(blob_name=jobStoreFileID,
                                block=chunk,
                                blockid=newBlockId)
            blockIds.append(newBlockId)
    except:
        # Deleting the blob is guaranteed to discard any uncommitted blocks;
        # then propagate the original failure.
        container.delete_blob(blob_name=jobStoreFileID)
        raise
    if not (checkForModification and expectedVersion is not None):
        # Nothing to compare against: blindly write over whatever was there.
        container.put_block_list(blob_name=jobStoreFileID,
                                 block_list=blockIds,
                                 x_ms_meta_name_values=dict(encrypted=str(encrypted)))
        return
    # Acquire a (60-second) write lock,
    leaseId = container.lease_blob(blob_name=jobStoreFileID,
                                   x_ms_lease_action='acquire')['x-ms-lease-id']
    # check for modification,
    blobProps = container.get_blob_properties(blob_name=jobStoreFileID)
    if blobProps['etag'] != expectedVersion:
        container.lease_blob(blob_name=jobStoreFileID,
                             x_ms_lease_action='release',
                             x_ms_lease_id=leaseId)
        raise ConcurrentFileModificationException(jobStoreFileID)
    # commit the file,
    container.put_block_list(blob_name=jobStoreFileID,
                             block_list=blockIds,
                             x_ms_lease_id=leaseId,
                             x_ms_meta_name_values=dict(encrypted=str(encrypted)))
    # then release the lock.
    container.lease_blob(blob_name=jobStoreFileID,
                         x_ms_lease_action='release',
                         x_ms_lease_id=leaseId)
def readFrom(self, readable):
    """Upload ``readable`` to the Azure blob in blocks, then commit the block list.

    Closure over ``maxBlockSize``, ``encrypted``, ``encryption``, ``store``,
    ``container``, ``jobStoreFileID``, ``checkForModification`` and
    ``expectedVersion``. When a modification check is requested, the commit
    is performed under a 60-second blob lease; an etag mismatch raises
    ConcurrentFileModificationException.
    """
    blocks = []
    try:
        while True:
            buf = readable.read(maxBlockSize)
            if len(buf) == 0:
                # We're safe to break here even if we never read anything, since
                # putting an empty block list creates an empty blob.
                break
            if encrypted:
                buf = encryption.encrypt(buf, store.keyPath)
            blockID = store._newFileID()
            container.put_block(blob_name=bytes(jobStoreFileID),
                                block=buf,
                                block_id=blockID)
            blocks.append(BlobBlock(blockID))
    except:
        # NOTE(review): there is no explicit `raise` here — presumably the
        # `panic` context manager re-raises the active exception after the
        # cleanup below; verify against its implementation.
        with panic(log=logger):
            # This is guaranteed to delete any uncommitted blocks.
            container.delete_blob(blob_name=bytes(jobStoreFileID))
    if checkForModification and expectedVersion is not None:
        # Acquire a (60-second) write lock,
        leaseID = container.acquire_blob_lease(blob_name=bytes(jobStoreFileID),
                                               lease_duration=60)
        # check for modification,
        blob = container.get_blob_properties(blob_name=bytes(jobStoreFileID))
        if blob.properties.etag != expectedVersion:
            # Someone else changed the blob since we read expectedVersion;
            # drop the lease before reporting the conflict.
            container.release_blob_lease(blob_name=bytes(jobStoreFileID),
                                         lease_id=leaseID)
            raise ConcurrentFileModificationException(jobStoreFileID)
        # commit the file,
        container.put_block_list(blob_name=bytes(jobStoreFileID),
                                 block_list=blocks,
                                 lease_id=leaseID,
                                 metadata=dict(encrypted=str(encrypted)))
        # then release the lock.
        container.release_blob_lease(blob_name=bytes(jobStoreFileID),
                                     lease_id=leaseID)
    else:
        # No need to check for modification, just blindly write over whatever
        # was there.
        container.put_block_list(blob_name=bytes(jobStoreFileID),
                                 block_list=blocks,
                                 metadata=dict(encrypted=str(encrypted)))
def _registerFile(self, jobStoreFileID, bucketName='files', jobStoreID=None,
                  newVersion=None, oldVersion=None):
    """
    Register a file in the store.

    :param jobStoreFileID: the file's ID, mandatory

    :param bucketName: the name of the S3 bucket the file was placed in

    :param jobStoreID: the ID of the job owning the file, only allowed for first version of
                       file or when file is registered without content

    :param newVersion: the file's new version or None if the file is to be registered without
                       content, in which case jobStoreID must be passed

    :param oldVersion: the expected previous version of the file or None if newVersion is the
                       first version or file is registered without content

    :raises ConcurrentFileModificationException: if the stored version does not
            match oldVersion when the conditional put executes
    """
    # Must pass either jobStoreID or newVersion, or both
    assert jobStoreID is not None or newVersion is not None
    # Must pass newVersion if passing oldVersion
    assert oldVersion is None or newVersion is not None
    attributes = dict(bucketName=bucketName)
    if newVersion is not None:
        attributes['version'] = newVersion
    if jobStoreID is not None:
        attributes['jobStoreID'] = jobStoreID
    # In SDB's expected-value syntax, False stands for absence of the attribute.
    expected = ['version', False if oldVersion is None else oldVersion]
    try:
        for attempt in retry_sdb():
            with attempt:
                assert self.versions.put_attributes(item_name=jobStoreFileID,
                                                    attributes=attributes,
                                                    expected_value=expected)
        if oldVersion is not None:
            # The superseded S3 object version is no longer referenced; drop it.
            bucket = getattr(self, bucketName)
            bucket.delete_key(jobStoreFileID, version_id=oldVersion)
    except SDBResponseError as e:
        if e.error_code == 'ConditionalCheckFailed':
            raise ConcurrentFileModificationException(jobStoreFileID)
        else:
            raise